diff --git a/0029-Struct-Reorg-Add-Safe-Structure-Pointer-Compression.patch b/0029-Struct-Reorg-Add-Safe-Structure-Pointer-Compression.patch new file mode 100644 index 0000000000000000000000000000000000000000..7b097470db0ad84ae12342399db7c8334ff23298 --- /dev/null +++ b/0029-Struct-Reorg-Add-Safe-Structure-Pointer-Compression.patch @@ -0,0 +1,1191 @@ +From 7930d75c9fd3f36cc2dce934569f00c71248bb31 Mon Sep 17 00:00:00 2001 +From: liyancheng <412998149@qq.com> +Date: Sat, 25 Nov 2023 10:28:48 +0800 +Subject: [PATCH] [Struct Reorg] Add Safe Structure Pointer Compression + +Safe structure pointer compression allows safely transfer pointers +stored in structure into the index of structure array with smaller +type to reduce the size of structure. +Add flag -fipa-struct-reorg=4 to enable safe structure pointer +compression. +Add param compressed-pointer-size=[8,16,32] to control the compressed +pointer size. +--- + gcc/common.opt | 5 +- + gcc/ipa-struct-reorg/ipa-struct-reorg.cc | 908 ++++++++++++++++++++++- + gcc/ipa-struct-reorg/ipa-struct-reorg.h | 4 + + gcc/params.opt | 4 + + 4 files changed, 882 insertions(+), 39 deletions(-) + +diff --git a/gcc/common.opt b/gcc/common.opt +index b01df919e..f6e20c1e8 100644 +--- a/gcc/common.opt ++++ b/gcc/common.opt +@@ -1993,8 +1993,9 @@ Common Var(flag_ipa_struct_reorg) Init(0) Optimization + Perform structure layout optimizations. + + fipa-struct-reorg= +-Common RejectNegative Joined UInteger Var(struct_layout_optimize_level) Init(0) IntegerRange(0, 3) +--fipa-struct-reorg=[0,1,2,3] adding none, struct-reorg, reorder-fields, dfe optimizations. ++Common RejectNegative Joined UInteger Var(struct_layout_optimize_level) Init(0) IntegerRange(0, 4) ++-fipa-struct-reorg=[0,1,2,3,4] adding none, struct-reorg, reorder-fields, ++dfe, safe-pointer-compression optimizations. 
+ + fipa-vrp + Common Var(flag_ipa_vrp) Optimization +diff --git a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc +index dcc6df496..5d451c4c8 100644 +--- a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc ++++ b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc +@@ -89,6 +89,7 @@ along with GCC; see the file COPYING3. If not see + #include "gimple-iterator.h" + #include "gimple-walk.h" + #include "cfg.h" ++#include "cfghooks.h" /* For split_block. */ + #include "ssa.h" + #include "tree-dfa.h" + #include "fold-const.h" +@@ -147,7 +148,27 @@ using namespace struct_relayout; + #define VOID_POINTER_P(type) \ + (POINTER_TYPE_P (type) && VOID_TYPE_P (TREE_TYPE (type))) + +-/* Return true iff TYPE is stdarg va_list type. */ ++static void ++set_var_attributes (tree var) ++{ ++ if (!var) ++ return; ++ gcc_assert (TREE_CODE (var) == VAR_DECL); ++ ++ DECL_ARTIFICIAL (var) = 1; ++ DECL_EXTERNAL (var) = 0; ++ TREE_STATIC (var) = 1; ++ TREE_PUBLIC (var) = 0; ++ TREE_USED (var) = 1; ++ DECL_CONTEXT (var) = NULL; ++ TREE_THIS_VOLATILE (var) = 0; ++ TREE_ADDRESSABLE (var) = 0; ++ TREE_READONLY (var) = 0; ++ if (is_global_var (var)) ++ set_decl_tls_model (var, TLS_MODEL_NONE); ++} ++ ++/* Return true if TYPE is stdarg va_list type. */ + + static inline bool + is_va_list_type (tree type) +@@ -271,9 +292,15 @@ enum struct_layout_opt_level + STRUCT_SPLIT = 1 << 0, + COMPLETE_STRUCT_RELAYOUT = 1 << 1, + STRUCT_REORDER_FIELDS = 1 << 2, +- DEAD_FIELD_ELIMINATION = 1 << 3 ++ DEAD_FIELD_ELIMINATION = 1 << 3, ++ POINTER_COMPRESSION_SAFE = 1 << 4 + }; + ++/* Defines the target pointer size of compressed pointer, which should be 8, ++ 16, 32. 
*/ ++ ++static int compressed_size = 32; ++ + static bool is_result_of_mult (tree arg, tree *num, tree struct_size); + static bool isptrptr (tree type); + void get_base (tree &base, tree expr); +@@ -394,7 +421,10 @@ srtype::srtype (tree type) + : type (type), + chain_type (false), + escapes (does_not_escape), ++ pc_gptr (NULL_TREE), + visited (false), ++ pc_candidate (false), ++ has_legal_alloc_num (false), + has_alloc_array (0) + { + for (int i = 0; i < max_split; i++) +@@ -476,6 +506,31 @@ srtype::mark_escape (escape_type e, gimple *stmt) + } + } + ++/* Create a global header for compressed struct. */ ++ ++void ++srtype::create_global_ptr_for_pc () ++{ ++ if (!pc_candidate || pc_gptr != NULL_TREE) ++ return; ++ ++ const char *type_name = get_type_name (type); ++ gcc_assert (type_name != NULL); ++ ++ char *gptr_name = concat (type_name, "_pc", NULL); ++ tree new_name = get_identifier (gptr_name); ++ tree new_type = build_pointer_type (newtype[0]); ++ tree new_var = build_decl (UNKNOWN_LOCATION, VAR_DECL, new_name, new_type); ++ set_var_attributes (new_var); ++ pc_gptr = new_var; ++ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "\nType: %s has create global header for pointer" ++ " compression: %s\n", type_name, gptr_name); ++ ++ free (gptr_name); ++} ++ + /* Add FIELD to the list of fields that use this type. */ + + void +@@ -798,15 +853,31 @@ srfield::create_new_reorder_fields (tree newtype[max_split], + fields.safe_push (field); + } + +- DECL_NAME (field) = DECL_NAME (fielddecl); + if (type == NULL) +- /* Common members do not need to reconstruct. ++ { ++ DECL_NAME (field) = DECL_NAME (fielddecl); ++ /* Common members do not need to reconstruct. + Otherwise, int* -> int** or void* -> void**. 
*/ +- TREE_TYPE (field) = nt; ++ TREE_TYPE (field) = nt; ++ SET_DECL_ALIGN (field, DECL_ALIGN (fielddecl)); ++ } ++ else if (type->pc_candidate) ++ { ++ const char *old_name = IDENTIFIER_POINTER (DECL_NAME (fielddecl)); ++ char *new_name = concat (old_name, "_pc", NULL); ++ DECL_NAME (field) = get_identifier (new_name); ++ free (new_name); ++ TREE_TYPE (field) = make_unsigned_type (compressed_size); ++ SET_DECL_ALIGN (field, compressed_size); ++ } + else +- TREE_TYPE (field) = reconstruct_complex_type (TREE_TYPE (fielddecl), nt); ++ { ++ TREE_TYPE (field) = reconstruct_complex_type (TREE_TYPE (fielddecl), nt); ++ DECL_NAME (field) = DECL_NAME (fielddecl); ++ SET_DECL_ALIGN (field, DECL_ALIGN (fielddecl)); ++ } ++ + DECL_SOURCE_LOCATION (field) = DECL_SOURCE_LOCATION (fielddecl); +- SET_DECL_ALIGN (field, DECL_ALIGN (fielddecl)); + DECL_USER_ALIGN (field) = DECL_USER_ALIGN (fielddecl); + TREE_ADDRESSABLE (field) = TREE_ADDRESSABLE (fielddecl); + DECL_NONADDRESSABLE_P (field) = !TREE_ADDRESSABLE (fielddecl); +@@ -925,6 +996,10 @@ srtype::create_new_type (void) + && has_dead_field ()) + fprintf (dump_file, "Dead field elimination.\n"); + } ++ ++ if (pc_candidate && pc_gptr == NULL_TREE) ++ create_global_ptr_for_pc (); ++ + if (dump_file && (dump_flags & TDF_DETAILS)) + { + fprintf (dump_file, "Created %d types:\n", maxclusters); +@@ -1338,6 +1413,30 @@ public: + + unsigned execute_struct_relayout (void); + bool remove_dead_field_stmt (tree lhs); ++ ++ // Pointer compression methods: ++ void check_and_prune_struct_for_pointer_compression (void); ++ void try_rewrite_with_pointer_compression (gassign *, gimple_stmt_iterator *, ++ tree, tree, tree &, tree &); ++ bool safe_void_cmp_p (tree, srtype *); ++ bool pc_candidate_st_type_p (tree); ++ bool pc_candidate_tree_p (tree); ++ bool pc_type_conversion_candidate_p (tree); ++ bool pc_direct_rewrite_chance_p (tree, tree &); ++ bool compress_candidate_with_check (gimple_stmt_iterator *, tree, tree &); ++ bool 
compress_candidate (gassign *, gimple_stmt_iterator *, tree, tree &); ++ bool decompress_candidate_with_check (gimple_stmt_iterator *, tree, tree &); ++ bool decompress_candidate (gimple_stmt_iterator *, tree, tree, tree &, ++ tree &); ++ srtype *get_compression_candidate_type (tree); ++ tree compress_ptr_to_offset (tree, srtype *, gimple_stmt_iterator *); ++ tree decompress_offset_to_ptr (tree, srtype *, gimple_stmt_iterator *); ++ basic_block create_bb_for_compress_candidate (basic_block, tree, srtype *, ++ tree &); ++ basic_block create_bb_for_decompress_candidate (basic_block, tree, srtype *, ++ tree &); ++ basic_block create_bb_for_compress_nullptr (basic_block, tree &); ++ basic_block create_bb_for_decompress_nullptr (basic_block, tree, tree &); + }; + + struct ipa_struct_relayout +@@ -1386,26 +1485,6 @@ namespace { + + /* Methods for ipa_struct_relayout. */ + +-static void +-set_var_attributes (tree var) +-{ +- if (!var) +- return; +- gcc_assert (TREE_CODE (var) == VAR_DECL); +- +- DECL_ARTIFICIAL (var) = 1; +- DECL_EXTERNAL (var) = 0; +- TREE_STATIC (var) = 1; +- TREE_PUBLIC (var) = 0; +- TREE_USED (var) = 1; +- DECL_CONTEXT (var) = NULL; +- TREE_THIS_VOLATILE (var) = 0; +- TREE_ADDRESSABLE (var) = 0; +- TREE_READONLY (var) = 0; +- if (is_global_var (var)) +- set_decl_tls_model (var, TLS_MODEL_NONE); +-} +- + tree + ipa_struct_relayout::create_new_vars (tree type, const char *name) + { +@@ -2985,6 +3064,19 @@ ipa_struct_reorg::find_vars (gimple *stmt) + records the right value _1 declaration. */ + find_var (gimple_assign_rhs1 (stmt), stmt); + ++ /* Pointer types from non-zero pointer need to be escaped in pointer ++ compression and complete relayout. ++ e.g _1->t = (struct *) 0x400000. 
*/ ++ if (current_layout_opt_level >= COMPLETE_STRUCT_RELAYOUT ++ && TREE_CODE (lhs) == COMPONENT_REF ++ && TREE_CODE (TREE_TYPE (lhs)) == POINTER_TYPE ++ && TREE_CODE (rhs) == INTEGER_CST ++ && !integer_zerop (rhs)) ++ { ++ mark_type_as_escape (inner_type (TREE_TYPE (lhs)), ++ escape_cast_int, stmt); ++ } ++ + /* Add a safe func mechanism. */ + bool l_find = true; + bool r_find = true; +@@ -3436,12 +3528,13 @@ is_result_of_mult (tree arg, tree *num, tree struct_size) + bool + ipa_struct_reorg::handled_allocation_stmt (gimple *stmt) + { +- if ((current_layout_opt_level >= STRUCT_REORDER_FIELDS) ++ if ((current_layout_opt_level & STRUCT_REORDER_FIELDS) + && (gimple_call_builtin_p (stmt, BUILT_IN_REALLOC) + || gimple_call_builtin_p (stmt, BUILT_IN_MALLOC) + || gimple_call_builtin_p (stmt, BUILT_IN_CALLOC))) + return true; +- if ((current_layout_opt_level == COMPLETE_STRUCT_RELAYOUT) ++ if ((current_layout_opt_level == COMPLETE_STRUCT_RELAYOUT ++ || current_layout_opt_level & POINTER_COMPRESSION_SAFE) + && gimple_call_builtin_p (stmt, BUILT_IN_CALLOC)) + return true; + if ((current_layout_opt_level == STRUCT_SPLIT) +@@ -3563,14 +3656,19 @@ ipa_struct_reorg::maybe_mark_or_record_other_side (tree side, tree other, + } + } + /* x_1 = y.x_nodes; void *x; +- Directly mark the structure pointer type assigned +- to the void* variable as escape. */ ++ Mark the structure pointer type assigned ++ to the void* variable as escape. Unless the void* is only used to compare ++ with variables of the same type. 
*/ + else if (current_layout_opt_level >= STRUCT_REORDER_FIELDS + && TREE_CODE (side) == SSA_NAME + && VOID_POINTER_P (TREE_TYPE (side)) + && SSA_NAME_VAR (side) + && VOID_POINTER_P (TREE_TYPE (SSA_NAME_VAR (side)))) +- mark_type_as_escape (TREE_TYPE (other), escape_cast_void, stmt); ++ if (current_layout_opt_level < POINTER_COMPRESSION_SAFE ++ || !safe_void_cmp_p (side, type)) ++ { ++ mark_type_as_escape (TREE_TYPE (other), escape_cast_void, stmt); ++ } + + check_ptr_layers (side, other, stmt); + } +@@ -4181,7 +4279,7 @@ ipa_struct_reorg::check_type_and_push (tree newdecl, srdecl *decl, + void + ipa_struct_reorg::check_alloc_num (gimple *stmt, srtype *type) + { +- if (current_layout_opt_level == COMPLETE_STRUCT_RELAYOUT ++ if (current_layout_opt_level >= COMPLETE_STRUCT_RELAYOUT + && handled_allocation_stmt (stmt)) + { + tree arg0 = gimple_call_arg (stmt, 0); +@@ -4200,6 +4298,23 @@ ipa_struct_reorg::check_alloc_num (gimple *stmt, srtype *type) + type->has_alloc_array = type->has_alloc_array < 0 + ? type->has_alloc_array + : type->has_alloc_array + 1; ++ ++ if (current_layout_opt_level & POINTER_COMPRESSION_SAFE ++ && TREE_CODE (arg0) == INTEGER_CST) ++ { ++ /* Only known size during compilation can be optimized ++ at this level. 
*/ ++ unsigned HOST_WIDE_INT max_alloc_size = 0; ++ switch (compressed_size) ++ { ++ case 8: max_alloc_size = 0xff; break; // max of uint8 ++ case 16: max_alloc_size = 0xffff; break; // max of uint16 ++ case 32: max_alloc_size = 0xffffffff; break; // max of uint32 ++ default: gcc_unreachable (); break; ++ } ++ if (tree_to_uhwi (arg0) < max_alloc_size) ++ type->has_legal_alloc_num = true; ++ } + } + } + +@@ -4328,7 +4443,13 @@ ipa_struct_reorg::check_definition (srdecl *decl, vec &worklist) + if (current_layout_opt_level >= STRUCT_REORDER_FIELDS + && SSA_NAME_VAR (ssa_name) + && VOID_POINTER_P (TREE_TYPE (SSA_NAME_VAR (ssa_name)))) +- type->mark_escape (escape_cast_void, SSA_NAME_DEF_STMT (ssa_name)); ++ { ++ if (current_layout_opt_level < POINTER_COMPRESSION_SAFE ++ || !safe_void_cmp_p (ssa_name, type)) ++ { ++ type->mark_escape (escape_cast_void, SSA_NAME_DEF_STMT (ssa_name)); ++ } ++ } + gimple *stmt = SSA_NAME_DEF_STMT (ssa_name); + + /* +@@ -5294,6 +5415,8 @@ ipa_struct_reorg::create_new_types (void) + for (unsigned i = 0; i < types.length (); i++) + newtypes += types[i]->create_new_type (); + ++ /* Some new types may not have been created at create_new_type (), so ++ recreate new type for all struct fields. 
*/ + if (current_layout_opt_level >= STRUCT_REORDER_FIELDS) + { + for (unsigned i = 0; i < types.length (); i++) +@@ -5304,9 +5427,18 @@ ipa_struct_reorg::create_new_types (void) + for (unsigned j = 0; j < fields->length (); j++) + { + tree field = (*fields)[j]; +- TREE_TYPE (field) +- = reconstruct_complex_type (TREE_TYPE (field), +- types[i]->newtype[0]); ++ if (types[i]->pc_candidate) ++ { ++ TREE_TYPE (field) ++ = make_unsigned_type (compressed_size); ++ SET_DECL_ALIGN (field, compressed_size); ++ } ++ else ++ { ++ TREE_TYPE (field) ++ = reconstruct_complex_type (TREE_TYPE (field), ++ types[i]->newtype[0]); ++ } + } + } + } +@@ -5685,6 +5817,554 @@ ipa_struct_reorg::rewrite_expr (tree expr, + return true; + } + ++/* Emit a series of gimples to compress the pointer to the index relative to ++ the global header. The basic blocks where gsi is located must have at least ++ one stmt. */ ++ ++tree ++ipa_struct_reorg::compress_ptr_to_offset (tree xhs, srtype *type, ++ gimple_stmt_iterator *gsi) ++{ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "\nCompress candidate pointer:\n"); ++ print_generic_expr (dump_file, xhs); ++ fprintf (dump_file, "\nto offset:\n"); ++ } ++ ++ /* Emit gimple _X1 = ptr - gptr. */ ++ tree pointer_addr = fold_convert (long_unsigned_type_node, xhs); ++ tree gptr_addr = fold_convert (long_unsigned_type_node, type->pc_gptr); ++ tree step1 = gimplify_build2 (gsi, MINUS_EXPR, long_unsigned_type_node, ++ pointer_addr, gptr_addr); ++ ++ /* Emit gimple _X2 = _X1 / sizeof (struct). */ ++ tree step2 = gimplify_build2 (gsi, TRUNC_DIV_EXPR, long_unsigned_type_node, ++ step1, TYPE_SIZE_UNIT (type->newtype[0])); ++ ++ /* Emit gimple _X3 = _X2 + 1. */ ++ tree step3 = gimplify_build2 (gsi, PLUS_EXPR, long_unsigned_type_node, ++ step2, build_one_cst (long_unsigned_type_node)); ++ ++ /* Emit _X4 = (compressed_size) _X3. 
*/ ++ tree step4 = gimplify_build1 (gsi, NOP_EXPR, ++ make_unsigned_type (compressed_size), step3); ++ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ print_generic_expr (dump_file, step3); ++ fprintf (dump_file, "\n"); ++ } ++ return step4; ++} ++ ++/* Emit a series of gimples to decompress the index into the original ++ pointer. The basic blocks where gsi is located must have at least ++ one stmt. */ ++ ++tree ++ipa_struct_reorg::decompress_offset_to_ptr (tree xhs, srtype *type, ++ gimple_stmt_iterator *gsi) ++{ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "\nDecompress candidate offset:\n"); ++ print_generic_expr (dump_file, xhs); ++ fprintf (dump_file, "\nto pointer:\n"); ++ } ++ ++ /* Emit _X1 = xhs - 1. */ ++ tree offset = fold_convert (long_unsigned_type_node, xhs); ++ tree step1 = gimplify_build2 (gsi, MINUS_EXPR, long_unsigned_type_node, ++ offset, ++ build_one_cst (long_unsigned_type_node)); ++ ++ /* Emit _X2 = _X1 * sizeof (struct). */ ++ tree step2 = gimplify_build2 (gsi, MULT_EXPR, long_unsigned_type_node, ++ step1, TYPE_SIZE_UNIT (type->newtype[0])); ++ ++ /* Emit _X3 = phead + _X2. */ ++ tree gptr_addr = fold_convert (long_unsigned_type_node, type->pc_gptr); ++ tree step3 = gimplify_build2 (gsi, PLUS_EXPR, long_unsigned_type_node, ++ gptr_addr, step2); ++ ++ /* Emit _X4 = (struct *) _X3. */ ++ tree step4 = gimplify_build1 (gsi, NOP_EXPR, TREE_TYPE (type->pc_gptr), ++ step3); ++ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ print_generic_expr (dump_file, step3); ++ fprintf (dump_file, "\n"); ++ } ++ return step4; ++} ++ ++/* Return the compression candidate srtype of SSA_NAME or COMPONENT_REF. 
*/ ++ ++srtype * ++ipa_struct_reorg::get_compression_candidate_type (tree xhs) ++{ ++ if (xhs == NULL_TREE) ++ return NULL; ++ ++ if (TREE_CODE (xhs) == SSA_NAME || TREE_CODE (xhs) == COMPONENT_REF) ++ { ++ srtype *access_type = find_type (inner_type (TREE_TYPE (xhs))); ++ if (access_type != NULL && access_type->pc_candidate) ++ return access_type; ++ } ++ return NULL; ++} ++ ++/* True if the input type is the candidate type for pointer compression. */ ++ ++bool ++ipa_struct_reorg::pc_candidate_st_type_p (tree type) ++{ ++ if (type == NULL_TREE) ++ return false; ++ ++ if (TREE_CODE (type) == POINTER_TYPE) ++ { ++ if (TREE_CODE (TREE_TYPE (type)) == RECORD_TYPE) ++ { ++ srtype *access_type = find_type (TREE_TYPE (type)); ++ if (access_type != NULL && access_type->pc_candidate) ++ return true; ++ } ++ } ++ return false; ++} ++ ++/* True if the input xhs is a candidate for pointer compression. */ ++ ++bool ++ipa_struct_reorg::pc_candidate_tree_p (tree xhs) ++{ ++ if (xhs == NULL_TREE) ++ return false; ++ ++ if (TREE_CODE (xhs) == COMPONENT_REF) ++ { ++ srtype *base_type = find_type (TREE_TYPE (TREE_OPERAND (xhs, 0))); ++ if (base_type == NULL || base_type->has_escaped ()) ++ return false; ++ ++ return pc_candidate_st_type_p (TREE_TYPE (xhs)); ++ } ++ return false; ++} ++ ++/* True if xhs is a component_ref that base has escaped but uses a compression ++ candidate type. */ ++ ++bool ++ipa_struct_reorg::pc_type_conversion_candidate_p (tree xhs) ++{ ++ if (xhs == NULL_TREE) ++ return false; ++ ++ if (TREE_CODE (xhs) == COMPONENT_REF) ++ { ++ srtype *base_type = find_type (TREE_TYPE (TREE_OPERAND (xhs, 0))); ++ if (base_type != NULL && base_type->has_escaped ()) ++ return pc_candidate_st_type_p (TREE_TYPE (xhs)); ++ ++ } ++ return false; ++} ++ ++/* Creates a new basic block with zero for compressed null pointers. 
*/ ++ ++basic_block ++ipa_struct_reorg::create_bb_for_compress_nullptr (basic_block last_bb, ++ tree &phi) ++{ ++ basic_block new_bb = create_empty_bb (last_bb); ++ if (last_bb->loop_father != NULL) ++ { ++ add_bb_to_loop (new_bb, last_bb->loop_father); ++ loops_state_set (LOOPS_NEED_FIXUP); ++ } ++ ++ /* Emit phi = 0. */ ++ gimple_stmt_iterator gsi = gsi_last_bb (new_bb); ++ phi = make_ssa_name (make_unsigned_type (compressed_size)); ++ tree rhs = build_int_cst (make_unsigned_type (compressed_size), 0); ++ gimple *new_stmt = gimple_build_assign (phi, rhs); ++ gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT); ++ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "\nCreate bb %d for compress nullptr:\n", ++ new_bb->index); ++ gimple_dump_bb (dump_file, new_bb, 0, dump_flags); ++ } ++ return new_bb; ++} ++ ++/* Create a new basic block to compress the pointer to the index relative to ++ the allocated memory pool header. */ ++ ++basic_block ++ipa_struct_reorg::create_bb_for_compress_candidate (basic_block last_bb, ++ tree new_rhs, srtype *type, ++ tree &phi) ++{ ++ basic_block new_bb = create_empty_bb (last_bb); ++ if (last_bb->loop_father != NULL) ++ { ++ add_bb_to_loop (new_bb, last_bb->loop_father); ++ loops_state_set (LOOPS_NEED_FIXUP); ++ } ++ ++ gimple_stmt_iterator gsi = gsi_last_bb (new_bb); ++ /* compress_ptr_to_offset () needs at least one stmt in target bb. */ ++ gsi_insert_after (&gsi, gimple_build_nop (), GSI_NEW_STMT); ++ phi = compress_ptr_to_offset (new_rhs, type, &gsi); ++ /* Remove the NOP created above. */ ++ gsi_remove (&gsi, true); ++ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "\nCreate bb %d for compress candidate:\n", ++ new_bb->index); ++ gimple_dump_bb (dump_file, new_bb, 0, dump_flags); ++ } ++ return new_bb; ++} ++ ++/* Compression can be simplified by these following cases: ++ 1. if rhs is NULL, uses zero to represent it. ++ 2. 
if new_rhs has been converted into INTEGER_TYPE in the previous stmt, ++ just use it here. For example: ++ _1 = t->s ++ -> tt->s = _1. */ ++ ++bool ++ipa_struct_reorg::pc_direct_rewrite_chance_p (tree rhs, tree &new_rhs) ++{ ++ if (integer_zerop (rhs)) ++ { ++ new_rhs = build_int_cst (make_unsigned_type (compressed_size), 0); ++ return true; ++ } ++ else if (new_rhs && TREE_CODE (TREE_TYPE (new_rhs)) == INTEGER_TYPE) ++ { ++ return true; ++ } ++ return false; ++} ++ ++/* Perform pointer compression with check. The conversion will be as shown in ++ the following example: ++ Orig bb: ++ bb <1>: ++ _1->t = _2 ++ ++ will be transformed to: ++ bb <1>: ++ _3 = _2 ++ if (_2 == NULL) ++ goto bb <2> ++ else ++ goto bb <3> ++ ++ bb <2>: ++ _3 = 0 ++ goto bb <4> ++ ++ bb <3>: ++ ... ++ _4 = compress (_2) ++ goto bb <4> ++ ++ bb <4>: ++ _5 = PHI (_3, _4) ++ _1->t = _5 ++ The gsi will move to the beginning of split dst bb <4>, _1->t = _5 will be ++ emitted by rewrite_assign (). */ ++ ++bool ++ipa_struct_reorg::compress_candidate_with_check (gimple_stmt_iterator *gsi, ++ tree rhs, tree &new_rhs) ++{ ++ tree cond_lhs = make_ssa_name (TREE_TYPE (new_rhs)); ++ gimple *assign_stmt = gimple_build_assign (cond_lhs, new_rhs); ++ gsi_insert_before (gsi, assign_stmt, GSI_SAME_STMT); ++ ++ /* Insert cond stmt. */ ++ tree rhs_pointer_type = build_pointer_type (TREE_TYPE (new_rhs)); ++ gcond *cond = gimple_build_cond (EQ_EXPR, cond_lhs, ++ build_int_cst (rhs_pointer_type, 0), ++ NULL_TREE, NULL_TREE); ++ gimple_set_location (cond, UNKNOWN_LOCATION); ++ gsi_insert_before (gsi, cond, GSI_SAME_STMT); ++ ++ edge e = split_block (cond->bb, cond); ++ basic_block split_src_bb = e->src; ++ basic_block split_dst_bb = e->dest; ++ ++ /* Create bb for nullptr. */ ++ tree phi1 = NULL_TREE; ++ basic_block true_bb = create_bb_for_compress_nullptr (split_src_bb, phi1); ++ ++ /* Create bb for comprssion. 
*/ ++ srtype *type = get_compression_candidate_type (rhs); ++ gcc_assert (type != NULL); ++ tree phi2 = NULL_TREE; ++ basic_block false_bb = create_bb_for_compress_candidate (true_bb, new_rhs, ++ type, phi2); ++ ++ /* Rebuild and reset cfg. */ ++ remove_edge_raw (e); ++ ++ edge etrue = make_edge (split_src_bb, true_bb, EDGE_TRUE_VALUE); ++ etrue->probability = profile_probability::unlikely (); ++ true_bb->count = etrue->count (); ++ ++ edge efalse = make_edge (split_src_bb, false_bb, EDGE_FALSE_VALUE); ++ efalse->probability = profile_probability::likely (); ++ false_bb->count = efalse->count (); ++ ++ edge e1 = make_single_succ_edge (true_bb, split_dst_bb, EDGE_FALLTHRU); ++ edge e2 = make_single_succ_edge (false_bb, split_dst_bb, EDGE_FALLTHRU); ++ ++ tree phi = make_ssa_name (make_unsigned_type (compressed_size)); ++ gphi *phi_node = create_phi_node (phi, split_dst_bb); ++ add_phi_arg (phi_node, phi1, e1, UNKNOWN_LOCATION); ++ add_phi_arg (phi_node, phi2, e2, UNKNOWN_LOCATION); ++ ++ if (dom_info_available_p (CDI_DOMINATORS)) ++ { ++ set_immediate_dominator (CDI_DOMINATORS, split_dst_bb, split_src_bb); ++ set_immediate_dominator (CDI_DOMINATORS, true_bb, split_src_bb); ++ set_immediate_dominator (CDI_DOMINATORS, false_bb, split_src_bb); ++ } ++ *gsi = gsi_start_bb (split_dst_bb); ++ new_rhs = phi; ++ return true; ++} ++ ++/* If there is a direct rewrite chance or simplification opportunity, perform ++ the simplified compression rewrite. Otherwise, create a cond expression and ++ two basic blocks to implement pointer compression. */ ++ ++bool ++ipa_struct_reorg::compress_candidate (gassign *stmt, gimple_stmt_iterator *gsi, ++ tree rhs, tree &new_rhs) ++{ ++ if (pc_direct_rewrite_chance_p (rhs, new_rhs)) ++ return true; ++ ++ return compress_candidate_with_check (gsi, rhs, new_rhs); ++} ++ ++/* Create a new basic block to decompress the index to null pointer. 
*/ ++ ++basic_block ++ipa_struct_reorg::create_bb_for_decompress_nullptr (basic_block last_bb, ++ tree new_rhs, ++ tree &phi_node) ++{ ++ basic_block new_bb = create_empty_bb (last_bb); ++ if (last_bb->loop_father != NULL) ++ { ++ add_bb_to_loop (new_bb, last_bb->loop_father); ++ loops_state_set (LOOPS_NEED_FIXUP); ++ } ++ gimple_stmt_iterator gsi = gsi_last_bb (new_bb); ++ tree rhs_pointer_type = build_pointer_type (TREE_TYPE (new_rhs)); ++ phi_node = make_ssa_name (rhs_pointer_type); ++ gimple *new_stmt = gimple_build_assign (phi_node, ++ build_int_cst (rhs_pointer_type, 0)); ++ gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT); ++ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "\nCreate bb %d for decompress nullptr:\n", ++ new_bb->index); ++ gimple_dump_bb (dump_file, new_bb, 0, dump_flags); ++ } ++ return new_bb; ++} ++ ++/* Create a new basic block to decompress the index into original pointer. */ ++ ++basic_block ++ipa_struct_reorg::create_bb_for_decompress_candidate (basic_block last_bb, ++ tree lhs, srtype *type, ++ tree &phi_node) ++{ ++ basic_block new_bb = create_empty_bb (last_bb); ++ if (last_bb->loop_father != NULL) ++ { ++ add_bb_to_loop (new_bb, last_bb->loop_father); ++ loops_state_set (LOOPS_NEED_FIXUP); ++ } ++ gimple_stmt_iterator gsi = gsi_last_bb (new_bb); ++ /* decompress_ptr_to_offset () needs at least one stmt in target bb. */ ++ gsi_insert_after (&gsi, gimple_build_nop (), GSI_NEW_STMT); ++ phi_node = decompress_offset_to_ptr (lhs, type, &gsi); ++ /* Remove the NOP created above. */ ++ gsi_remove (&gsi, true); ++ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "\nCreate bb %d for decompress candidate:\n", ++ new_bb->index); ++ gimple_dump_bb (dump_file, new_bb, 0, dump_flags); ++ } ++ return new_bb; ++} ++ ++/* Perform pointer decompression with check. 
The conversion will be as shown ++ in the following example: ++ Orig bb: ++ bb <1>: ++ _1 = _2->t ++ ++ will be transformed to: ++ bb <1>: ++ _3 = _2->t ++ if (_3 == 0) ++ goto bb <2> ++ else ++ goto bb <3> ++ ++ bb <2>: ++ _4 = NULL ++ goto bb <4> ++ ++ bb <3>: ++ ... ++ _5 = decompress (_3) ++ goto bb <4> ++ ++ bb <4>: ++ _6 = PHI (_4, _5) ++ _1 = _6 ++ The gsi will move to the beginning of split dst bb <4>, _1 = _6 will be ++ emitted by rewrite_assign (). */ ++ ++bool ++ipa_struct_reorg::decompress_candidate_with_check (gimple_stmt_iterator *gsi, ++ tree rhs, tree &new_rhs) ++{ ++ /* Insert cond stmt. */ ++ tree cond_lhs = make_ssa_name (TREE_TYPE (new_rhs)); ++ gassign *cond_assign = gimple_build_assign (cond_lhs, new_rhs); ++ gsi_insert_before (gsi, cond_assign, GSI_SAME_STMT); ++ ++ tree pc_type = make_unsigned_type (compressed_size); ++ gcond *cond = gimple_build_cond (EQ_EXPR, cond_lhs, ++ build_int_cst (pc_type, 0), ++ NULL_TREE, NULL_TREE); ++ gimple_set_location (cond, UNKNOWN_LOCATION); ++ gsi_insert_before (gsi, cond, GSI_SAME_STMT); ++ ++ /* Split bb. */ ++ edge e = split_block (cond->bb, cond); ++ basic_block split_src_bb = e->src; ++ basic_block split_dst_bb = e->dest; ++ ++ /* Create bb for decompress nullptr. */ ++ tree phi1 = NULL_TREE; ++ basic_block true_bb = create_bb_for_decompress_nullptr (split_src_bb, ++ new_rhs, phi1); ++ ++ /* Create bb for decomprssion candidate. */ ++ tree phi2 = NULL_TREE; ++ srtype *type = get_compression_candidate_type (rhs); ++ gcc_assert (type != NULL); ++ basic_block false_bb = create_bb_for_decompress_candidate (true_bb, cond_lhs, ++ type, phi2); ++ ++ /* Refresh and reset cfg. 
*/ ++ remove_edge_raw (e); ++ ++ edge etrue = make_edge (split_src_bb, true_bb, EDGE_TRUE_VALUE); ++ etrue->probability = profile_probability::unlikely (); ++ true_bb->count = etrue->count (); ++ ++ edge efalse = make_edge (split_src_bb, false_bb, EDGE_FALSE_VALUE); ++ efalse->probability = profile_probability::likely (); ++ false_bb->count = efalse->count (); ++ ++ edge e1 = make_single_succ_edge (true_bb, split_dst_bb, EDGE_FALLTHRU); ++ edge e2 = make_single_succ_edge (false_bb, split_dst_bb, EDGE_FALLTHRU); ++ ++ tree phi = make_ssa_name (build_pointer_type (TREE_TYPE (cond_lhs))); ++ gphi *phi_node = create_phi_node (phi, split_dst_bb); ++ add_phi_arg (phi_node, phi1, e1, UNKNOWN_LOCATION); ++ add_phi_arg (phi_node, phi2, e2, UNKNOWN_LOCATION); ++ ++ if (dom_info_available_p (CDI_DOMINATORS)) ++ { ++ set_immediate_dominator (CDI_DOMINATORS, split_dst_bb, split_src_bb); ++ set_immediate_dominator (CDI_DOMINATORS, true_bb, split_src_bb); ++ set_immediate_dominator (CDI_DOMINATORS, false_bb, split_src_bb); ++ } ++ *gsi = gsi_start_bb (split_dst_bb); ++ new_rhs = phi; ++ return true; ++} ++ ++/* If there is a simplification opportunity, perform the simplified ++ decompression rewrite. Otherwise, create a cond expression and two basic ++ blocks to implement pointer decompression. */ ++ ++bool ++ipa_struct_reorg::decompress_candidate (gimple_stmt_iterator *gsi, ++ tree lhs, tree rhs, tree &new_lhs, ++ tree &new_rhs) ++{ ++ // TODO: simplifiy check and rewrite will be pushed in next PR. ++ return decompress_candidate_with_check (gsi, rhs, new_rhs); ++} ++ ++/* Try to perform pointer compression and decompression. */ ++ ++void ++ipa_struct_reorg::try_rewrite_with_pointer_compression (gassign *stmt, ++ gimple_stmt_iterator ++ *gsi, tree lhs, ++ tree rhs, tree &new_lhs, ++ tree &new_rhs) ++{ ++ bool l = pc_candidate_tree_p (lhs); ++ bool r = pc_candidate_tree_p (rhs); ++ if (!l && !r) ++ { ++ tree tmp_rhs = new_rhs == NULL_TREE ? 
rhs : new_rhs; ++ if (pc_type_conversion_candidate_p (lhs)) ++ { ++ /* Transfer MEM[(struct *)_1].files = _4; ++ to MEM[(struct *)_1].files = (struct *)_4; */ ++ new_rhs = fold_convert (TREE_TYPE (lhs), tmp_rhs); ++ } ++ else if (pc_type_conversion_candidate_p (rhs)) ++ { ++ /* Transfer _4 = MEM[(struct *)_1].nodes; ++ to _4 = (new_struct *) MEM[(struct *)_1].nodes; */ ++ new_rhs = fold_convert (TREE_TYPE (new_lhs), tmp_rhs); ++ } ++ } ++ else if (l && r) ++ gcc_unreachable (); ++ else if (l) ++ { ++ if (!compress_candidate (stmt, gsi, rhs, new_rhs)) ++ gcc_unreachable (); ++ } ++ else if (r) ++ { ++ if (!decompress_candidate (gsi, lhs, rhs, new_lhs, new_rhs)) ++ gcc_unreachable (); ++ } ++} ++ + bool + ipa_struct_reorg::rewrite_assign (gassign *stmt, gimple_stmt_iterator *gsi) + { +@@ -5880,6 +6560,9 @@ ipa_struct_reorg::rewrite_assign (gassign *stmt, gimple_stmt_iterator *gsi) + fprintf (dump_file, "\nreplaced with:\n"); + for (unsigned i = 0; i < max_split && (newlhs[i] || newrhs[i]); i++) + { ++ if (current_layout_opt_level >= POINTER_COMPRESSION_SAFE) ++ try_rewrite_with_pointer_compression (stmt, gsi, lhs, rhs, ++ newlhs[i], newrhs[i]); + gimple *newstmt = gimple_build_assign (newlhs[i] ? newlhs[i] : lhs, + newrhs[i] ? newrhs[i] : rhs); + if (dump_file && (dump_flags & TDF_DETAILS)) +@@ -5956,6 +6639,13 @@ ipa_struct_reorg::rewrite_call (gcall *stmt, gimple_stmt_iterator *gsi) + gcc_assert (false); + gimple_call_set_lhs (g, decl->newdecl[i]); + gsi_insert_before (gsi, g, GSI_SAME_STMT); ++ if (type->pc_candidate) ++ { ++ /* Init global header for pointer compression. 
*/ ++ gassign *gptr ++ = gimple_build_assign (type->pc_gptr, decl->newdecl[i]); ++ gsi_insert_before (gsi, gptr, GSI_SAME_STMT); ++ } + } + return true; + } +@@ -6411,6 +7101,12 @@ ipa_struct_reorg::rewrite_functions (void) + push_cfun (DECL_STRUCT_FUNCTION (node->decl)); + current_function = f; + ++ if (current_layout_opt_level >= POINTER_COMPRESSION_SAFE) ++ { ++ calculate_dominance_info (CDI_DOMINATORS); ++ loop_optimizer_init (0); ++ } ++ + if (dump_file && (dump_flags & TDF_DETAILS)) + { + fprintf (dump_file, "\nBefore rewrite: %dth_%s\n", +@@ -6486,6 +7182,9 @@ ipa_struct_reorg::rewrite_functions (void) + + free_dominance_info (CDI_DOMINATORS); + ++ if (current_layout_opt_level >= POINTER_COMPRESSION_SAFE) ++ loop_optimizer_finalize (); ++ + if (dump_file) + { + fprintf (dump_file, "\nAfter rewrite: %dth_%s\n", +@@ -6514,6 +7213,8 @@ ipa_struct_reorg::execute_struct_relayout (void) + continue; + if (types[i]->chain_type) + continue; ++ if (get_type_name (types[i]->type) == NULL) ++ continue; + retval |= ipa_struct_relayout (type, this).execute (); + } + +@@ -6530,6 +7231,131 @@ ipa_struct_reorg::execute_struct_relayout (void) + return retval; + } + ++/* True if the var with void type is only used to compare with the same ++ target type. */ ++ ++bool ++ipa_struct_reorg::safe_void_cmp_p (tree var, srtype *type) ++{ ++ imm_use_iterator imm_iter; ++ use_operand_p use_p; ++ FOR_EACH_IMM_USE_FAST (use_p, imm_iter, var) ++ { ++ gimple *use_stmt = USE_STMT (use_p); ++ if (is_gimple_debug (use_stmt)) ++ continue; ++ ++ if (gimple_code (use_stmt) == GIMPLE_COND) ++ { ++ tree lhs = gimple_cond_lhs (use_stmt); ++ tree rhs = gimple_cond_rhs (use_stmt); ++ tree xhs = lhs == var ? rhs : lhs; ++ if (types_compatible_p (inner_type (TREE_TYPE (xhs)), type->type)) ++ continue; ++ ++ } ++ return false; ++ } ++ return true; ++} ++ ++/* Mark the structure that should perform pointer compression. 
*/ ++ ++void ++ipa_struct_reorg::check_and_prune_struct_for_pointer_compression (void) ++{ ++ unsigned pc_transform_num = 0; ++ ++ if (dump_file) ++ fprintf (dump_file, "\nMark the structure that should perform pointer" ++ " compression:\n"); ++ ++ for (unsigned i = 0; i < types.length (); i++) ++ { ++ srtype *type = types[i]; ++ if (dump_file) ++ print_generic_expr (dump_file, type->type); ++ ++ if (type->has_escaped ()) ++ { ++ if (dump_file) ++ fprintf (dump_file, " has escaped by %s, skip compression.\n", ++ type->escape_reason ()); ++ continue; ++ } ++ if (TYPE_FIELDS (type->type) == NULL) ++ { ++ if (dump_file) ++ fprintf (dump_file, " has zero field, skip compression.\n"); ++ continue; ++ } ++ if (type->chain_type) ++ { ++ if (dump_file) ++ fprintf (dump_file, " is chain_type, skip compression.\n"); ++ continue; ++ } ++ if (type->has_alloc_array != 1) ++ { ++ if (dump_file) ++ fprintf (dump_file, " has alloc number: %d, skip compression.\n", ++ type->has_alloc_array); ++ continue; ++ } ++ if (get_type_name (type->type) == NULL) ++ { ++ if (dump_file) ++ fprintf (dump_file, " has empty struct name," ++ " skip compression.\n"); ++ continue; ++ } ++ if ((current_layout_opt_level & POINTER_COMPRESSION_SAFE) ++ && !type->has_legal_alloc_num) ++ { ++ if (dump_file) ++ fprintf (dump_file, " has illegal struct array size," ++ " skip compression.\n"); ++ continue; ++ } ++ pc_transform_num++; ++ type->pc_candidate = true; ++ if (dump_file) ++ fprintf (dump_file, " attemps to do pointer compression.\n"); ++ } ++ ++ if (dump_file) ++ { ++ if (pc_transform_num) ++ fprintf (dump_file, "\nNumber of structures to transform in " ++ "pointer compression is %d\n", pc_transform_num); ++ else ++ fprintf (dump_file, "\nNo structures to transform in " ++ "pointer compression.\n"); ++ } ++} ++ ++/* Init pointer size from parameter param_pointer_compression_size. 
*/ ++ ++static void ++init_pointer_size_for_pointer_compression (void) ++{ ++ switch (param_pointer_compression_size) ++ { ++ case 8: ++ compressed_size = 8; // sizeof (uint8) ++ break; ++ case 16: ++ compressed_size = 16; // sizeof (uint16) ++ break; ++ case 32: ++ compressed_size = 32; // sizeof (uint32) ++ break; ++ default: ++ error ("Invalid pointer compression size, using the following param: " ++ "\"--param compressed-pointer-size=[8,16,32]\""); ++ } ++} ++ + unsigned int + ipa_struct_reorg::execute (unsigned int opt) + { +@@ -6551,6 +7377,8 @@ ipa_struct_reorg::execute (unsigned int opt) + if (current_layout_opt_level == STRUCT_SPLIT) + analyze_types (); + ++ if (opt >= POINTER_COMPRESSION_SAFE) ++ check_and_prune_struct_for_pointer_compression (); + ret = rewrite_functions (); + } + else +@@ -6598,6 +7426,8 @@ public: + unsigned int level = 0; + switch (struct_layout_optimize_level) + { ++ case 4: level |= POINTER_COMPRESSION_SAFE; ++ // FALLTHRU + case 3: level |= DEAD_FIELD_ELIMINATION; + // FALLTHRU + case 2: level |= STRUCT_REORDER_FIELDS; +@@ -6609,6 +7439,10 @@ public: + case 0: break; + default: gcc_unreachable (); + } ++ ++ if (level & POINTER_COMPRESSION_SAFE) ++ init_pointer_size_for_pointer_compression (); ++ + /* Preserved for backward compatibility, reorder fields needs run before + struct split and complete struct relayout. */ + if (flag_ipa_reorder_fields && level < STRUCT_REORDER_FIELDS) +diff --git a/gcc/ipa-struct-reorg/ipa-struct-reorg.h b/gcc/ipa-struct-reorg/ipa-struct-reorg.h +index 719f7b308..6c4469597 100644 +--- a/gcc/ipa-struct-reorg/ipa-struct-reorg.h ++++ b/gcc/ipa-struct-reorg/ipa-struct-reorg.h +@@ -121,7 +121,10 @@ private: + + public: + tree newtype[max_split]; ++ tree pc_gptr; + bool visited; ++ bool pc_candidate; ++ bool has_legal_alloc_num; + /* Negative number means it has illegal allocated arrays + that we do not optimize. 
*/ + int has_alloc_array; +@@ -145,6 +148,7 @@ public: + void analyze (void); + bool has_dead_field (void); + void mark_escape (escape_type, gimple *stmt); ++ void create_global_ptr_for_pc (); + bool has_escaped (void) + { + return escapes != does_not_escape; +diff --git a/gcc/params.opt b/gcc/params.opt +index 1ddf1343f..d2196dc68 100644 +--- a/gcc/params.opt ++++ b/gcc/params.opt +@@ -1205,4 +1205,8 @@ Enum(vrp_mode) String(vrp) Value(VRP_MODE_VRP) + EnumValue + Enum(vrp_mode) String(ranger) Value(VRP_MODE_RANGER) + ++-param=compressed-pointer-size= ++Common Joined UInteger Var(param_pointer_compression_size) Init(32) IntegerRange(8, 32) Param Optimization ++Target size of compressed pointer, which should be 8, 16 or 32. ++ + ; This comment is to ensure we retain the blank line above. +-- +2.33.0 + diff --git a/0030-Struct-Reorg-Add-unsafe-structure-pointer-compressio.patch b/0030-Struct-Reorg-Add-unsafe-structure-pointer-compressio.patch new file mode 100644 index 0000000000000000000000000000000000000000..2bca621d6cd208a52ae2bf278d8dbbae8fd7ab35 --- /dev/null +++ b/0030-Struct-Reorg-Add-unsafe-structure-pointer-compressio.patch @@ -0,0 +1,1232 @@ +From 82d6166cd29fb1c3474f29b28cb7e5478d3a551a Mon Sep 17 00:00:00 2001 +From: liyancheng <412998149@qq.com> +Date: Mon, 25 Dec 2023 11:17:04 +0800 +Subject: [PATCH] [Struct Reorg] Add unsafe structure pointer compression + +Unsafe structure pointer compression allows for some dangerous +conversions for better performance. +Add flag -fipa-struct-reorg=5 to enable unsafe structure pointer +compression. 
+--- + gcc/common.opt | 6 +- + gcc/ipa-struct-reorg/ipa-struct-reorg.cc | 365 ++++++++++++++---- + gcc/symbol-summary.h | 22 +- + .../gcc.dg/struct/csr_skip_void_struct_name.c | 53 +++ + gcc/testsuite/gcc.dg/struct/pc_cast_int.c | 91 +++++ + .../gcc.dg/struct/pc_compress_and_decomress.c | 90 +++++ + gcc/testsuite/gcc.dg/struct/pc_ptr2void.c | 87 +++++ + .../gcc.dg/struct/pc_simple_rewrite_pc.c | 112 ++++++ + .../gcc.dg/struct/pc_skip_void_struct_name.c | 53 +++ + gcc/testsuite/gcc.dg/struct/struct-reorg.exp | 8 + + 10 files changed, 804 insertions(+), 83 deletions(-) + create mode 100644 gcc/testsuite/gcc.dg/struct/csr_skip_void_struct_name.c + create mode 100644 gcc/testsuite/gcc.dg/struct/pc_cast_int.c + create mode 100644 gcc/testsuite/gcc.dg/struct/pc_compress_and_decomress.c + create mode 100644 gcc/testsuite/gcc.dg/struct/pc_ptr2void.c + create mode 100644 gcc/testsuite/gcc.dg/struct/pc_simple_rewrite_pc.c + create mode 100644 gcc/testsuite/gcc.dg/struct/pc_skip_void_struct_name.c + +diff --git a/gcc/common.opt b/gcc/common.opt +index 56b547506..c7c6bc256 100644 +--- a/gcc/common.opt ++++ b/gcc/common.opt +@@ -1993,9 +1993,9 @@ Common Var(flag_ipa_struct_reorg) Init(0) Optimization + Perform structure layout optimizations. + + fipa-struct-reorg= +-Common RejectNegative Joined UInteger Var(struct_layout_optimize_level) Init(0) IntegerRange(0, 4) +--fipa-struct-reorg=[0,1,2,3,4] adding none, struct-reorg, reorder-fields, +-dfe, safe-pointer-compression optimizations. ++Common RejectNegative Joined UInteger Var(struct_layout_optimize_level) Init(0) IntegerRange(0, 5) ++-fipa-struct-reorg=[0,1,2,3,4,5] adding none, struct-reorg, reorder-fields, ++dfe, safe-pointer-compression, unsafe-pointer-compression optimizations. 
+ + fipa-vrp + Common Var(flag_ipa_vrp) Optimization +diff --git a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc +index 5d451c4c8..fa33f2d35 100644 +--- a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc ++++ b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc +@@ -293,7 +293,8 @@ enum struct_layout_opt_level + COMPLETE_STRUCT_RELAYOUT = 1 << 1, + STRUCT_REORDER_FIELDS = 1 << 2, + DEAD_FIELD_ELIMINATION = 1 << 3, +- POINTER_COMPRESSION_SAFE = 1 << 4 ++ POINTER_COMPRESSION_SAFE = 1 << 4, ++ POINTER_COMPRESSION_UNSAFE = 1 << 5 + }; + + /* Defines the target pointer size of compressed pointer, which should be 8, +@@ -1267,10 +1268,10 @@ csrtype::init_type_info (void) + + /* Close enough to pad to improve performance. + 33~63 should pad to 64 but 33~48 (first half) are too far away, and +- 65~127 should pad to 128 but 65~96 (first half) are too far away. */ ++ 70~127 should pad to 128 but 65~70 (first half) are too far away. */ + if (old_size > 48 && old_size < 64) + new_size = 64; +- if (old_size > 96 && old_size < 128) ++ if (old_size > 70 && old_size < 128) + new_size = 128; + + /* For performance reasons, only allow structure size +@@ -1423,8 +1424,12 @@ public: + bool pc_candidate_tree_p (tree); + bool pc_type_conversion_candidate_p (tree); + bool pc_direct_rewrite_chance_p (tree, tree &); ++ bool pc_simplify_chance_for_compress_p (gassign *, tree); ++ bool compress_candidate_without_check (gimple_stmt_iterator *, tree, tree &); + bool compress_candidate_with_check (gimple_stmt_iterator *, tree, tree &); + bool compress_candidate (gassign *, gimple_stmt_iterator *, tree, tree &); ++ bool decompress_candidate_without_check (gimple_stmt_iterator *, ++ tree, tree, tree &, tree &); + bool decompress_candidate_with_check (gimple_stmt_iterator *, tree, tree &); + bool decompress_candidate (gimple_stmt_iterator *, tree, tree, tree &, + tree &); +@@ -1924,7 +1929,6 @@ bool + ipa_struct_relayout::maybe_rewrite_cst (tree cst, gimple_stmt_iterator *gsi, 
+ HOST_WIDE_INT ×) + { +- bool ret = false; + gcc_assert (TREE_CODE (cst) == INTEGER_CST); + + gimple *stmt = gsi_stmt (*gsi); +@@ -1948,27 +1952,95 @@ ipa_struct_relayout::maybe_rewrite_cst (tree cst, gimple_stmt_iterator *gsi, + { + if (gsi_one_before_end_p (*gsi)) + return false; +- gsi_next (gsi); +- gimple *stmt2 = gsi_stmt (*gsi); +- +- if (gimple_code (stmt2) == GIMPLE_ASSIGN +- && gimple_assign_rhs_code (stmt2) == POINTER_PLUS_EXPR) ++ // Check uses. ++ imm_use_iterator imm_iter_lhs; ++ use_operand_p use_p_lhs; ++ FOR_EACH_IMM_USE_FAST (use_p_lhs, imm_iter_lhs, gimple_assign_lhs (stmt)) + { +- tree lhs = gimple_assign_lhs (stmt2); +- tree rhs1 = gimple_assign_rhs1 (stmt2); +- if (types_compatible_p (inner_type (TREE_TYPE (rhs1)), ctype.type) +- || types_compatible_p (inner_type (TREE_TYPE (lhs)), ctype.type)) ++ gimple *stmt2 = USE_STMT (use_p_lhs); ++ if (gimple_code (stmt2) != GIMPLE_ASSIGN) ++ continue; ++ if (gimple_assign_rhs_code (stmt2) == POINTER_PLUS_EXPR) + { +- tree num = NULL; +- if (is_result_of_mult (cst, &num, TYPE_SIZE_UNIT (ctype.type))) ++ tree lhs = gimple_assign_lhs (stmt2); ++ tree rhs1 = gimple_assign_rhs1 (stmt2); ++ if (types_compatible_p (inner_type (TREE_TYPE (rhs1)), ctype.type) ++ || types_compatible_p (inner_type (TREE_TYPE (lhs)), ++ ctype.type)) + { +- times = TREE_INT_CST_LOW (num); +- ret = true; ++ tree num = NULL; ++ if (is_result_of_mult (cst, &num, ++ TYPE_SIZE_UNIT (ctype.type))) ++ { ++ times = TREE_INT_CST_LOW (num); ++ return true; ++ } ++ } ++ } ++ // For pointer compression, handle plus stmt. ++ else if (gimple_assign_rhs_code (stmt2) == PLUS_EXPR) ++ { ++ // Check uses. 
++ imm_use_iterator imm_iter_cast; ++ use_operand_p use_p_cast; ++ FOR_EACH_IMM_USE_FAST (use_p_cast, imm_iter_cast, ++ gimple_assign_lhs (stmt2)) ++ { ++ gimple *stmt_cast = USE_STMT (use_p_cast); ++ if (gimple_code (stmt_cast) != GIMPLE_ASSIGN) ++ continue; ++ if (gimple_assign_cast_p (stmt_cast)) ++ { ++ tree lhs_type = inner_type (TREE_TYPE ( ++ gimple_assign_lhs (stmt_cast))); ++ if (types_compatible_p (lhs_type, ctype.type)) ++ { ++ tree num = NULL; ++ if (is_result_of_mult (cst, &num, ++ TYPE_SIZE_UNIT (ctype.type))) ++ { ++ times = TREE_INT_CST_LOW (num); ++ return true; ++ } ++ } ++ } + } + } + } +- gsi_prev (gsi); +- return ret; ++ } ++ // For pointer compression, handle div stmt. ++ if (gimple_assign_rhs_code (stmt) == TRUNC_DIV_EXPR) ++ { ++ imm_use_iterator imm_iter; ++ use_operand_p use_p; ++ tree lhs = gimple_assign_lhs (stmt); ++ if (lhs == NULL_TREE) ++ return false; ++ FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs) ++ { ++ gimple *use_stmt = USE_STMT (use_p); ++ if (is_gimple_debug (use_stmt)) ++ continue; ++ if (gimple_code (use_stmt) != GIMPLE_ASSIGN) ++ continue; ++ if (gimple_assign_cast_p (use_stmt)) ++ { ++ tree lhs_type = inner_type (TREE_TYPE ( ++ gimple_assign_lhs (use_stmt))); ++ if (TYPE_UNSIGNED (lhs_type) ++ && TREE_CODE (lhs_type) == INTEGER_TYPE ++ && TYPE_PRECISION (lhs_type) == compressed_size) ++ { ++ tree num = NULL; ++ if (is_result_of_mult (cst, &num, ++ TYPE_SIZE_UNIT (ctype.type))) ++ { ++ times = TREE_INT_CST_LOW (num); ++ return true; ++ } ++ } ++ } ++ } + } + return false; + } +@@ -2967,7 +3039,9 @@ ipa_struct_reorg::record_var (tree decl, escape_type escapes, int arg) + && TREE_CODE (TREE_TYPE (decl)) == RECORD_TYPE) + e = escape_separate_instance; + +- if (e != does_not_escape) ++ if (e != does_not_escape ++ && (current_layout_opt_level != COMPLETE_STRUCT_RELAYOUT ++ || replace_type_map.get (type->type) == NULL)) + type->mark_escape (e, NULL); + } + +@@ -3629,7 +3703,9 @@ ipa_struct_reorg::maybe_mark_or_record_other_side 
(tree side, tree other, + if (TREE_CODE (side) == SSA_NAME + && VOID_POINTER_P (TREE_TYPE (side))) + return; +- d->type->mark_escape (escape_cast_another_ptr, stmt); ++ if (current_layout_opt_level != COMPLETE_STRUCT_RELAYOUT ++ || replace_type_map.get (d->type->type) == NULL) ++ d->type->mark_escape (escape_cast_another_ptr, stmt); + return; + } + +@@ -3645,7 +3721,9 @@ ipa_struct_reorg::maybe_mark_or_record_other_side (tree side, tree other, + } + else + /* *_1 = &MEM[(void *)&x + 8B]. */ +- type->mark_escape (escape_cast_another_ptr, stmt); ++ if (current_layout_opt_level != COMPLETE_STRUCT_RELAYOUT ++ || replace_type_map.get (type->type) == NULL) ++ type->mark_escape (escape_cast_another_ptr, stmt); + } + else if (type != d->type) + { +@@ -4364,7 +4442,9 @@ ipa_struct_reorg::check_definition_assign (srdecl *decl, + /* Casts between pointers and integer are escaping. */ + if (gimple_assign_cast_p (stmt)) + { +- type->mark_escape (escape_cast_int, stmt); ++ if (current_layout_opt_level != COMPLETE_STRUCT_RELAYOUT ++ || replace_type_map.get (type->type) == NULL) ++ type->mark_escape (escape_cast_int, stmt); + return; + } + +@@ -4684,7 +4764,9 @@ ipa_struct_reorg::check_use (srdecl *decl, gimple *stmt, + /* Casts between pointers and integer are escaping. */ + if (gimple_assign_cast_p (stmt)) + { +- type->mark_escape (escape_cast_int, stmt); ++ if (current_layout_opt_level != COMPLETE_STRUCT_RELAYOUT ++ || replace_type_map.get (type->type) == NULL) ++ type->mark_escape (escape_cast_int, stmt); + return; + } + +@@ -5364,9 +5446,9 @@ ipa_struct_reorg::prune_escaped_types (void) + + /* Prune types that escape, all references to those types + will have been removed in the above loops. */ +- /* The escape type is not deleted in STRUCT_REORDER_FIELDS, +- Then the type that contains the escaped type fields +- can find complete information. 
*/ ++ /* The escape type is not deleted in current_layout_opt_level ++ after STRUCT_REORDER_FIELDS, then the type that contains ++ the escaped type fields can find complete information. */ + if (current_layout_opt_level < STRUCT_REORDER_FIELDS) + { + for (unsigned i = 0; i < types.length ();) +@@ -5842,17 +5924,17 @@ ipa_struct_reorg::compress_ptr_to_offset (tree xhs, srtype *type, + tree step2 = gimplify_build2 (gsi, TRUNC_DIV_EXPR, long_unsigned_type_node, + step1, TYPE_SIZE_UNIT (type->newtype[0])); + +- /* Emit gimple _X3 = _X2 + 1. */ +- tree step3 = gimplify_build2 (gsi, PLUS_EXPR, long_unsigned_type_node, +- step2, build_one_cst (long_unsigned_type_node)); ++ /* Emit _X3 = (compressed_size) _X2. */ ++ tree pc_type = make_unsigned_type (compressed_size); ++ tree step3 = gimplify_build1 (gsi, NOP_EXPR, pc_type, step2); + +- /* Emit _X4 = (compressed_size) _X3. */ +- tree step4 = gimplify_build1 (gsi, NOP_EXPR, +- make_unsigned_type (compressed_size), step3); ++ /* Emit gimple _X4 = _X3 + 1. 
*/ ++ tree step4 = gimplify_build2 (gsi, PLUS_EXPR, pc_type, step3, ++ build_one_cst (pc_type)); + + if (dump_file && (dump_flags & TDF_DETAILS)) + { +- print_generic_expr (dump_file, step3); ++ print_generic_expr (dump_file, step4); + fprintf (dump_file, "\n"); + } + return step4; +@@ -5894,7 +5976,7 @@ ipa_struct_reorg::decompress_offset_to_ptr (tree xhs, srtype *type, + + if (dump_file && (dump_flags & TDF_DETAILS)) + { +- print_generic_expr (dump_file, step3); ++ print_generic_expr (dump_file, step4); + fprintf (dump_file, "\n"); + } + return step4; +@@ -5967,7 +6049,10 @@ ipa_struct_reorg::pc_type_conversion_candidate_p (tree xhs) + + if (TREE_CODE (xhs) == COMPONENT_REF) + { +- srtype *base_type = find_type (TREE_TYPE (TREE_OPERAND (xhs, 0))); ++ tree mem = TREE_OPERAND (xhs, 0); ++ if (TREE_CODE (mem) != MEM_REF) ++ return false; ++ srtype *base_type = find_type (TREE_TYPE (mem)); + if (base_type != NULL && base_type->has_escaped ()) + return pc_candidate_st_type_p (TREE_TYPE (xhs)); + +@@ -6057,6 +6142,49 @@ ipa_struct_reorg::pc_direct_rewrite_chance_p (tree rhs, tree &new_rhs) + return false; + } + ++/* The following cases can simplify the checking of null pointer: ++ 1. rhs defined from POINTER_PLUS_EXPR. ++ 2. rhs used as COMPONENT_REF in this basic block. 
*/ ++ ++bool ++ipa_struct_reorg::pc_simplify_chance_for_compress_p (gassign *stmt, ++ tree rhs) ++{ ++ imm_use_iterator imm_iter; ++ use_operand_p use_p; ++ gimple *def_stmt = SSA_NAME_DEF_STMT (rhs); ++ ++ if (def_stmt && is_gimple_assign (def_stmt) ++ && gimple_assign_rhs_code (def_stmt) == POINTER_PLUS_EXPR) ++ return true; ++ ++ FOR_EACH_IMM_USE_FAST (use_p, imm_iter, rhs) ++ { ++ gimple *use_stmt = USE_STMT (use_p); ++ if (use_stmt->bb != stmt->bb || !is_gimple_assign (use_stmt)) ++ continue; ++ ++ tree use_rhs = gimple_assign_rhs1 (use_stmt); ++ if (TREE_CODE (use_rhs) == COMPONENT_REF ++ && TREE_OPERAND (TREE_OPERAND (use_rhs, 0), 0) == rhs) ++ return true; ++ } ++ return false; ++} ++ ++/* Perform compression directly without checking null pointer. */ ++ ++bool ++ipa_struct_reorg::compress_candidate_without_check (gimple_stmt_iterator *gsi, ++ tree rhs, ++ tree &new_rhs) ++{ ++ srtype *type = get_compression_candidate_type (rhs); ++ gcc_assert (type != NULL); ++ new_rhs = compress_ptr_to_offset (new_rhs, type, gsi); ++ return true; ++} ++ + /* Perform pointer compression with check. The conversion will be as shown in + the following example: + Orig bb: +@@ -6157,6 +6285,9 @@ ipa_struct_reorg::compress_candidate (gassign *stmt, gimple_stmt_iterator *gsi, + { + if (pc_direct_rewrite_chance_p (rhs, new_rhs)) + return true; ++ else if (current_layout_opt_level & POINTER_COMPRESSION_UNSAFE ++ && pc_simplify_chance_for_compress_p (stmt, rhs)) ++ return compress_candidate_without_check (gsi, rhs, new_rhs); + + return compress_candidate_with_check (gsi, rhs, new_rhs); + } +@@ -6219,6 +6350,80 @@ ipa_struct_reorg::create_bb_for_decompress_candidate (basic_block last_bb, + return new_bb; + } + ++/* Try decompress candidate without check. 
*/ ++ ++bool ++ipa_struct_reorg::decompress_candidate_without_check (gimple_stmt_iterator *gsi, ++ tree lhs, tree rhs, ++ tree &new_lhs, ++ tree &new_rhs) ++{ ++ imm_use_iterator imm_iter; ++ use_operand_p use_p; ++ bool processed = false; ++ ++ if (!gsi_one_before_end_p (*gsi)) ++ { ++ gsi_next (gsi); ++ gimple *next_stmt = gsi_stmt (*gsi); ++ if (gimple_code (next_stmt) == GIMPLE_ASSIGN ++ && gimple_assign_rhs_class (next_stmt) == GIMPLE_SINGLE_RHS) ++ { ++ tree next_rhs = gimple_assign_rhs1 (next_stmt); ++ /* If current lhs is used as rhs in the next stmt: ++ -> _1 = t->s ++ tt->s = _1. */ ++ if (lhs == next_rhs) ++ { ++ /* Check whether: ++ 1. the lhs is only used in the next stmt. ++ 2. the next lhs is candidate type. */ ++ if (has_single_use (lhs) ++ && pc_candidate_tree_p (gimple_assign_lhs (next_stmt))) ++ { ++ processed = true; ++ /* Copy directly without conversion after update type. */ ++ TREE_TYPE (new_lhs) ++ = make_unsigned_type (compressed_size); ++ } ++ } ++ /* -> _1 = t->s ++ _2 = _1->s ++ In this case, _1 might not be nullptr, so decompress it without ++ check. */ ++ else if (TREE_CODE (next_rhs) == COMPONENT_REF) ++ { ++ tree use_base = TREE_OPERAND (TREE_OPERAND (next_rhs, 0), 0); ++ if (use_base == lhs) ++ { ++ srtype *type = get_compression_candidate_type (rhs); ++ gcc_assert (type != NULL); ++ gsi_prev (gsi); ++ tree new_ref = NULL_TREE; ++ if (TREE_CODE (new_rhs) == MEM_REF) ++ new_ref = new_rhs; ++ else ++ { ++ tree base = TREE_OPERAND (TREE_OPERAND (new_rhs, 0), 0); ++ tree new_mem_ref = build_simple_mem_ref (base); ++ new_ref = build3 (COMPONENT_REF, ++ TREE_TYPE (new_rhs), ++ new_mem_ref, ++ TREE_OPERAND (new_rhs, 1), ++ NULL_TREE); ++ } ++ new_rhs = decompress_offset_to_ptr (new_ref, type, gsi); ++ processed = true; ++ gsi_next (gsi); ++ } ++ } ++ } ++ gsi_prev (gsi); ++ return processed; ++ } ++ return false; ++} ++ + /* Perform pointer decompression with check. 
The conversion will be as shown + in the following example: + Orig bb: +@@ -6320,7 +6525,10 @@ ipa_struct_reorg::decompress_candidate (gimple_stmt_iterator *gsi, + tree lhs, tree rhs, tree &new_lhs, + tree &new_rhs) + { +- // TODO: simplifiy check and rewrite will be pushed in next PR. ++ if (current_layout_opt_level & POINTER_COMPRESSION_UNSAFE ++ && decompress_candidate_without_check (gsi, lhs, rhs, new_lhs, new_rhs)) ++ return true; ++ + return decompress_candidate_with_check (gsi, rhs, new_rhs); + } + +@@ -6341,14 +6549,23 @@ ipa_struct_reorg::try_rewrite_with_pointer_compression (gassign *stmt, + if (pc_type_conversion_candidate_p (lhs)) + { + /* Transfer MEM[(struct *)_1].files = _4; +- to MEM[(struct *)_1].files = (struct *)_4; */ +- new_rhs = fold_convert (TREE_TYPE (lhs), tmp_rhs); ++ to _tmp = (struct *)_4; ++ MEM[(struct *)_1].files = _tmp; */ ++ tree tmp_reg = create_tmp_reg (TREE_TYPE (lhs)); ++ tree tmp_rhs_cvt = fold_convert (TREE_TYPE (lhs), tmp_rhs); ++ gimple *copy_stmt = gimple_build_assign (tmp_reg, tmp_rhs_cvt); ++ gsi_insert_before (gsi, copy_stmt, GSI_SAME_STMT); ++ new_rhs = tmp_reg; + } + else if (pc_type_conversion_candidate_p (rhs)) + { + /* Transfer _4 = MEM[(struct *)_1].nodes; +- to _4 = (new_struct *) MEM[(struct *)_1].nodes; */ +- new_rhs = fold_convert (TREE_TYPE (new_lhs), tmp_rhs); ++ to _tmp = MEM[(struct *)_1].nodes; ++ _4 = (new_struct *) _tmp; */ ++ tree tmp_reg = create_tmp_reg (TREE_TYPE (new_lhs)); ++ gimple *copy_stmt = gimple_build_assign (tmp_reg, tmp_rhs); ++ gsi_insert_before (gsi, copy_stmt, GSI_SAME_STMT); ++ new_rhs = fold_convert (TREE_TYPE (new_lhs), tmp_reg); + } + } + else if (l && r) +@@ -6544,7 +6761,7 @@ ipa_struct_reorg::rewrite_assign (gassign *stmt, gimple_stmt_iterator *gsi) + + if (dump_file && (dump_flags & TDF_DETAILS)) + { +- fprintf (dump_file, "\nrewriting stamtenet:\n"); ++ fprintf (dump_file, "\nrewriting statement:\n"); + print_gimple_stmt (dump_file, stmt, 0); + } + tree newlhs[max_split]; +@@ 
-6809,7 +7026,8 @@ ipa_struct_reorg::rewrite_call (gcall *stmt, gimple_stmt_iterator *gsi) + old statement is to be removed. */ + + bool +-ipa_struct_reorg::rewrite_cond (gcond *stmt, gimple_stmt_iterator *gsi) ++ipa_struct_reorg::rewrite_cond (gcond *stmt, ++ gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED) + { + tree_code rhs_code = gimple_cond_code (stmt); + +@@ -7039,8 +7257,11 @@ ipa_struct_reorg::rewrite_functions (void) + if (dump_file && (dump_flags & TDF_DETAILS)) + { + fprintf (dump_file, "\nNo rewrite:\n"); +- dump_function_to_file (current_function_decl, dump_file, +- dump_flags | TDF_VOPS); ++ if (current_function_decl) ++ dump_function_to_file (current_function_decl, dump_file, ++ dump_flags | TDF_VOPS); ++ else ++ fprintf (dump_file, " no declaration\n"); + } + pop_cfun (); + } +@@ -7073,11 +7294,13 @@ ipa_struct_reorg::rewrite_functions (void) + push_cfun (DECL_STRUCT_FUNCTION (node->decl)); + if (dump_file && (dump_flags & TDF_DETAILS)) + { +- fprintf (dump_file, "==== Before create decls: %dth_%s ====\n\n", ++ fprintf (dump_file, "==== Before create decls: %dth %s ====\n\n", + i, f->node->name ()); + if (current_function_decl) + dump_function_to_file (current_function_decl, dump_file, + dump_flags | TDF_VOPS); ++ else ++ fprintf (dump_file, " no declaration\n"); + } + pop_cfun (); + } +@@ -7109,10 +7332,13 @@ ipa_struct_reorg::rewrite_functions (void) + + if (dump_file && (dump_flags & TDF_DETAILS)) + { +- fprintf (dump_file, "\nBefore rewrite: %dth_%s\n", ++ fprintf (dump_file, "\nBefore rewrite: %dth %s\n", + i, f->node->name ()); +- dump_function_to_file (current_function_decl, dump_file, +- dump_flags | TDF_VOPS); ++ if (current_function_decl) ++ dump_function_to_file (current_function_decl, dump_file, ++ dump_flags | TDF_VOPS); ++ else ++ fprintf (dump_file, " no declaration\n"); + fprintf (dump_file, "\n======== Start to rewrite: %dth_%s ========\n", + i, f->node->name ()); + } +@@ -7187,10 +7413,13 @@ ipa_struct_reorg::rewrite_functions (void) + 
+ if (dump_file) + { +- fprintf (dump_file, "\nAfter rewrite: %dth_%s\n", ++ fprintf (dump_file, "\nAfter rewrite: %dth %s\n", + i, f->node->name ()); +- dump_function_to_file (current_function_decl, dump_file, +- dump_flags | TDF_VOPS); ++ if (current_function_decl) ++ dump_function_to_file (current_function_decl, dump_file, ++ dump_flags | TDF_VOPS); ++ else ++ fprintf (dump_file, " no declaration\n"); + } + + pop_cfun (); +@@ -7309,18 +7538,24 @@ ipa_struct_reorg::check_and_prune_struct_for_pointer_compression (void) + " skip compression.\n"); + continue; + } +- if ((current_layout_opt_level & POINTER_COMPRESSION_SAFE) +- && !type->has_legal_alloc_num) ++ if (!type->has_legal_alloc_num) + { +- if (dump_file) +- fprintf (dump_file, " has illegal struct array size," +- " skip compression.\n"); +- continue; ++ if (current_layout_opt_level & POINTER_COMPRESSION_UNSAFE) ++ if (dump_file) ++ fprintf (dump_file, " has unknown alloc size, but" ++ " in unsafe mode, so"); ++ else ++ { ++ if (dump_file) ++ fprintf (dump_file, " has illegal struct array size," ++ " skip compression.\n"); ++ continue; ++ } + } + pc_transform_num++; + type->pc_candidate = true; + if (dump_file) +- fprintf (dump_file, " attemps to do pointer compression.\n"); ++ fprintf (dump_file, " attempts to do pointer compression.\n"); + } + + if (dump_file) +@@ -7342,14 +7577,10 @@ init_pointer_size_for_pointer_compression (void) + switch (param_pointer_compression_size) + { + case 8: +- compressed_size = 8; // sizeof (uint8) +- break; ++ // FALLTHRU + case 16: +- compressed_size = 16; // sizeof (uint16) +- break; +- case 32: +- compressed_size = 32; // sizeof (uint32) +- break; ++ // FALLTHRU ++ case 32: compressed_size = param_pointer_compression_size; break; + default: + error ("Invalid pointer compression size, using the following param: " + "\"--param compressed-pointer-size=[8,16,32]\""); +@@ -7426,6 +7657,8 @@ public: + unsigned int level = 0; + switch (struct_layout_optimize_level) + { ++ case 5: 
level |= POINTER_COMPRESSION_UNSAFE; ++ // FALLTHRU + case 4: level |= POINTER_COMPRESSION_SAFE; + // FALLTHRU + case 3: level |= DEAD_FIELD_ELIMINATION; +diff --git a/gcc/symbol-summary.h b/gcc/symbol-summary.h +index 3fe64047c..4f896f4e4 100644 +--- a/gcc/symbol-summary.h ++++ b/gcc/symbol-summary.h +@@ -103,16 +103,15 @@ protected: + /* Allocates new data that are stored within map. */ + T* allocate_new () + { +- /* In structure optimizatons, we call new to ensure that +- the allocated memory is initialized to 0. */ +- if (flag_ipa_struct_reorg) +- return is_ggc () ? new (ggc_internal_alloc (sizeof (T))) T () +- : new T (); +- + /* Call gcc_internal_because we do not want to call finalizer for + a type T. We call dtor explicitly. */ +- return is_ggc () ? new (ggc_internal_alloc (sizeof (T))) T () +- : m_allocator.allocate () ; ++ T* allocated = is_ggc () ? new (ggc_internal_alloc (sizeof (T))) T () ++ : m_allocator.allocate (); ++ /* In structure optimizatons, we call memset to ensure that ++ the allocated memory is initialized to 0. */ ++ if (flag_ipa_struct_reorg) ++ memset (allocated, 0, sizeof (T)); ++ return allocated; + } + + /* Release an item that is stored within map. */ +@@ -121,12 +120,7 @@ protected: + if (is_ggc ()) + ggc_delete (item); + else +- { +- if (flag_ipa_struct_reorg) +- delete item; +- else +- m_allocator.remove (item); +- } ++ m_allocator.remove (item); + } + + /* Unregister all call-graph hooks. 
*/ +diff --git a/gcc/testsuite/gcc.dg/struct/csr_skip_void_struct_name.c b/gcc/testsuite/gcc.dg/struct/csr_skip_void_struct_name.c +new file mode 100644 +index 000000000..c5e4968d9 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/csr_skip_void_struct_name.c +@@ -0,0 +1,53 @@ ++// Structures without names should not be optimized ++/* { dg-do compile } */ ++#include ++#include ++ ++typedef struct ++{ ++ int a; ++ float b; ++ double s1; ++ double s2; ++ double s3; ++ double s4; ++ double s5; ++ double s6; ++ double s7; ++ double s8; ++} str_t1; ++ ++#define N 1000 ++ ++int num; ++ ++int ++main () ++{ ++ int i, r; ++ ++ r = rand (); ++ num = r > N ? N : r; ++ str_t1 *p1 = calloc (num, sizeof (str_t1)); ++ ++ if (p1 == NULL) ++ return 0; ++ ++ for (i = 0; i < num; i++) ++ p1[i].a = 1; ++ ++ for (i = 0; i < num; i++) ++ p1[i].b = 2; ++ ++ for (i = 0; i < num; i++) ++ if (p1[i].a != 1) ++ abort (); ++ ++ for (i = 0; i < num; i++) ++ if (fabsf (p1[i].b - 2) > 0.0001) ++ abort (); ++ ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump "No structures to transform in Complete Structure Relayout." 
"struct_reorg" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/pc_cast_int.c b/gcc/testsuite/gcc.dg/struct/pc_cast_int.c +new file mode 100644 +index 000000000..6f67fc556 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/pc_cast_int.c +@@ -0,0 +1,91 @@ ++// Escape cast int for pointer compression ++/* { dg-do compile } */ ++ ++#include ++#include ++ ++typedef struct node node_t; ++typedef struct node *node_p; ++ ++typedef struct arc arc_t; ++typedef struct arc *arc_p; ++ ++typedef struct network ++{ ++ arc_p arcs; ++ arc_p sorted_arcs; ++ int x; ++ node_p nodes; ++ node_p stop_nodes; ++} network_t; ++ ++struct node ++{ ++ int64_t potential; ++ int orientation; ++ node_p child; ++ node_p pred; ++ node_p sibling; ++ node_p sibling_prev; ++ arc_p basic_arc; ++ arc_p firstout; ++ arc_p firstin; ++ arc_p arc_tmp; ++ int64_t flow; ++ int64_t depth; ++ int number; ++ int time; ++}; ++ ++struct arc ++{ ++ int id; ++ int64_t cost; ++ node_p tail; ++ node_p head; ++ short ident; ++ arc_p nextout; ++ arc_p nextin; ++ int64_t flow; ++ int64_t org_cost; ++ network_t* net_add; ++}; ++ ++ ++const int MAX = 100; ++network_t* net; ++node_p node; ++ ++int ++main () ++{ ++ net = (network_t*) calloc (1, sizeof(network_t)); ++ net->arcs = (arc_p) calloc (MAX, sizeof (arc_t)); ++ net->sorted_arcs = (arc_p) calloc (MAX, sizeof (arc_t)); ++ net->nodes = (node_p) calloc (MAX, sizeof (node_t)); ++ net->arcs->id = 100; ++ ++ node = net->nodes; ++ node_p n1 = (node_p) 0x123456; ++ ++ for (unsigned i = 0; i < MAX; i++) ++ { ++ node->pred = n1; ++ node = node + 1; ++ } ++ ++ node = net->nodes; ++ ++ for (unsigned i = 0; i < MAX; i++) ++ { ++ if (node->pred != n1) ++ { ++ abort (); ++ } ++ node = node + 1; ++ } ++ ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump "No structures to transform in pointer compression" "struct_reorg" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/pc_compress_and_decomress.c 
b/gcc/testsuite/gcc.dg/struct/pc_compress_and_decomress.c +new file mode 100644 +index 000000000..d0b8d1afa +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/pc_compress_and_decomress.c +@@ -0,0 +1,90 @@ ++// Support basic pointer compression and decompression ++/* { dg-do compile } */ ++ ++#include ++#include ++ ++typedef struct node node_t; ++typedef struct node *node_p; ++ ++typedef struct arc arc_t; ++typedef struct arc *arc_p; ++ ++typedef struct network ++{ ++ arc_p arcs; ++ arc_p sorted_arcs; ++ int x; ++ node_p nodes; ++ node_p stop_nodes; ++} network_t; ++ ++struct node ++{ ++ int64_t potential; ++ int orientation; ++ node_p child; ++ node_p pred; ++ node_p sibling; ++ node_p sibling_prev; ++ arc_p basic_arc; ++ arc_p firstout; ++ arc_p firstin; ++ arc_p arc_tmp; ++ int64_t flow; ++ int64_t depth; ++ int number; ++ int time; ++}; ++ ++struct arc ++{ ++ int id; ++ int64_t cost; ++ node_p tail; ++ node_p head; ++ short ident; ++ arc_p nextout; ++ arc_p nextin; ++ int64_t flow; ++ int64_t org_cost; ++ network_t* net_add; ++}; ++ ++ ++const int MAX = 100; ++network_t* net; ++node_p node; ++ ++int ++main () ++{ ++ net = (network_t*) calloc (1, sizeof(network_t)); ++ net->arcs = (arc_p) calloc (MAX, sizeof (arc_t)); ++ net->sorted_arcs = (arc_p) calloc (MAX, sizeof (arc_t)); ++ net->nodes = (node_p) calloc (MAX, sizeof (node_t)); ++ net->arcs->id = 100; ++ ++ node = net->nodes; ++ ++ for (unsigned i = 0; i < MAX; i++) ++ { ++ node->pred = node; ++ node = node + 1; ++ } ++ ++ node = net->nodes; ++ ++ for (unsigned i = 0; i < MAX; i++) ++ { ++ if (node->pred != node) ++ { ++ abort (); ++ } ++ node = node + 1; ++ } ++ ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump "Number of structures to transform in pointer compression is 1" "struct_reorg" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/pc_ptr2void.c b/gcc/testsuite/gcc.dg/struct/pc_ptr2void.c +new file mode 100644 +index 000000000..5022c1967 +--- /dev/null ++++ 
b/gcc/testsuite/gcc.dg/struct/pc_ptr2void.c +@@ -0,0 +1,87 @@ ++// Partially support escape_cast_void for pointer compression. ++/* { dg-do compile } */ ++ ++#include ++#include ++ ++typedef struct node node_t; ++typedef struct node *node_p; ++ ++typedef struct arc arc_t; ++typedef struct arc *arc_p; ++ ++typedef struct network ++{ ++ arc_p arcs, sorted_arcs; ++ int x; ++ node_p nodes, stop_nodes; ++} network_t; ++ ++struct node ++{ ++ int64_t potential; ++ int orientation; ++ node_p child; ++ node_p pred; ++ node_p sibling; ++ node_p sibling_prev; ++ arc_p basic_arc; ++ arc_p firstout; ++ arc_p firstin; ++ arc_p arc_tmp; ++ int64_t flow; ++ int64_t depth; ++ int number; ++ int time; ++}; ++ ++struct arc ++{ ++ int id; ++ int64_t cost; ++ node_p tail; ++ node_p head; ++ short ident; ++ arc_p nextout; ++ arc_p nextin; ++ int64_t flow; ++ int64_t org_cost; ++}; ++ ++const int MAX = 100; ++network_t* net = NULL; ++int cnt = 0; ++ ++__attribute__((noinline)) int ++primal_feasible (network_t *net) ++{ ++ void* stop; ++ node_t *node; ++ ++ node = net->nodes; ++ stop = (void *)net->stop_nodes; ++ for( node++; node < (node_t *)stop; node++ ) ++ { ++ net->x = 1; ++ printf( "PRIMAL NETWORK SIMPLEX: "); ++ } ++ return 0; ++} ++ ++int ++main () ++{ ++ net = (network_t*) calloc (1, 20); ++ net->nodes = calloc (MAX, sizeof (node_t)); ++ net->stop_nodes = net->nodes + MAX - 1; ++ cnt = primal_feasible( net ); ++ ++ net = (network_t*) calloc (1, 20); ++ if( !(net->arcs) ) ++ { ++ return -1; ++ } ++ return cnt; ++} ++ ++/* { dg-final { scan-ipa-dump "Number of structures to transform in pointer compression is 1" "struct_reorg" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/pc_simple_rewrite_pc.c b/gcc/testsuite/gcc.dg/struct/pc_simple_rewrite_pc.c +new file mode 100644 +index 000000000..98943c9b8 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/pc_simple_rewrite_pc.c +@@ -0,0 +1,112 @@ ++// Check simplify rewrite chance for pointer compression and 
decompression ++/* { dg-do compile } */ ++ ++#include ++#include ++ ++typedef struct node node_t; ++typedef struct node *node_p; ++ ++typedef struct arc arc_t; ++typedef struct arc *arc_p; ++ ++typedef struct network ++{ ++ arc_p arcs; ++ arc_p sorted_arcs; ++ int x; ++ node_p nodes; ++ node_p stop_nodes; ++} network_t; ++ ++struct node ++{ ++ int64_t potential; ++ int orientation; ++ node_p child; ++ node_p pred; ++ node_p sibling; ++ node_p sibling_prev; ++ arc_p basic_arc; ++ arc_p firstout; ++ arc_p firstin; ++ arc_p arc_tmp; ++ int64_t flow; ++ int64_t depth; ++ int number; ++ int time; ++}; ++ ++struct arc ++{ ++ int id; ++ int64_t cost; ++ node_p tail; ++ node_p head; ++ short ident; ++ arc_p nextout; ++ arc_p nextin; ++ int64_t flow; ++ int64_t org_cost; ++ network_t* net_add; ++}; ++ ++ ++const int MAX = 100; ++network_t* net; ++node_p node; ++arc_p arc; ++ ++int ++main () ++{ ++ net = (network_t*) calloc (1, sizeof(network_t)); ++ net->arcs = (arc_p) calloc (MAX, sizeof (arc_t)); ++ net->sorted_arcs = (arc_p) calloc (MAX, sizeof (arc_t)); ++ net->nodes = (node_p) calloc (MAX, sizeof (node_t)); ++ net->arcs->id = 100; ++ ++ node = net->nodes; ++ arc = net->arcs; ++ ++ for (unsigned i = 0; i < MAX; i++) ++ { ++ arc->head = node; ++ arc->head->child = node; ++ node->potential = i + 1; ++ arc->cost = arc->head->potential; ++ arc->tail = node->sibling; ++ if (i % 2) ++ node->pred = net->nodes + i; ++ else ++ node->pred = NULL; ++ ++ if (node->pred && node->pred->child != NULL) ++ node->number = 0; ++ else ++ node->number = 1; ++ ++ node = node + 1; ++ arc = arc + 1; ++ } ++ ++ node = net->nodes; ++ arc = net->arcs; ++ ++ for (unsigned i = 0; i < MAX; i++) ++ { ++ node_p t = i % 2 ? node : NULL; ++ int tt = i % 2 ? 
0 : 1; ++ if (arc->head->pred != t || arc->cost == 0 ++ || arc->tail != node->sibling || node->number != tt) ++ { ++ abort (); ++ } ++ arc = arc + 1; ++ node = node + 1; ++ } ++ ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump "Number of structures to transform in pointer compression is 1" "struct_reorg" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/pc_skip_void_struct_name.c b/gcc/testsuite/gcc.dg/struct/pc_skip_void_struct_name.c +new file mode 100644 +index 000000000..a0e191267 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/pc_skip_void_struct_name.c +@@ -0,0 +1,53 @@ ++// Structures without names should not be optimized ++/* { dg-do compile } */ ++#include ++#include ++ ++typedef struct ++{ ++ int a; ++ float b; ++ double s1; ++ double s2; ++ double s3; ++ double s4; ++ double s5; ++ double s6; ++ double s7; ++ double s8; ++} str_t1; ++ ++#define N 1000 ++ ++int num; ++ ++int ++main () ++{ ++ int i, r; ++ ++ r = rand (); ++ num = r > N ? N : r; ++ str_t1 *p1 = calloc (num, sizeof (str_t1)); ++ ++ if (p1 == NULL) ++ return 0; ++ ++ for (i = 0; i < num; i++) ++ p1[i].a = 1; ++ ++ for (i = 0; i < num; i++) ++ p1[i].b = 2; ++ ++ for (i = 0; i < num; i++) ++ if (p1[i].a != 1) ++ abort (); ++ ++ for (i = 0; i < num; i++) ++ if (fabsf (p1[i].b - 2) > 0.0001) ++ abort (); ++ ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump "No structures to transform in pointer compression" "struct_reorg" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/struct-reorg.exp b/gcc/testsuite/gcc.dg/struct/struct-reorg.exp +index 278c4e4f5..c40474407 100644 +--- a/gcc/testsuite/gcc.dg/struct/struct-reorg.exp ++++ b/gcc/testsuite/gcc.dg/struct/struct-reorg.exp +@@ -47,6 +47,14 @@ gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/rf_*.c]] \ + gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/dfe*.c]] \ + "" "-fipa-struct-reorg=3 -fdump-ipa-all -flto-partition=one -fwhole-program" + ++# -fipa-struct-reorg=4 
++gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/pc*.c]] \ ++ "" "-fipa-struct-reorg=4 -fdump-ipa-all -flto-partition=one -fwhole-program" ++ ++# -fipa-struct-reorg=5 ++gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/pc*.c]] \ ++ "" "-fipa-struct-reorg=5 -fdump-ipa-all -flto-partition=one -fwhole-program" ++ + # All done. + torture-finish + dg-finish +-- +2.33.0 + diff --git a/0031-AutoBOLT-Support-saving-feedback-count-info-to-ELF-s.patch b/0031-AutoBOLT-Support-saving-feedback-count-info-to-ELF-s.patch new file mode 100644 index 0000000000000000000000000000000000000000..5e16f31f15bc7f83a7ad4f9df489f95215a5c676 --- /dev/null +++ b/0031-AutoBOLT-Support-saving-feedback-count-info-to-ELF-s.patch @@ -0,0 +1,550 @@ +From 72531376df5ed93c2d945469368ba5514eca8407 Mon Sep 17 00:00:00 2001 +From: zhenyu--zhao_admin +Date: Tue, 5 Dec 2023 15:33:08 +0800 +Subject: [PATCH] [AutoBOLT] Support saving feedback count info to ELF segment + 1/3 + +--- + gcc/common.opt | 8 + + gcc/final.cc | 405 ++++++++++++++++++++++++++++++++++++++++++++++++- + gcc/opts.cc | 61 ++++++++ + 3 files changed, 473 insertions(+), 1 deletion(-) + +diff --git a/gcc/common.opt b/gcc/common.opt +index b01df919e..e69947fc2 100644 +--- a/gcc/common.opt ++++ b/gcc/common.opt +@@ -2546,6 +2546,14 @@ freorder-functions + Common Var(flag_reorder_functions) Optimization + Reorder functions to improve code placement. + ++fauto-bolt ++Common Var(flag_auto_bolt) ++Generate profile from AutoFDO or PGO and do BOLT optimization after linkage. ++ ++fauto-bolt= ++Common Joined RejectNegative ++Specify the feedback data directory required by BOLT-plugin. The default is the current directory. ++ + frerun-cse-after-loop + Common Var(flag_rerun_cse_after_loop) Optimization + Add a common subexpression elimination pass after loop optimizations. 
+diff --git a/gcc/final.cc b/gcc/final.cc +index a9868861b..d4c4fa08f 100644 +--- a/gcc/final.cc ++++ b/gcc/final.cc +@@ -81,6 +81,7 @@ along with GCC; see the file COPYING3. If not see + #include "rtl-iter.h" + #include "print-rtl.h" + #include "function-abi.h" ++#include "insn-codes.h" + #include "common/common-target.h" + + #ifdef XCOFF_DEBUGGING_INFO +@@ -4266,7 +4267,403 @@ leaf_renumber_regs_insn (rtx in_rtx) + } + } + #endif +- ++ ++#define ASM_FDO_SECTION_PREFIX ".text.fdo." ++ ++#define ASM_FDO_CALLER_FLAG ".fdo.caller " ++#define ASM_FDO_CALLER_SIZE_FLAG ".fdo.caller.size " ++#define ASM_FDO_CALLER_BIND_FLAG ".fdo.caller.bind" ++ ++#define ASM_FDO_CALLEE_FLAG ".fdo.callee" ++ ++/* Return the relative offset address of the start instruction of BB, ++ return -1 if it is empty instruction. */ ++ ++static int ++get_bb_start_addr (basic_block bb) ++{ ++ rtx_insn *insn; ++ FOR_BB_INSNS (bb, insn) ++ { ++ if (!INSN_P (insn)) ++ { ++ continue; ++ } ++ /* The jump target of call is not in this function, so ++ it should be excluded. */ ++ if (CALL_P (insn)) ++ { ++ return -1; ++ } ++ ++ int insn_code = recog_memoized (insn); ++ ++ /* The instruction NOP in llvm-bolt belongs to the previous ++ BB, so it needs to be skipped. */ ++ if (insn_code != CODE_FOR_nop) ++ { ++ return INSN_ADDRESSES (INSN_UID (insn)); ++ } ++ } ++ return -1; ++} ++ ++/* Return the relative offet address of the end instruction of BB, ++ return -1 if it is empty or call instruction. */ ++ ++static int ++get_bb_end_addr (basic_block bb) ++{ ++ rtx_insn *insn; ++ int num_succs = EDGE_COUNT (bb->succs); ++ FOR_BB_INSNS_REVERSE (bb, insn) ++ { ++ if (!INSN_P (insn)) ++ { ++ continue; ++ } ++ /* The jump target of call is not in this function, so ++ it should be excluded. 
*/ ++ if (CALL_P (insn)) ++ { ++ return -1; ++ } ++ if ((num_succs == 1) ++ || ((num_succs == 2) && any_condjump_p (insn))) ++ { ++ return INSN_ADDRESSES (INSN_UID (insn)); ++ } ++ else ++ { ++ return -1; ++ } ++ } ++ return -1; ++} ++ ++/* Return the end address of cfun. */ ++ ++static int ++get_function_end_addr () ++{ ++ rtx_insn *insn = get_last_insn (); ++ for (; insn != get_insns (); insn = PREV_INSN (insn)) ++ { ++ if (!INSN_P (insn)) ++ { ++ continue; ++ } ++ return INSN_ADDRESSES (INSN_UID (insn)); ++ } ++ ++ return -1; ++} ++ ++/* Return the function profile status string. */ ++ ++static const char * ++get_function_profile_status () ++{ ++ const char *profile_status[] = { ++ "PROFILE_ABSENT", ++ "PROFILE_GUESSED", ++ "PROFILE_READ", ++ "PROFILE_LAST" /* Last value, used by profile streaming. */ ++ }; ++ ++ return profile_status[profile_status_for_fn (cfun)]; ++} ++ ++/* Return the count from the feedback data, such as PGO or ADDO. */ ++ ++inline static gcov_type ++get_fdo_count (profile_count count) ++{ ++ return count.quality () >= GUESSED ++ ? count.to_gcov_type () : 0; ++} ++ ++/* Return the profile quality string. */ ++ ++static const char * ++get_fdo_count_quality (profile_count count) ++{ ++ const char *profile_quality[] = { ++ "UNINITIALIZED_PROFILE", ++ "GUESSED_LOCAL", ++ "GUESSED_GLOBAL0", ++ "GUESSED_GLOBAL0_ADJUSTED", ++ "GUESSED", ++ "AFDO", ++ "ADJUSTED", ++ "PRECISE" ++ }; ++ ++ return profile_quality[count.quality ()]; ++} ++ ++static const char * ++alias_local_functions (const char *fnname) ++{ ++ if (TREE_PUBLIC (cfun->decl)) ++ { ++ return fnname; ++ } ++ return concat (fnname, "/", lbasename (dump_base_name), NULL); ++} ++ ++/* Return function bind type string. 
*/ ++ ++static const char * ++simple_get_function_bind () ++{ ++ const char *function_bind[] = { ++ "GLOBAL", ++ "WEAK", ++ "LOCAL", ++ "UNKNOWN" ++ }; ++ ++ if (TREE_PUBLIC (cfun->decl)) ++ { ++ if (!(DECL_WEAK (cfun->decl))) ++ { ++ return function_bind[0]; ++ } ++ else ++ { ++ return function_bind[1]; ++ } ++ } ++ else ++ { ++ return function_bind[2]; ++ } ++ ++ return function_bind[3]; ++} ++ ++/* Dumo the callee functions insn in bb by CALL_P (insn). */ ++ ++static void ++dump_direct_callee_info_to_asm (basic_block bb, gcov_type call_count) ++{ ++ rtx_insn *insn; ++ FOR_BB_INSNS (bb, insn) ++ { ++ if (insn && CALL_P (insn)) ++ { ++ tree callee = get_call_fndecl (insn); ++ ++ if (callee) ++ { ++ fprintf (asm_out_file, "\t.string \"%x\"\n", ++ INSN_ADDRESSES (INSN_UID (insn))); ++ ++ fprintf (asm_out_file, "\t.string \"%s%s\"\n", ++ ASM_FDO_CALLEE_FLAG, ++ alias_local_functions (get_fnname_from_decl (callee))); ++ ++ fprintf (asm_out_file, ++ "\t.string \"" HOST_WIDE_INT_PRINT_DEC "\"\n", ++ call_count); ++ ++ if (dump_file) ++ { ++ fprintf (dump_file, "call: %x --> %s \n", ++ INSN_ADDRESSES (INSN_UID (insn)), ++ alias_local_functions ++ (get_fnname_from_decl (callee))); ++ } ++ } ++ } ++ } ++} ++ ++/* Dump the edge info into asm. */ ++static int ++dump_edge_jump_info_to_asm (basic_block bb, gcov_type bb_count) ++{ ++ edge e; ++ edge_iterator ei; ++ gcov_type edge_total_count = 0; ++ ++ FOR_EACH_EDGE (e, ei, bb->succs) ++ { ++ gcov_type edge_count = get_fdo_count (e->count ()); ++ edge_total_count += edge_count; ++ ++ int edge_start_addr = get_bb_end_addr (e->src); ++ int edge_end_addr = get_bb_start_addr(e->dest); ++ ++ if (edge_start_addr == -1 || edge_end_addr == -1) ++ { ++ continue; ++ } ++ ++ /* This is a reserved assert for the original design. If this ++ assert is found, use the address of the previous instruction ++ as edge_start_addr. 
*/ ++ gcc_assert (edge_start_addr != edge_end_addr); ++ ++ if (dump_file) ++ { ++ fprintf (dump_file, "edge: %x --> %x = (%ld)\n", ++ edge_start_addr, edge_end_addr, edge_count); ++ } ++ ++ if (edge_count > 0) ++ { ++ fprintf(asm_out_file, "\t.string \"%x\"\n", edge_start_addr); ++ fprintf(asm_out_file, "\t.string \"%x\"\n", edge_end_addr); ++ fprintf(asm_out_file, "\t.string \"" HOST_WIDE_INT_PRINT_DEC "\"\n", ++ edge_count); ++ } ++ } ++ ++ gcov_type call_count = MAX (edge_total_count, bb_count); ++ if (call_count > 0) ++ { ++ dump_direct_callee_info_to_asm (bb, call_count); ++ } ++} ++ ++/* Dump the bb info into asm. */ ++ ++static void ++dump_bb_info_to_asm (basic_block bb, gcov_type bb_count) ++{ ++ int bb_start_addr = get_bb_start_addr (bb); ++ if (bb_start_addr != -1) ++ { ++ fprintf (asm_out_file, "\t.string \"%x\"\n", bb_start_addr); ++ fprintf (asm_out_file, "\t.string \"" HOST_WIDE_INT_PRINT_DEC "\"\n", ++ bb_count); ++ } ++} ++ ++/* Dump the function info into asm. */ ++ ++static void ++dump_function_info_to_asm (const char *fnname) ++{ ++ fprintf (asm_out_file, "\t.string \"%s%s\"\n", ++ ASM_FDO_CALLER_FLAG, alias_local_functions (fnname)); ++ fprintf (asm_out_file, "\t.string \"%s%d\"\n", ++ ASM_FDO_CALLER_SIZE_FLAG, get_function_end_addr ()); ++ fprintf (asm_out_file, "\t.string \"%s%s\"\n", ++ ASM_FDO_CALLER_BIND_FLAG, simple_get_function_bind ()); ++ ++ if (dump_file) ++ { ++ fprintf (dump_file, "\n FUNC_NAME: %s\n", ++ alias_local_functions (fnname)); ++ fprintf (dump_file, " file: %s\n", ++ dump_base_name); ++ fprintf (dump_file, "profile_status: %s\n", ++ get_function_profile_status ()); ++ fprintf (dump_file, " size: %x\n", ++ get_function_end_addr ()); ++ fprintf (dump_file, " function_bind: %s\n", ++ simple_get_function_bind ()); ++ } ++} ++ ++/* Dump function profile into form AutoFDO or PGO to asm. 
*/ ++ ++static void ++dump_fdo_info_to_asm (const char *fnname) ++{ ++ basic_block bb; ++ ++ dump_function_info_to_asm (fnname); ++ ++ FOR_EACH_BB_FN (bb, cfun) ++ { ++ gcov_type bb_count = get_fdo_count (bb->count); ++ if (bb_count == 0) ++ { ++ continue; ++ } ++ ++ if (dump_file) ++ { ++ fprintf (dump_file, "BB: %x --> %x = (%ld) [%s]\n", ++ get_bb_start_addr (bb), get_bb_end_addr (bb), ++ bb_count, get_fdo_count_quality (bb->count)); ++ } ++ ++ if (flag_profile_use) ++ { ++ dump_edge_jump_info_to_asm (bb, bb_count); ++ } ++ else if (flag_auto_profile) ++ { ++ dump_bb_info_to_asm (bb, bb_count); ++ } ++ } ++} ++ ++/* When -fauto-bolt option is turnded on, the .text.fdo section ++ will be generated in the *.s file if there is feedback information ++ from PGO or AutoFDO. This section will parserd in BOLT-plugin. */ ++ ++static void ++dump_profile_to_elf_sections () ++{ ++ if (!flag_function_sections) ++ { ++ error ("-fauto-bolt should work with -ffunction-section"); ++ return; ++ } ++ if (!flag_ipa_ra) ++ { ++ error ("-fauto-bolt should work with -fipa-ra"); ++ return; ++ } ++ if (flag_align_jumps) ++ { ++ error ("-fauto-bolt is not supported with -falign-jumps"); ++ return; ++ } ++ if (flag_align_labels) ++ { ++ error ("-fauto-bolt is not spported with -falign-loops"); ++ return; ++ } ++ if (flag_align_loops) ++ { ++ error ("-fauto-bolt is not supported with -falign-loops"); ++ return; ++ } ++ ++ /* Return if no feedback data. */ ++ if (!flag_profile_use && !flag_auto_profile) ++ { ++ error ("-fauto-bolt should use with -profile-use or -fauto-profile"); ++ return; ++ } ++ ++ /* Avoid empty functions. 
*/ ++ if (TREE_CODE (cfun->decl) != FUNCTION_DECL) ++ { ++ return; ++ } ++ int flags = SECTION_DEBUG | SECTION_EXCLUDE; ++ const char *fnname = get_fnname_from_decl (current_function_decl); ++ char *profile_fnname = NULL; ++ ++ asprintf (&profile_fnname, "%s%s", ASM_FDO_SECTION_PREFIX, fnname); ++ switch_to_section (get_section (profile_fnname, flags, NULL)); ++ dump_fdo_info_to_asm (fnname); ++ ++ if (profile_fnname) ++ { ++ free (profile_fnname); ++ profile_fnname = NULL; ++ } ++} ++ + /* Turn the RTL into assembly. */ + static unsigned int + rest_of_handle_final (void) +@@ -4334,6 +4731,12 @@ rest_of_handle_final (void) + targetm.asm_out.destructor (XEXP (DECL_RTL (current_function_decl), 0), + decl_fini_priority_lookup + (current_function_decl)); ++ ++ if (flag_auto_bolt) ++ { ++ dump_profile_to_elf_sections (); ++ } ++ + return 0; + } + +diff --git a/gcc/opts.cc b/gcc/opts.cc +index b868d189e..6d57e7d69 100644 +--- a/gcc/opts.cc ++++ b/gcc/opts.cc +@@ -1279,6 +1279,10 @@ finish_options (struct gcc_options *opts, struct gcc_options *opts_set, + if (opts->x_flag_vtable_verify && opts->x_flag_lto) + sorry ("vtable verification is not supported with LTO"); + ++ /* Currently -fauto-bolt is not supported for LTO. */ ++ if (opts->x_flag_auto_bolt && opts->x_flag_lto) ++ sorry ("%<-fauto-bolt%> is not supported with LTO"); ++ + /* Control IPA optimizations based on different -flive-patching level. */ + if (opts->x_flag_live_patching) + control_options_for_live_patching (opts, opts_set, +@@ -1291,6 +1295,58 @@ finish_options (struct gcc_options *opts, struct gcc_options *opts_set, + = (opts->x_flag_unroll_loops + || opts->x_flag_peel_loops + || opts->x_optimize >= 3); ++ ++ if (opts->x_flag_auto_bolt) ++ { ++ /* Record the function section to facilitate the feedback ++ data storage. 
*/ ++ if (!opts->x_flag_function_sections) ++ { ++ inform (loc, ++ "%<-fauto-bolt%> should work with %<-ffunction-sections%>," ++ " enabling %<-ffunction-sections%>"); ++ opts->x_flag_function_sections = true; ++ } ++ ++ /* Cancel the internal alignment of the function. The binary ++ optimizer bolt will cancel the internal alignment optimization ++ of the function, so the alignment is meaningless at this time, ++ and if not, it will bring trouble to the calculation of the ++ offset address of the instruction. */ ++ if (opts->x_flag_align_jumps) ++ { ++ inform (loc, ++ "%<-fauto-bolt%> should not work with %<-falign-jumps%>," ++ " disabling %<-falign-jumps%>"); ++ opts->x_flag_align_jumps = false; ++ } ++ ++ if (opts->x_flag_align_labels) ++ { ++ inform (loc, ++ "%<-fauto-bolt%> should not work with %<-falign-labels%>," ++ " disabling %<-falign-labels%>"); ++ opts->x_flag_align_labels = false; ++ } ++ ++ if (opts->x_flag_align_loops) ++ { ++ inform (loc, ++ "%<-fauto-bolt%> should not work with %<-falign-loops%>," ++ " disabling %<-falign-loops%>"); ++ opts->x_flag_align_loops = false; ++ } ++ ++ /* When parsing instructions in RTL phase, we need to know ++ the call information of instructions to avoid being optimized. */ ++ if (!opts->x_flag_ipa_ra) ++ { ++ inform (loc, ++ "%<-fauto-bolt%> should work with %<-fipa-ra%>," ++ " enabling %<-fipa-ra%>"); ++ opts->x_flag_ipa_ra = true; ++ } ++ } + + /* With -fcx-limited-range, we do cheap and quick complex arithmetic. */ + if (opts->x_flag_cx_limited_range) +@@ -3226,6 +3282,11 @@ common_handle_option (struct gcc_options *opts, + &opts->x_flag_align_functions, + &opts->x_str_align_functions); + break; ++ ++ case OPT_fauto_bolt_: ++ case OPT_fauto_bolt: ++ /* Deferred. */ ++ break; + + case OPT_ftabstop_: + /* It is documented that we silently ignore silly values. 
*/ +-- +2.33.0 + diff --git a/0032-AutoBOLT-Add-bolt-linker-plugin-2-3.patch b/0032-AutoBOLT-Add-bolt-linker-plugin-2-3.patch new file mode 100644 index 0000000000000000000000000000000000000000..118d1ca7b5ccdf239a603d50e1d3b1627a706ee4 --- /dev/null +++ b/0032-AutoBOLT-Add-bolt-linker-plugin-2-3.patch @@ -0,0 +1,34094 @@ +From 82f9f48406955a6150def998b69b4eace4bd51eb Mon Sep 17 00:00:00 2001 +From: zhenyu--zhao_admin +Date: Thu, 7 Dec 2023 11:43:08 +0800 +Subject: [PATCH] [AutoBOLT] Add bolt linker plugin 2/3 + +--- + bolt-plugin/Makefile | 675 ++ + bolt-plugin/Makefile.am | 43 + + bolt-plugin/Makefile.in | 675 ++ + bolt-plugin/aclocal.m4 | 10250 +++++++++++++++++ + bolt-plugin/bolt-plugin.cc | 1153 ++ + bolt-plugin/config.h.in | 179 + + bolt-plugin/configure | 20909 +++++++++++++++++++++++++++++++++++ + bolt-plugin/configure.ac | 60 + + gcc/common.opt | 16 + + gcc/opts.cc | 27 +- + 10 files changed, 33985 insertions(+), 2 deletions(-) + create mode 100644 bolt-plugin/Makefile + create mode 100644 bolt-plugin/Makefile.am + create mode 100644 bolt-plugin/Makefile.in + create mode 100644 bolt-plugin/aclocal.m4 + create mode 100644 bolt-plugin/bolt-plugin.cc + create mode 100644 bolt-plugin/config.h.in + create mode 100755 bolt-plugin/configure + create mode 100644 bolt-plugin/configure.ac + +diff --git a/bolt-plugin/Makefile b/bolt-plugin/Makefile +new file mode 100644 +index 000000000..82a4bc2c6 +--- /dev/null ++++ b/bolt-plugin/Makefile +@@ -0,0 +1,675 @@ ++# Makefile.in generated by automake 1.16.5 from Makefile.am. ++# Makefile. Generated from Makefile.in by configure. ++ ++# Copyright (C) 1994-2021 Free Software Foundation, Inc. ++ ++# This Makefile.in is free software; the Free Software Foundation ++# gives unlimited permission to copy and/or distribute it, ++# with or without modifications, as long as this notice is preserved. 
++ ++# This program is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY, to the extent permitted by law; without ++# even the implied warranty of MERCHANTABILITY or FITNESS FOR A ++# PARTICULAR PURPOSE. ++ ++ ++ ++ ++am__is_gnu_make = { \ ++ if test -z '$(MAKELEVEL)'; then \ ++ false; \ ++ elif test -n '$(MAKE_HOST)'; then \ ++ true; \ ++ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ ++ true; \ ++ else \ ++ false; \ ++ fi; \ ++} ++am__make_running_with_option = \ ++ case $${target_option-} in \ ++ ?) ;; \ ++ *) echo "am__make_running_with_option: internal error: invalid" \ ++ "target option '$${target_option-}' specified" >&2; \ ++ exit 1;; \ ++ esac; \ ++ has_opt=no; \ ++ sane_makeflags=$$MAKEFLAGS; \ ++ if $(am__is_gnu_make); then \ ++ sane_makeflags=$$MFLAGS; \ ++ else \ ++ case $$MAKEFLAGS in \ ++ *\\[\ \ ]*) \ ++ bs=\\; \ ++ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ ++ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ ++ esac; \ ++ fi; \ ++ skip_next=no; \ ++ strip_trailopt () \ ++ { \ ++ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ ++ }; \ ++ for flg in $$sane_makeflags; do \ ++ test $$skip_next = yes && { skip_next=no; continue; }; \ ++ case $$flg in \ ++ *=*|--*) continue;; \ ++ -*I) strip_trailopt 'I'; skip_next=yes;; \ ++ -*I?*) strip_trailopt 'I';; \ ++ -*O) strip_trailopt 'O'; skip_next=yes;; \ ++ -*O?*) strip_trailopt 'O';; \ ++ -*l) strip_trailopt 'l'; skip_next=yes;; \ ++ -*l?*) strip_trailopt 'l';; \ ++ -[dEDm]) skip_next=yes;; \ ++ -[JT]) skip_next=yes;; \ ++ esac; \ ++ case $$flg in \ ++ *$$target_option*) has_opt=yes; break;; \ ++ esac; \ ++ done; \ ++ test $$has_opt = yes ++am__make_dryrun = (target_option=n; $(am__make_running_with_option)) ++am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) ++pkgdatadir = $(datadir)/bolt-plugin ++pkgincludedir = $(includedir)/bolt-plugin ++pkglibdir = $(libdir)/bolt-plugin ++pkglibexecdir = $(libexecdir)/bolt-plugin ++am__cd = 
CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd ++install_sh_DATA = $(install_sh) -c -m 644 ++install_sh_PROGRAM = $(install_sh) -c ++install_sh_SCRIPT = $(install_sh) -c ++INSTALL_HEADER = $(INSTALL_DATA) ++transform = $(program_transform_name) ++NORMAL_INSTALL = : ++PRE_INSTALL = : ++POST_INSTALL = : ++NORMAL_UNINSTALL = : ++PRE_UNINSTALL = : ++POST_UNINSTALL = : ++build_triplet = aarch64-unknown-linux-gnu ++host_triplet = aarch64-unknown-linux-gnu ++target_triplet = aarch64-unknown-linux-gnu ++subdir = . ++ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 ++am__aclocal_m4_deps = $(top_srcdir)/configure.ac ++am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ ++ $(ACLOCAL_M4) ++DIST_COMMON = $(srcdir)/Makefile.am $(top_srcdir)/configure \ ++ $(am__configure_deps) ++am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \ ++ configure.lineno config.status.lineno ++mkinstalldirs = $(SHELL) $(top_srcdir)/../mkinstalldirs ++CONFIG_HEADER = config.h ++CONFIG_CLEAN_FILES = ++CONFIG_CLEAN_VPATH_FILES = ++am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; ++am__vpath_adj = case $$p in \ ++ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ ++ *) f=$$p;; \ ++ esac; ++am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; ++am__install_max = 40 ++am__nobase_strip_setup = \ ++ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` ++am__nobase_strip = \ ++ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" ++am__nobase_list = $(am__nobase_strip_setup); \ ++ for p in $$list; do echo "$$p $$p"; done | \ ++ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ ++ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ ++ if (++n[$$2] == $(am__install_max)) \ ++ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ ++ END { for (dir in files) print dir, files[dir] }' ++am__base_list = \ ++ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ ++ sed 
'$$!N;$$!N;$$!N;$$!N;s/\n/ /g' ++am__uninstall_files_from_dir = { \ ++ test -z "$$files" \ ++ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ ++ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ ++ $(am__cd) "$$dir" && rm -f $$files; }; \ ++ } ++am__installdirs = "$(DESTDIR)$(libexecsubdir)" ++LTLIBRARIES = $(libexecsub_LTLIBRARIES) ++am_libbolt_plugin_la_OBJECTS = bolt-plugin.lo ++libbolt_plugin_la_OBJECTS = $(am_libbolt_plugin_la_OBJECTS) ++AM_V_P = $(am__v_P_$(V)) ++am__v_P_ = $(am__v_P_$(AM_DEFAULT_VERBOSITY)) ++am__v_P_0 = false ++am__v_P_1 = : ++AM_V_GEN = $(am__v_GEN_$(V)) ++am__v_GEN_ = $(am__v_GEN_$(AM_DEFAULT_VERBOSITY)) ++am__v_GEN_0 = @echo " GEN " $@; ++am__v_GEN_1 = ++AM_V_at = $(am__v_at_$(V)) ++am__v_at_ = $(am__v_at_$(AM_DEFAULT_VERBOSITY)) ++am__v_at_0 = @ ++am__v_at_1 = ++DEFAULT_INCLUDES = -I. ++depcomp = ++am__maybe_remake_depfiles = ++CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ ++ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) ++AM_V_lt = $(am__v_lt_$(V)) ++am__v_lt_ = $(am__v_lt_$(AM_DEFAULT_VERBOSITY)) ++am__v_lt_0 = --silent ++am__v_lt_1 = ++LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ ++ $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \ ++ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ ++ $(AM_CXXFLAGS) $(CXXFLAGS) ++AM_V_CXX = $(am__v_CXX_$(V)) ++am__v_CXX_ = $(am__v_CXX_$(AM_DEFAULT_VERBOSITY)) ++am__v_CXX_0 = @echo " CXX " $@; ++am__v_CXX_1 = ++CXXLD = $(CXX) ++CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ ++ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ ++ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ ++AM_V_CXXLD = $(am__v_CXXLD_$(V)) ++am__v_CXXLD_ = $(am__v_CXXLD_$(AM_DEFAULT_VERBOSITY)) ++am__v_CXXLD_0 = @echo " CXXLD " $@; ++am__v_CXXLD_1 = ++SOURCES = $(libbolt_plugin_la_SOURCES) ++am__can_run_installinfo = \ ++ case $$AM_UPDATE_INFO_DIR in \ ++ n|no|NO) false;; \ ++ *) (install-info --version) >/dev/null 2>&1;; \ ++ esac 
++am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) \ ++ config.h.in ++# Read a list of newline-separated strings from the standard input, ++# and print each of them once, without duplicates. Input order is ++# *not* preserved. ++am__uniquify_input = $(AWK) '\ ++ BEGIN { nonempty = 0; } \ ++ { items[$$0] = 1; nonempty = 1; } \ ++ END { if (nonempty) { for (i in items) print i; }; } \ ++' ++# Make sure the list of sources is unique. This is necessary because, ++# e.g., the same source file might be shared among _SOURCES variables ++# for different programs/libraries. ++am__define_uniq_tagged_files = \ ++ list='$(am__tagged_files)'; \ ++ unique=`for i in $$list; do \ ++ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ ++ done | $(am__uniquify_input)` ++AM_RECURSIVE_TARGETS = cscope ++ACLOCAL = ${SHELL} '/home/zzy/trans/gcc_1/missing' aclocal-1.16 ++AMTAR = $${TAR-tar} ++AM_DEFAULT_VERBOSITY = 1 ++AR = ar ++AUTOCONF = ${SHELL} '/home/zzy/trans/gcc_1/missing' autoconf ++AUTOHEADER = ${SHELL} '/home/zzy/trans/gcc_1/missing' autoheader ++AUTOMAKE = ${SHELL} '/home/zzy/trans/gcc_1/missing' automake-1.16 ++AWK = gawk ++CC = gcc ++CCDEPMODE = depmode=gcc3 ++CFLAGS = -g -O2 ++CPPFLAGS = ++CSCOPE = cscope ++CTAGS = ctags ++CXX = g++ ++CXXCPP = g++ -E ++CXXDEPMODE = depmode=gcc3 ++CXXFLAGS = -g -O2 ++CYGPATH_W = echo ++DEFS = -DHAVE_CONFIG_H ++DEPDIR = .deps ++DLLTOOL = false ++DSYMUTIL = ++DUMPBIN = ++ECHO_C = ++ECHO_N = -n ++ECHO_T = ++EGREP = /usr/bin/grep -E ++ETAGS = etags ++EXEEXT = ++FGREP = /usr/bin/grep -F ++FILECMD = file ++GREP = /usr/bin/grep ++INSTALL = /usr/bin/install -c ++INSTALL_DATA = ${INSTALL} -m 644 ++INSTALL_PROGRAM = ${INSTALL} ++INSTALL_SCRIPT = ${INSTALL} ++INSTALL_STRIP_PROGRAM = $(install_sh) -c -s ++LD = /usr/bin/ld ++LDFLAGS = ++LIBOBJS = ++LIBS = ++LIBTOOL = $(SHELL) $(top_builddir)/libtool ++LIPO = ++LN_S = ln -s ++LTLIBOBJS = ++LT_SYS_LIBRARY_PATH = ++MAINT = # ++MAKEINFO = ${SHELL} '/home/zzy/trans/gcc_1/missing' 
makeinfo ++MANIFEST_TOOL = : ++MKDIR_P = /usr/bin/mkdir -p ++NM = /usr/bin/nm -B ++NMEDIT = ++OBJDUMP = objdump ++OBJEXT = o ++OTOOL = ++OTOOL64 = ++PACKAGE = bolt-plugin ++PACKAGE_BUGREPORT = ++PACKAGE_NAME = bolt plugin for ld ++PACKAGE_STRING = bolt plugin for ld 0.1 ++PACKAGE_TARNAME = bolt-plugin ++PACKAGE_URL = ++PACKAGE_VERSION = 0.1 ++PATH_SEPARATOR = : ++RANLIB = ranlib ++SED = /usr/bin/sed ++SET_MAKE = ++SHELL = /bin/sh ++STRIP = strip ++VERSION = 0.1 ++abs_builddir = /home/zzy/trans/gcc_1/bolt-plugin ++abs_srcdir = /home/zzy/trans/gcc_1/bolt-plugin ++abs_top_builddir = /home/zzy/trans/gcc_1/bolt-plugin ++abs_top_srcdir = /home/zzy/trans/gcc_1/bolt-plugin ++ac_bolt_plugin_ldflags = -Wc,-static-libgcc ++ac_ct_AR = ar ++ac_ct_CC = gcc ++ac_ct_CXX = g++ ++ac_ct_DUMPBIN = ++accel_dir_suffix = ++am__include = include ++am__leading_dot = . ++am__quote = ++am__tar = $${TAR-tar} chof - "$$tardir" ++am__untar = $${TAR-tar} xf - ++bindir = ${exec_prefix}/bin ++build = aarch64-unknown-linux-gnu ++build_alias = ++build_cpu = aarch64 ++build_os = linux-gnu ++build_vendor = unknown ++builddir = . 
++datadir = ${datarootdir} ++datarootdir = ${prefix}/share ++docdir = ${datarootdir}/doc/${PACKAGE_TARNAME} ++dvidir = ${docdir} ++exec_prefix = ${prefix} ++gcc_build_dir = ../..//gcc ++host = aarch64-unknown-linux-gnu ++host_alias = ++host_cpu = aarch64 ++host_os = linux-gnu ++host_vendor = unknown ++htmldir = ${docdir} ++includedir = ${prefix}/include ++infodir = ${datarootdir}/info ++install_sh = ${SHELL} /home/zzy/trans/gcc_1/install-sh ++libdir = ${exec_prefix}/lib ++libexecdir = ${exec_prefix}/libexec ++localedir = ${datarootdir}/locale ++localstatedir = ${prefix}/var ++mandir = ${datarootdir}/man ++mkdir_p = $(MKDIR_P) ++oldincludedir = /usr/include ++pdfdir = ${docdir} ++prefix = /usr/local ++program_transform_name = s,x,x, ++psdir = ${docdir} ++real_target_noncanonical = ++runstatedir = ${localstatedir}/run ++sbindir = ${exec_prefix}/sbin ++sharedstatedir = ${prefix}/com ++srcdir = . ++sysconfdir = ${prefix}/etc ++target = aarch64-unknown-linux-gnu ++target_alias = ++target_cpu = aarch64 ++target_noncanonical := ++target_os = linux-gnu ++target_vendor = unknown ++top_build_prefix = ++top_builddir = . ++top_srcdir = . ++with_libiberty = ../libiberty ++ACLOCAL_AMFLAGS = -I .. -I ../config ++AUTOMAKE_OPTIONS = no-dependencies ++gcc_version := $(shell @get_gcc_base_ver@ $(top_srcdir)/../gcc/BASE-VER) ++libexecsubdir := $(libexecdir)/gcc/$(real_target_noncanonical)/$(gcc_version)$(accel_dir_suffix) ++AM_CPPFLAGS = -I$(top_srcdir)/../include $(DEFS) -std=c++11 ++AM_CXXFLAGS = @ac_bolt_plugin_warn_cflags@ -std=c++11 ++AM_LDFLAGS = -Wc,-static-libgcc ++AM_LIBTOOLFLAGS = --tag=disable-static ++libexecsub_LTLIBRARIES = libbolt_plugin.la ++in_gcc_libs = $(foreach lib, $(libexecsub_LTLIBRARIES), $(gcc_build_dir)/$(lib)) ++libbolt_plugin_la_SOURCES = bolt-plugin.cc ++# Note that we intentionally override the bindir supplied by ACX_LT_HOST_FLAGS. 
++libbolt_plugin_la_LDFLAGS = $(AM_LDFLAGS) $(lt_host_flags) -module \ ++ -bindir $(libexecsubdir) $(if $(wildcard \ ++ $(libiberty_noasan)),, $(if $(wildcard \ ++ $(libiberty_pic)),,-Wc,$(libiberty))) ++# Can be simplified when libiberty becomes a normal convenience library. ++libiberty = $(with_libiberty)/libiberty.a ++libiberty_noasan = $(with_libiberty)/noasan/libiberty.a ++libiberty_pic = $(with_libiberty)/pic/libiberty.a ++Wc = -Wc, ++libbolt_plugin_la_LIBADD = \ ++ $(if $(wildcard $(libiberty_noasan)),$(Wc)$(libiberty_noasan), \ ++ $(if $(wildcard $(libiberty_pic)),$(Wc)$(libiberty_pic),)) ++ ++libbolt_plugin_la_DEPENDENCIES = \ ++ $(if $(wildcard $(libiberty_noasan)),$(libiberty_noasan), \ ++ $(if $(wildcard $(libiberty_pic)),$(libiberty_pic),)) ++ ++LTLDFLAGS = $(shell $(SHELL) $(top_srcdir)/../libtool-ldflags $(LDFLAGS)) ++libbolt_plugin_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ ++ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) ${AM_CPPFLAGS} $(CXXFLAGS) \ ++ $(libbolt_plugin_la_LDFLAGS) $(LTLDFLAGS) -o $@ ++ ++all: config.h ++ $(MAKE) $(AM_MAKEFLAGS) all-am ++ ++.SUFFIXES: ++.SUFFIXES: .cc .lo .o .obj ++am--refresh: Makefile ++ @: ++$(srcdir)/Makefile.in: # $(srcdir)/Makefile.am $(am__configure_deps) ++ @for dep in $?; do \ ++ case '$(am__configure_deps)' in \ ++ *$$dep*) \ ++ echo ' cd $(srcdir) && $(AUTOMAKE) --foreign'; \ ++ $(am__cd) $(srcdir) && $(AUTOMAKE) --foreign \ ++ && exit 0; \ ++ exit 1;; \ ++ esac; \ ++ done; \ ++ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign Makefile'; \ ++ $(am__cd) $(top_srcdir) && \ ++ $(AUTOMAKE) --foreign Makefile ++Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status ++ @case '$?' 
in \ ++ *config.status*) \ ++ echo ' $(SHELL) ./config.status'; \ ++ $(SHELL) ./config.status;; \ ++ *) \ ++ echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__maybe_remake_depfiles)'; \ ++ cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__maybe_remake_depfiles);; \ ++ esac; ++ ++$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) ++ $(SHELL) ./config.status --recheck ++ ++$(top_srcdir)/configure: # $(am__configure_deps) ++ $(am__cd) $(srcdir) && $(AUTOCONF) ++$(ACLOCAL_M4): # $(am__aclocal_m4_deps) ++ $(am__cd) $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS) ++$(am__aclocal_m4_deps): ++ ++config.h: stamp-h1 ++ @test -f $@ || rm -f stamp-h1 ++ @test -f $@ || $(MAKE) $(AM_MAKEFLAGS) stamp-h1 ++ ++stamp-h1: $(srcdir)/config.h.in $(top_builddir)/config.status ++ @rm -f stamp-h1 ++ cd $(top_builddir) && $(SHELL) ./config.status config.h ++$(srcdir)/config.h.in: # $(am__configure_deps) ++ ($(am__cd) $(top_srcdir) && $(AUTOHEADER)) ++ rm -f stamp-h1 ++ touch $@ ++ ++distclean-hdr: ++ -rm -f config.h stamp-h1 ++ ++install-libexecsubLTLIBRARIES: $(libexecsub_LTLIBRARIES) ++ @$(NORMAL_INSTALL) ++ @list='$(libexecsub_LTLIBRARIES)'; test -n "$(libexecsubdir)" || list=; \ ++ list2=; for p in $$list; do \ ++ if test -f $$p; then \ ++ list2="$$list2 $$p"; \ ++ else :; fi; \ ++ done; \ ++ test -z "$$list2" || { \ ++ echo " $(MKDIR_P) '$(DESTDIR)$(libexecsubdir)'"; \ ++ $(MKDIR_P) "$(DESTDIR)$(libexecsubdir)" || exit 1; \ ++ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libexecsubdir)'"; \ ++ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libexecsubdir)"; \ ++ } ++ ++uninstall-libexecsubLTLIBRARIES: ++ @$(NORMAL_UNINSTALL) ++ @list='$(libexecsub_LTLIBRARIES)'; test -n "$(libexecsubdir)" || list=; \ ++ for p in $$list; do \ ++ $(am__strip_dir) \ ++ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) 
$(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libexecsubdir)/$$f'"; \ ++ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libexecsubdir)/$$f"; \ ++ done ++ ++clean-libexecsubLTLIBRARIES: ++ -test -z "$(libexecsub_LTLIBRARIES)" || rm -f $(libexecsub_LTLIBRARIES) ++ @list='$(libexecsub_LTLIBRARIES)'; \ ++ locs=`for p in $$list; do echo $$p; done | \ ++ sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \ ++ sort -u`; \ ++ test -z "$$locs" || { \ ++ echo rm -f $${locs}; \ ++ rm -f $${locs}; \ ++ } ++ ++libbolt_plugin.la: $(libbolt_plugin_la_OBJECTS) $(libbolt_plugin_la_DEPENDENCIES) $(EXTRA_libbolt_plugin_la_DEPENDENCIES) ++ $(AM_V_GEN)$(libbolt_plugin_la_LINK) -rpath $(libexecsubdir) $(libbolt_plugin_la_OBJECTS) $(libbolt_plugin_la_LIBADD) $(LIBS) ++ ++mostlyclean-compile: ++ -rm -f *.$(OBJEXT) ++ ++distclean-compile: ++ -rm -f *.tab.c ++ ++.cc.o: ++ $(AM_V_CXX)$(CXXCOMPILE) -c -o $@ $< ++ ++.cc.obj: ++ $(AM_V_CXX)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` ++ ++.cc.lo: ++ $(AM_V_CXX)$(LTCXXCOMPILE) -c -o $@ $< ++ ++mostlyclean-libtool: ++ -rm -f *.lo ++ ++clean-libtool: ++ -rm -rf .libs _libs ++ ++distclean-libtool: ++ -rm -f libtool config.lt ++ ++ID: $(am__tagged_files) ++ $(am__define_uniq_tagged_files); mkid -fID $$unique ++tags: tags-am ++TAGS: tags ++ ++tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) ++ set x; \ ++ here=`pwd`; \ ++ $(am__define_uniq_tagged_files); \ ++ shift; \ ++ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ ++ test -n "$$unique" || unique=$$empty_fix; \ ++ if test $$# -gt 0; then \ ++ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ ++ "$$@" $$unique; \ ++ else \ ++ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ ++ $$unique; \ ++ fi; \ ++ fi ++ctags: ctags-am ++ ++CTAGS: ctags ++ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) ++ $(am__define_uniq_tagged_files); \ ++ test -z "$(CTAGS_ARGS)$$unique" \ ++ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ ++ 
$$unique ++ ++GTAGS: ++ here=`$(am__cd) $(top_builddir) && pwd` \ ++ && $(am__cd) $(top_srcdir) \ ++ && gtags -i $(GTAGS_ARGS) "$$here" ++cscope: cscope.files ++ test ! -s cscope.files \ ++ || $(CSCOPE) -b -q $(AM_CSCOPEFLAGS) $(CSCOPEFLAGS) -i cscope.files $(CSCOPE_ARGS) ++clean-cscope: ++ -rm -f cscope.files ++cscope.files: clean-cscope cscopelist ++cscopelist: cscopelist-am ++ ++cscopelist-am: $(am__tagged_files) ++ list='$(am__tagged_files)'; \ ++ case "$(srcdir)" in \ ++ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ ++ *) sdir=$(subdir)/$(srcdir) ;; \ ++ esac; \ ++ for i in $$list; do \ ++ if test -f "$$i"; then \ ++ echo "$(subdir)/$$i"; \ ++ else \ ++ echo "$$sdir/$$i"; \ ++ fi; \ ++ done >> $(top_builddir)/cscope.files ++ ++distclean-tags: ++ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags ++ -rm -f cscope.out cscope.in.out cscope.po.out cscope.files ++check-am: all-am ++check: check-am ++all-am: Makefile $(LTLIBRARIES) config.h ++installdirs: ++ for dir in "$(DESTDIR)$(libexecsubdir)"; do \ ++ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ ++ done ++install: install-am ++install-exec: install-exec-am ++install-data: install-data-am ++uninstall: uninstall-am ++ ++install-am: all-am ++ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am ++ ++installcheck: installcheck-am ++install-strip: ++ if test -z '$(STRIP)'; then \ ++ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ ++ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ ++ install; \ ++ else \ ++ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ ++ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ ++ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ ++ fi ++mostlyclean-generic: ++ ++clean-generic: ++ ++distclean-generic: ++ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) ++ -test . 
= "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) ++ ++maintainer-clean-generic: ++ @echo "This command is intended for maintainers to use" ++ @echo "it deletes files that may require special tools to rebuild." ++clean: clean-am ++ ++clean-am: clean-generic clean-libexecsubLTLIBRARIES clean-libtool \ ++ mostlyclean-am ++ ++distclean: distclean-am ++ -rm -f $(am__CONFIG_DISTCLEAN_FILES) ++ -rm -f Makefile ++distclean-am: clean-am distclean-compile distclean-generic \ ++ distclean-hdr distclean-libtool distclean-tags ++ ++dvi: dvi-am ++ ++dvi-am: ++ ++html: html-am ++ ++html-am: ++ ++info: info-am ++ ++info-am: ++ ++install-data-am: ++ ++install-dvi: install-dvi-am ++ ++install-dvi-am: ++ ++install-exec-am: install-libexecsubLTLIBRARIES ++ ++install-html: install-html-am ++ ++install-html-am: ++ ++install-info: install-info-am ++ ++install-info-am: ++ ++install-man: ++ ++install-pdf: install-pdf-am ++ ++install-pdf-am: ++ ++install-ps: install-ps-am ++ ++install-ps-am: ++ ++installcheck-am: ++ ++maintainer-clean: maintainer-clean-am ++ -rm -f $(am__CONFIG_DISTCLEAN_FILES) ++ -rm -rf $(top_srcdir)/autom4te.cache ++ -rm -f Makefile ++maintainer-clean-am: distclean-am maintainer-clean-generic ++ ++mostlyclean: mostlyclean-am ++ ++mostlyclean-am: mostlyclean-compile mostlyclean-generic \ ++ mostlyclean-libtool ++ ++pdf: pdf-am ++ ++pdf-am: ++ ++ps: ps-am ++ ++ps-am: ++ ++uninstall-am: uninstall-libexecsubLTLIBRARIES ++ ++.MAKE: all install-am install-strip ++ ++.PHONY: CTAGS GTAGS TAGS all all-am am--refresh check check-am clean \ ++ clean-cscope clean-generic clean-libexecsubLTLIBRARIES \ ++ clean-libtool cscope cscopelist-am ctags ctags-am distclean \ ++ distclean-compile distclean-generic distclean-hdr \ ++ distclean-libtool distclean-tags dvi dvi-am html html-am info \ ++ info-am install install-am install-data install-data-am \ ++ install-dvi install-dvi-am install-exec install-exec-am \ ++ install-html install-html-am 
install-info install-info-am \ ++ install-libexecsubLTLIBRARIES install-man install-pdf \ ++ install-pdf-am install-ps install-ps-am install-strip \ ++ installcheck installcheck-am installdirs maintainer-clean \ ++ maintainer-clean-generic mostlyclean mostlyclean-compile \ ++ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ ++ tags tags-am uninstall uninstall-am \ ++ uninstall-libexecsubLTLIBRARIES ++ ++.PRECIOUS: Makefile ++ ++override CXXFLAGS := $(filter-out -fsanitize=address,$(CXXFLAGS)) ++override LDFLAGS := $(filter-out -fsanitize=address,$(LDFLAGS)) ++ ++# Tell versions [3.59,3.63) of GNU make to not export all variables. ++# Otherwise a system limit (for SysV at least) may be exceeded. ++.NOEXPORT: +diff --git a/bolt-plugin/Makefile.am b/bolt-plugin/Makefile.am +new file mode 100644 +index 000000000..c21999237 +--- /dev/null ++++ b/bolt-plugin/Makefile.am +@@ -0,0 +1,43 @@ ++## Process this file with automake to produce Makefile.in. ++ ++ACLOCAL_AMFLAGS = -I .. -I ../config ++AUTOMAKE_OPTIONS = no-dependencies ++ ++gcc_version := $(shell @get_gcc_base_ver@ $(top_srcdir)/../gcc/BASE-VER) ++target_noncanonical := @target_noncanonical@ ++libexecsubdir := $(libexecdir)/gcc/$(real_target_noncanonical)/$(gcc_version)$(accel_dir_suffix) ++ ++AM_CPPFLAGS = -I$(top_srcdir)/../include $(DEFS) -std=c++11 ++AM_CXXFLAGS = @ac_bolt_plugin_warn_cflags@ -std=c++11 ++AM_LDFLAGS = @ac_bolt_plugin_ldflags@ ++AM_LIBTOOLFLAGS = --tag=disable-static ++override CXXFLAGS := $(filter-out -fsanitize=address,$(CXXFLAGS)) ++override LDFLAGS := $(filter-out -fsanitize=address,$(LDFLAGS)) ++ ++libexecsub_LTLIBRARIES = libbolt_plugin.la ++gcc_build_dir = @gcc_build_dir@ ++in_gcc_libs = $(foreach lib, $(libexecsub_LTLIBRARIES), $(gcc_build_dir)/$(lib)) ++ ++libbolt_plugin_la_SOURCES = bolt-plugin.cc ++# Note that we intentionally override the bindir supplied by ACX_LT_HOST_FLAGS. 
++libbolt_plugin_la_LDFLAGS = $(AM_LDFLAGS) \ ++ $(lt_host_flags) -module -bindir $(libexecsubdir) ++# Can be simplified when libiberty becomes a normal convenience library. ++libiberty = $(with_libiberty)/libiberty.a ++libiberty_noasan = $(with_libiberty)/noasan/libiberty.a ++libiberty_pic = $(with_libiberty)/pic/libiberty.a ++Wc=-Wc, ++libbolt_plugin_la_LIBADD = \ ++ $(if $(wildcard $(libiberty_noasan)),$(Wc)$(libiberty_noasan), \ ++ $(if $(wildcard $(libiberty_pic)),$(Wc)$(libiberty_pic),)) ++libbolt_plugin_la_LDFLAGS += \ ++ $(if $(wildcard $(libiberty_noasan)),, \ ++ $(if $(wildcard $(libiberty_pic)),,-Wc,$(libiberty))) ++libbolt_plugin_la_DEPENDENCIES = \ ++ $(if $(wildcard $(libiberty_noasan)),$(libiberty_noasan), \ ++ $(if $(wildcard $(libiberty_pic)),$(libiberty_pic),)) ++LTLDFLAGS = $(shell $(SHELL) $(top_srcdir)/../libtool-ldflags $(LDFLAGS)) ++libbolt_plugin_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ ++ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) ${AM_CPPFLAGS} $(CXXFLAGS) \ ++ $(libbolt_plugin_la_LDFLAGS) $(LTLDFLAGS) -o $@ ++ +diff --git a/bolt-plugin/Makefile.in b/bolt-plugin/Makefile.in +new file mode 100644 +index 000000000..11b59407e +--- /dev/null ++++ b/bolt-plugin/Makefile.in +@@ -0,0 +1,675 @@ ++# Makefile.in generated by automake 1.16.5 from Makefile.am. ++# @configure_input@ ++ ++# Copyright (C) 1994-2021 Free Software Foundation, Inc. ++ ++# This Makefile.in is free software; the Free Software Foundation ++# gives unlimited permission to copy and/or distribute it, ++# with or without modifications, as long as this notice is preserved. ++ ++# This program is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY, to the extent permitted by law; without ++# even the implied warranty of MERCHANTABILITY or FITNESS FOR A ++# PARTICULAR PURPOSE. 
++ ++@SET_MAKE@ ++ ++VPATH = @srcdir@ ++am__is_gnu_make = { \ ++ if test -z '$(MAKELEVEL)'; then \ ++ false; \ ++ elif test -n '$(MAKE_HOST)'; then \ ++ true; \ ++ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ ++ true; \ ++ else \ ++ false; \ ++ fi; \ ++} ++am__make_running_with_option = \ ++ case $${target_option-} in \ ++ ?) ;; \ ++ *) echo "am__make_running_with_option: internal error: invalid" \ ++ "target option '$${target_option-}' specified" >&2; \ ++ exit 1;; \ ++ esac; \ ++ has_opt=no; \ ++ sane_makeflags=$$MAKEFLAGS; \ ++ if $(am__is_gnu_make); then \ ++ sane_makeflags=$$MFLAGS; \ ++ else \ ++ case $$MAKEFLAGS in \ ++ *\\[\ \ ]*) \ ++ bs=\\; \ ++ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ ++ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ ++ esac; \ ++ fi; \ ++ skip_next=no; \ ++ strip_trailopt () \ ++ { \ ++ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ ++ }; \ ++ for flg in $$sane_makeflags; do \ ++ test $$skip_next = yes && { skip_next=no; continue; }; \ ++ case $$flg in \ ++ *=*|--*) continue;; \ ++ -*I) strip_trailopt 'I'; skip_next=yes;; \ ++ -*I?*) strip_trailopt 'I';; \ ++ -*O) strip_trailopt 'O'; skip_next=yes;; \ ++ -*O?*) strip_trailopt 'O';; \ ++ -*l) strip_trailopt 'l'; skip_next=yes;; \ ++ -*l?*) strip_trailopt 'l';; \ ++ -[dEDm]) skip_next=yes;; \ ++ -[JT]) skip_next=yes;; \ ++ esac; \ ++ case $$flg in \ ++ *$$target_option*) has_opt=yes; break;; \ ++ esac; \ ++ done; \ ++ test $$has_opt = yes ++am__make_dryrun = (target_option=n; $(am__make_running_with_option)) ++am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) ++pkgdatadir = $(datadir)/@PACKAGE@ ++pkgincludedir = $(includedir)/@PACKAGE@ ++pkglibdir = $(libdir)/@PACKAGE@ ++pkglibexecdir = $(libexecdir)/@PACKAGE@ ++am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd ++install_sh_DATA = $(install_sh) -c -m 644 ++install_sh_PROGRAM = $(install_sh) -c ++install_sh_SCRIPT = $(install_sh) -c ++INSTALL_HEADER = $(INSTALL_DATA) ++transform = 
$(program_transform_name) ++NORMAL_INSTALL = : ++PRE_INSTALL = : ++POST_INSTALL = : ++NORMAL_UNINSTALL = : ++PRE_UNINSTALL = : ++POST_UNINSTALL = : ++build_triplet = @build@ ++host_triplet = @host@ ++target_triplet = @target@ ++subdir = . ++ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 ++am__aclocal_m4_deps = $(top_srcdir)/configure.ac ++am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ ++ $(ACLOCAL_M4) ++DIST_COMMON = $(srcdir)/Makefile.am $(top_srcdir)/configure \ ++ $(am__configure_deps) ++am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \ ++ configure.lineno config.status.lineno ++mkinstalldirs = $(SHELL) $(top_srcdir)/../mkinstalldirs ++CONFIG_HEADER = config.h ++CONFIG_CLEAN_FILES = ++CONFIG_CLEAN_VPATH_FILES = ++am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; ++am__vpath_adj = case $$p in \ ++ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ ++ *) f=$$p;; \ ++ esac; ++am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; ++am__install_max = 40 ++am__nobase_strip_setup = \ ++ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` ++am__nobase_strip = \ ++ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" ++am__nobase_list = $(am__nobase_strip_setup); \ ++ for p in $$list; do echo "$$p $$p"; done | \ ++ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ ++ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ ++ if (++n[$$2] == $(am__install_max)) \ ++ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ ++ END { for (dir in files) print dir, files[dir] }' ++am__base_list = \ ++ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ ++ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' ++am__uninstall_files_from_dir = { \ ++ test -z "$$files" \ ++ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! 
-r "$$dir"; } \ ++ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ ++ $(am__cd) "$$dir" && rm -f $$files; }; \ ++ } ++am__installdirs = "$(DESTDIR)$(libexecsubdir)" ++LTLIBRARIES = $(libexecsub_LTLIBRARIES) ++am_libbolt_plugin_la_OBJECTS = bolt-plugin.lo ++libbolt_plugin_la_OBJECTS = $(am_libbolt_plugin_la_OBJECTS) ++AM_V_P = $(am__v_P_@AM_V@) ++am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) ++am__v_P_0 = false ++am__v_P_1 = : ++AM_V_GEN = $(am__v_GEN_@AM_V@) ++am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) ++am__v_GEN_0 = @echo " GEN " $@; ++am__v_GEN_1 = ++AM_V_at = $(am__v_at_@AM_V@) ++am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) ++am__v_at_0 = @ ++am__v_at_1 = ++DEFAULT_INCLUDES = -I.@am__isrc@ ++depcomp = ++am__maybe_remake_depfiles = ++CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ ++ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) ++AM_V_lt = $(am__v_lt_@AM_V@) ++am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) ++am__v_lt_0 = --silent ++am__v_lt_1 = ++LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ ++ $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \ ++ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ ++ $(AM_CXXFLAGS) $(CXXFLAGS) ++AM_V_CXX = $(am__v_CXX_@AM_V@) ++am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@) ++am__v_CXX_0 = @echo " CXX " $@; ++am__v_CXX_1 = ++CXXLD = $(CXX) ++CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ ++ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ ++ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ ++AM_V_CXXLD = $(am__v_CXXLD_@AM_V@) ++am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@) ++am__v_CXXLD_0 = @echo " CXXLD " $@; ++am__v_CXXLD_1 = ++SOURCES = $(libbolt_plugin_la_SOURCES) ++am__can_run_installinfo = \ ++ case $$AM_UPDATE_INFO_DIR in \ ++ n|no|NO) false;; \ ++ *) (install-info --version) >/dev/null 2>&1;; \ ++ esac ++am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) \ ++ config.h.in ++# Read a list of newline-separated strings from the standard input, ++# and print each of them once, 
without duplicates. Input order is ++# *not* preserved. ++am__uniquify_input = $(AWK) '\ ++ BEGIN { nonempty = 0; } \ ++ { items[$$0] = 1; nonempty = 1; } \ ++ END { if (nonempty) { for (i in items) print i; }; } \ ++' ++# Make sure the list of sources is unique. This is necessary because, ++# e.g., the same source file might be shared among _SOURCES variables ++# for different programs/libraries. ++am__define_uniq_tagged_files = \ ++ list='$(am__tagged_files)'; \ ++ unique=`for i in $$list; do \ ++ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ ++ done | $(am__uniquify_input)` ++AM_RECURSIVE_TARGETS = cscope ++ACLOCAL = @ACLOCAL@ ++AMTAR = @AMTAR@ ++AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ ++AR = @AR@ ++AUTOCONF = @AUTOCONF@ ++AUTOHEADER = @AUTOHEADER@ ++AUTOMAKE = @AUTOMAKE@ ++AWK = @AWK@ ++CC = @CC@ ++CCDEPMODE = @CCDEPMODE@ ++CFLAGS = @CFLAGS@ ++CPPFLAGS = @CPPFLAGS@ ++CSCOPE = @CSCOPE@ ++CTAGS = @CTAGS@ ++CXX = @CXX@ ++CXXCPP = @CXXCPP@ ++CXXDEPMODE = @CXXDEPMODE@ ++CXXFLAGS = @CXXFLAGS@ ++CYGPATH_W = @CYGPATH_W@ ++DEFS = @DEFS@ ++DEPDIR = @DEPDIR@ ++DLLTOOL = @DLLTOOL@ ++DSYMUTIL = @DSYMUTIL@ ++DUMPBIN = @DUMPBIN@ ++ECHO_C = @ECHO_C@ ++ECHO_N = @ECHO_N@ ++ECHO_T = @ECHO_T@ ++EGREP = @EGREP@ ++ETAGS = @ETAGS@ ++EXEEXT = @EXEEXT@ ++FGREP = @FGREP@ ++FILECMD = @FILECMD@ ++GREP = @GREP@ ++INSTALL = @INSTALL@ ++INSTALL_DATA = @INSTALL_DATA@ ++INSTALL_PROGRAM = @INSTALL_PROGRAM@ ++INSTALL_SCRIPT = @INSTALL_SCRIPT@ ++INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ ++LD = @LD@ ++LDFLAGS = @LDFLAGS@ ++LIBOBJS = @LIBOBJS@ ++LIBS = @LIBS@ ++LIBTOOL = @LIBTOOL@ ++LIPO = @LIPO@ ++LN_S = @LN_S@ ++LTLIBOBJS = @LTLIBOBJS@ ++LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ ++MAINT = @MAINT@ ++MAKEINFO = @MAKEINFO@ ++MANIFEST_TOOL = @MANIFEST_TOOL@ ++MKDIR_P = @MKDIR_P@ ++NM = @NM@ ++NMEDIT = @NMEDIT@ ++OBJDUMP = @OBJDUMP@ ++OBJEXT = @OBJEXT@ ++OTOOL = @OTOOL@ ++OTOOL64 = @OTOOL64@ ++PACKAGE = @PACKAGE@ ++PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ ++PACKAGE_NAME 
= @PACKAGE_NAME@ ++PACKAGE_STRING = @PACKAGE_STRING@ ++PACKAGE_TARNAME = @PACKAGE_TARNAME@ ++PACKAGE_URL = @PACKAGE_URL@ ++PACKAGE_VERSION = @PACKAGE_VERSION@ ++PATH_SEPARATOR = @PATH_SEPARATOR@ ++RANLIB = @RANLIB@ ++SED = @SED@ ++SET_MAKE = @SET_MAKE@ ++SHELL = @SHELL@ ++STRIP = @STRIP@ ++VERSION = @VERSION@ ++abs_builddir = @abs_builddir@ ++abs_srcdir = @abs_srcdir@ ++abs_top_builddir = @abs_top_builddir@ ++abs_top_srcdir = @abs_top_srcdir@ ++ac_bolt_plugin_ldflags = @ac_bolt_plugin_ldflags@ ++ac_ct_AR = @ac_ct_AR@ ++ac_ct_CC = @ac_ct_CC@ ++ac_ct_CXX = @ac_ct_CXX@ ++ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ ++accel_dir_suffix = @accel_dir_suffix@ ++am__include = @am__include@ ++am__leading_dot = @am__leading_dot@ ++am__quote = @am__quote@ ++am__tar = @am__tar@ ++am__untar = @am__untar@ ++bindir = @bindir@ ++build = @build@ ++build_alias = @build_alias@ ++build_cpu = @build_cpu@ ++build_os = @build_os@ ++build_vendor = @build_vendor@ ++builddir = @builddir@ ++datadir = @datadir@ ++datarootdir = @datarootdir@ ++docdir = @docdir@ ++dvidir = @dvidir@ ++exec_prefix = @exec_prefix@ ++gcc_build_dir = @gcc_build_dir@ ++host = @host@ ++host_alias = @host_alias@ ++host_cpu = @host_cpu@ ++host_os = @host_os@ ++host_vendor = @host_vendor@ ++htmldir = @htmldir@ ++includedir = @includedir@ ++infodir = @infodir@ ++install_sh = @install_sh@ ++libdir = @libdir@ ++libexecdir = @libexecdir@ ++localedir = @localedir@ ++localstatedir = @localstatedir@ ++mandir = @mandir@ ++mkdir_p = @mkdir_p@ ++oldincludedir = @oldincludedir@ ++pdfdir = @pdfdir@ ++prefix = @prefix@ ++program_transform_name = @program_transform_name@ ++psdir = @psdir@ ++real_target_noncanonical = @real_target_noncanonical@ ++runstatedir = @runstatedir@ ++sbindir = @sbindir@ ++sharedstatedir = @sharedstatedir@ ++srcdir = @srcdir@ ++sysconfdir = @sysconfdir@ ++target = @target@ ++target_alias = @target_alias@ ++target_cpu = @target_cpu@ ++target_noncanonical := @target_noncanonical@ ++target_os = @target_os@ ++target_vendor = 
@target_vendor@ ++top_build_prefix = @top_build_prefix@ ++top_builddir = @top_builddir@ ++top_srcdir = @top_srcdir@ ++with_libiberty = @with_libiberty@ ++ACLOCAL_AMFLAGS = -I .. -I ../config ++AUTOMAKE_OPTIONS = no-dependencies ++gcc_version := $(shell @get_gcc_base_ver@ $(top_srcdir)/../gcc/BASE-VER) ++libexecsubdir := $(libexecdir)/gcc/$(real_target_noncanonical)/$(gcc_version)$(accel_dir_suffix) ++AM_CPPFLAGS = -I$(top_srcdir)/../include $(DEFS) -std=c++11 ++AM_CXXFLAGS = @ac_bolt_plugin_warn_cflags@ -std=c++11 ++AM_LDFLAGS = @ac_bolt_plugin_ldflags@ ++AM_LIBTOOLFLAGS = --tag=disable-static ++libexecsub_LTLIBRARIES = libbolt_plugin.la ++in_gcc_libs = $(foreach lib, $(libexecsub_LTLIBRARIES), $(gcc_build_dir)/$(lib)) ++libbolt_plugin_la_SOURCES = bolt-plugin.cc ++# Note that we intentionally override the bindir supplied by ACX_LT_HOST_FLAGS. ++libbolt_plugin_la_LDFLAGS = $(AM_LDFLAGS) $(lt_host_flags) -module \ ++ -bindir $(libexecsubdir) $(if $(wildcard \ ++ $(libiberty_noasan)),, $(if $(wildcard \ ++ $(libiberty_pic)),,-Wc,$(libiberty))) ++# Can be simplified when libiberty becomes a normal convenience library. 
++libiberty = $(with_libiberty)/libiberty.a ++libiberty_noasan = $(with_libiberty)/noasan/libiberty.a ++libiberty_pic = $(with_libiberty)/pic/libiberty.a ++Wc = -Wc, ++libbolt_plugin_la_LIBADD = \ ++ $(if $(wildcard $(libiberty_noasan)),$(Wc)$(libiberty_noasan), \ ++ $(if $(wildcard $(libiberty_pic)),$(Wc)$(libiberty_pic),)) ++ ++libbolt_plugin_la_DEPENDENCIES = \ ++ $(if $(wildcard $(libiberty_noasan)),$(libiberty_noasan), \ ++ $(if $(wildcard $(libiberty_pic)),$(libiberty_pic),)) ++ ++LTLDFLAGS = $(shell $(SHELL) $(top_srcdir)/../libtool-ldflags $(LDFLAGS)) ++libbolt_plugin_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ ++ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) ${AM_CPPFLAGS} $(CXXFLAGS) \ ++ $(libbolt_plugin_la_LDFLAGS) $(LTLDFLAGS) -o $@ ++ ++all: config.h ++ $(MAKE) $(AM_MAKEFLAGS) all-am ++ ++.SUFFIXES: ++.SUFFIXES: .cc .lo .o .obj ++am--refresh: Makefile ++ @: ++$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) ++ @for dep in $?; do \ ++ case '$(am__configure_deps)' in \ ++ *$$dep*) \ ++ echo ' cd $(srcdir) && $(AUTOMAKE) --foreign'; \ ++ $(am__cd) $(srcdir) && $(AUTOMAKE) --foreign \ ++ && exit 0; \ ++ exit 1;; \ ++ esac; \ ++ done; \ ++ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign Makefile'; \ ++ $(am__cd) $(top_srcdir) && \ ++ $(AUTOMAKE) --foreign Makefile ++Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status ++ @case '$?' 
in \ ++ *config.status*) \ ++ echo ' $(SHELL) ./config.status'; \ ++ $(SHELL) ./config.status;; \ ++ *) \ ++ echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__maybe_remake_depfiles)'; \ ++ cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__maybe_remake_depfiles);; \ ++ esac; ++ ++$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) ++ $(SHELL) ./config.status --recheck ++ ++$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) ++ $(am__cd) $(srcdir) && $(AUTOCONF) ++$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) ++ $(am__cd) $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS) ++$(am__aclocal_m4_deps): ++ ++config.h: stamp-h1 ++ @test -f $@ || rm -f stamp-h1 ++ @test -f $@ || $(MAKE) $(AM_MAKEFLAGS) stamp-h1 ++ ++stamp-h1: $(srcdir)/config.h.in $(top_builddir)/config.status ++ @rm -f stamp-h1 ++ cd $(top_builddir) && $(SHELL) ./config.status config.h ++$(srcdir)/config.h.in: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) ++ ($(am__cd) $(top_srcdir) && $(AUTOHEADER)) ++ rm -f stamp-h1 ++ touch $@ ++ ++distclean-hdr: ++ -rm -f config.h stamp-h1 ++ ++install-libexecsubLTLIBRARIES: $(libexecsub_LTLIBRARIES) ++ @$(NORMAL_INSTALL) ++ @list='$(libexecsub_LTLIBRARIES)'; test -n "$(libexecsubdir)" || list=; \ ++ list2=; for p in $$list; do \ ++ if test -f $$p; then \ ++ list2="$$list2 $$p"; \ ++ else :; fi; \ ++ done; \ ++ test -z "$$list2" || { \ ++ echo " $(MKDIR_P) '$(DESTDIR)$(libexecsubdir)'"; \ ++ $(MKDIR_P) "$(DESTDIR)$(libexecsubdir)" || exit 1; \ ++ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libexecsubdir)'"; \ ++ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libexecsubdir)"; \ ++ } ++ ++uninstall-libexecsubLTLIBRARIES: ++ @$(NORMAL_UNINSTALL) ++ @list='$(libexecsub_LTLIBRARIES)'; test -n "$(libexecsubdir)" || list=; \ ++ for p in $$list; do \ ++ 
$(am__strip_dir) \ ++ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libexecsubdir)/$$f'"; \ ++ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libexecsubdir)/$$f"; \ ++ done ++ ++clean-libexecsubLTLIBRARIES: ++ -test -z "$(libexecsub_LTLIBRARIES)" || rm -f $(libexecsub_LTLIBRARIES) ++ @list='$(libexecsub_LTLIBRARIES)'; \ ++ locs=`for p in $$list; do echo $$p; done | \ ++ sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \ ++ sort -u`; \ ++ test -z "$$locs" || { \ ++ echo rm -f $${locs}; \ ++ rm -f $${locs}; \ ++ } ++ ++libbolt_plugin.la: $(libbolt_plugin_la_OBJECTS) $(libbolt_plugin_la_DEPENDENCIES) $(EXTRA_libbolt_plugin_la_DEPENDENCIES) ++ $(AM_V_GEN)$(libbolt_plugin_la_LINK) -rpath $(libexecsubdir) $(libbolt_plugin_la_OBJECTS) $(libbolt_plugin_la_LIBADD) $(LIBS) ++ ++mostlyclean-compile: ++ -rm -f *.$(OBJEXT) ++ ++distclean-compile: ++ -rm -f *.tab.c ++ ++.cc.o: ++ $(AM_V_CXX)$(CXXCOMPILE) -c -o $@ $< ++ ++.cc.obj: ++ $(AM_V_CXX)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` ++ ++.cc.lo: ++ $(AM_V_CXX)$(LTCXXCOMPILE) -c -o $@ $< ++ ++mostlyclean-libtool: ++ -rm -f *.lo ++ ++clean-libtool: ++ -rm -rf .libs _libs ++ ++distclean-libtool: ++ -rm -f libtool config.lt ++ ++ID: $(am__tagged_files) ++ $(am__define_uniq_tagged_files); mkid -fID $$unique ++tags: tags-am ++TAGS: tags ++ ++tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) ++ set x; \ ++ here=`pwd`; \ ++ $(am__define_uniq_tagged_files); \ ++ shift; \ ++ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ ++ test -n "$$unique" || unique=$$empty_fix; \ ++ if test $$# -gt 0; then \ ++ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ ++ "$$@" $$unique; \ ++ else \ ++ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ ++ $$unique; \ ++ fi; \ ++ fi ++ctags: ctags-am ++ ++CTAGS: ctags ++ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) ++ $(am__define_uniq_tagged_files); \ ++ test -z "$(CTAGS_ARGS)$$unique" \ ++ || 
$(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ ++ $$unique ++ ++GTAGS: ++ here=`$(am__cd) $(top_builddir) && pwd` \ ++ && $(am__cd) $(top_srcdir) \ ++ && gtags -i $(GTAGS_ARGS) "$$here" ++cscope: cscope.files ++ test ! -s cscope.files \ ++ || $(CSCOPE) -b -q $(AM_CSCOPEFLAGS) $(CSCOPEFLAGS) -i cscope.files $(CSCOPE_ARGS) ++clean-cscope: ++ -rm -f cscope.files ++cscope.files: clean-cscope cscopelist ++cscopelist: cscopelist-am ++ ++cscopelist-am: $(am__tagged_files) ++ list='$(am__tagged_files)'; \ ++ case "$(srcdir)" in \ ++ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ ++ *) sdir=$(subdir)/$(srcdir) ;; \ ++ esac; \ ++ for i in $$list; do \ ++ if test -f "$$i"; then \ ++ echo "$(subdir)/$$i"; \ ++ else \ ++ echo "$$sdir/$$i"; \ ++ fi; \ ++ done >> $(top_builddir)/cscope.files ++ ++distclean-tags: ++ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags ++ -rm -f cscope.out cscope.in.out cscope.po.out cscope.files ++check-am: all-am ++check: check-am ++all-am: Makefile $(LTLIBRARIES) config.h ++installdirs: ++ for dir in "$(DESTDIR)$(libexecsubdir)"; do \ ++ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ ++ done ++install: install-am ++install-exec: install-exec-am ++install-data: install-data-am ++uninstall: uninstall-am ++ ++install-am: all-am ++ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am ++ ++installcheck: installcheck-am ++install-strip: ++ if test -z '$(STRIP)'; then \ ++ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ ++ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ ++ install; \ ++ else \ ++ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ ++ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ ++ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ ++ fi ++mostlyclean-generic: ++ ++clean-generic: ++ ++distclean-generic: ++ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) ++ -test . 
= "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) ++ ++maintainer-clean-generic: ++ @echo "This command is intended for maintainers to use" ++ @echo "it deletes files that may require special tools to rebuild." ++clean: clean-am ++ ++clean-am: clean-generic clean-libexecsubLTLIBRARIES clean-libtool \ ++ mostlyclean-am ++ ++distclean: distclean-am ++ -rm -f $(am__CONFIG_DISTCLEAN_FILES) ++ -rm -f Makefile ++distclean-am: clean-am distclean-compile distclean-generic \ ++ distclean-hdr distclean-libtool distclean-tags ++ ++dvi: dvi-am ++ ++dvi-am: ++ ++html: html-am ++ ++html-am: ++ ++info: info-am ++ ++info-am: ++ ++install-data-am: ++ ++install-dvi: install-dvi-am ++ ++install-dvi-am: ++ ++install-exec-am: install-libexecsubLTLIBRARIES ++ ++install-html: install-html-am ++ ++install-html-am: ++ ++install-info: install-info-am ++ ++install-info-am: ++ ++install-man: ++ ++install-pdf: install-pdf-am ++ ++install-pdf-am: ++ ++install-ps: install-ps-am ++ ++install-ps-am: ++ ++installcheck-am: ++ ++maintainer-clean: maintainer-clean-am ++ -rm -f $(am__CONFIG_DISTCLEAN_FILES) ++ -rm -rf $(top_srcdir)/autom4te.cache ++ -rm -f Makefile ++maintainer-clean-am: distclean-am maintainer-clean-generic ++ ++mostlyclean: mostlyclean-am ++ ++mostlyclean-am: mostlyclean-compile mostlyclean-generic \ ++ mostlyclean-libtool ++ ++pdf: pdf-am ++ ++pdf-am: ++ ++ps: ps-am ++ ++ps-am: ++ ++uninstall-am: uninstall-libexecsubLTLIBRARIES ++ ++.MAKE: all install-am install-strip ++ ++.PHONY: CTAGS GTAGS TAGS all all-am am--refresh check check-am clean \ ++ clean-cscope clean-generic clean-libexecsubLTLIBRARIES \ ++ clean-libtool cscope cscopelist-am ctags ctags-am distclean \ ++ distclean-compile distclean-generic distclean-hdr \ ++ distclean-libtool distclean-tags dvi dvi-am html html-am info \ ++ info-am install install-am install-data install-data-am \ ++ install-dvi install-dvi-am install-exec install-exec-am \ ++ install-html install-html-am 
install-info install-info-am \ ++ install-libexecsubLTLIBRARIES install-man install-pdf \ ++ install-pdf-am install-ps install-ps-am install-strip \ ++ installcheck installcheck-am installdirs maintainer-clean \ ++ maintainer-clean-generic mostlyclean mostlyclean-compile \ ++ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ ++ tags tags-am uninstall uninstall-am \ ++ uninstall-libexecsubLTLIBRARIES ++ ++.PRECIOUS: Makefile ++ ++override CXXFLAGS := $(filter-out -fsanitize=address,$(CXXFLAGS)) ++override LDFLAGS := $(filter-out -fsanitize=address,$(LDFLAGS)) ++ ++# Tell versions [3.59,3.63) of GNU make to not export all variables. ++# Otherwise a system limit (for SysV at least) may be exceeded. ++.NOEXPORT: +diff --git a/bolt-plugin/aclocal.m4 b/bolt-plugin/aclocal.m4 +new file mode 100644 +index 000000000..679f2baa4 +--- /dev/null ++++ b/bolt-plugin/aclocal.m4 +@@ -0,0 +1,10250 @@ ++# generated automatically by aclocal 1.16.5 -*- Autoconf -*- ++ ++# Copyright (C) 1996-2021 Free Software Foundation, Inc. ++ ++# This file is free software; the Free Software Foundation ++# gives unlimited permission to copy and/or distribute it, ++# with or without modifications, as long as this notice is preserved. ++ ++# This program is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY, to the extent permitted by law; without ++# even the implied warranty of MERCHANTABILITY or FITNESS FOR A ++# PARTICULAR PURPOSE. ++ ++m4_ifndef([AC_CONFIG_MACRO_DIRS], [m4_defun([_AM_CONFIG_MACRO_DIRS], [])m4_defun([AC_CONFIG_MACRO_DIRS], [_AM_CONFIG_MACRO_DIRS($@)])]) ++m4_ifndef([AC_AUTOCONF_VERSION], ++ [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl ++m4_if(m4_defn([AC_AUTOCONF_VERSION]), [2.71],, ++[m4_warning([this file was generated for autoconf 2.71. ++You have another version of autoconf. It may work, but is not guaranteed to. ++If you have problems, you may need to regenerate the build system entirely. 
++To do so, use the procedure documented by the package, typically 'autoreconf'.])]) ++ ++# libtool.m4 - Configure libtool for the host system. -*-Autoconf-*- ++# ++# Copyright (C) 1996-2001, 2003-2019, 2021-2022 Free Software ++# Foundation, Inc. ++# Written by Gordon Matzigkeit, 1996 ++# ++# This file is free software; the Free Software Foundation gives ++# unlimited permission to copy and/or distribute it, with or without ++# modifications, as long as this notice is preserved. ++ ++m4_define([_LT_COPYING], [dnl ++# Copyright (C) 2014 Free Software Foundation, Inc. ++# This is free software; see the source for copying conditions. There is NO ++# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. ++ ++# GNU Libtool is free software; you can redistribute it and/or modify ++# it under the terms of the GNU General Public License as published by ++# the Free Software Foundation; either version 2 of of the License, or ++# (at your option) any later version. ++# ++# As a special exception to the GNU General Public License, if you ++# distribute this file as part of a program or library that is built ++# using GNU Libtool, you may include this file under the same ++# distribution terms that you use for the rest of that program. ++# ++# GNU Libtool is distributed in the hope that it will be useful, but ++# WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++# GNU General Public License for more details. ++# ++# You should have received a copy of the GNU General Public License ++# along with this program. If not, see . ++]) ++ ++# serial 59 LT_INIT ++ ++ ++# LT_PREREQ(VERSION) ++# ------------------ ++# Complain and exit if this libtool version is less that VERSION. 
++m4_defun([LT_PREREQ], ++[m4_if(m4_version_compare(m4_defn([LT_PACKAGE_VERSION]), [$1]), -1, ++ [m4_default([$3], ++ [m4_fatal([Libtool version $1 or higher is required], ++ 63)])], ++ [$2])]) ++ ++ ++# _LT_CHECK_BUILDDIR ++# ------------------ ++# Complain if the absolute build directory name contains unusual characters ++m4_defun([_LT_CHECK_BUILDDIR], ++[case `pwd` in ++ *\ * | *\ *) ++ AC_MSG_WARN([Libtool does not cope well with whitespace in `pwd`]) ;; ++esac ++]) ++ ++ ++# LT_INIT([OPTIONS]) ++# ------------------ ++AC_DEFUN([LT_INIT], ++[AC_PREREQ([2.62])dnl We use AC_PATH_PROGS_FEATURE_CHECK ++AC_REQUIRE([AC_CONFIG_AUX_DIR_DEFAULT])dnl ++AC_BEFORE([$0], [LT_LANG])dnl ++AC_BEFORE([$0], [LT_OUTPUT])dnl ++AC_BEFORE([$0], [LTDL_INIT])dnl ++m4_require([_LT_CHECK_BUILDDIR])dnl ++ ++dnl Autoconf doesn't catch unexpanded LT_ macros by default: ++m4_pattern_forbid([^_?LT_[A-Z_]+$])dnl ++m4_pattern_allow([^(_LT_EOF|LT_DLGLOBAL|LT_DLLAZY_OR_NOW|LT_MULTI_MODULE)$])dnl ++dnl aclocal doesn't pull ltoptions.m4, ltsugar.m4, or ltversion.m4 ++dnl unless we require an AC_DEFUNed macro: ++AC_REQUIRE([LTOPTIONS_VERSION])dnl ++AC_REQUIRE([LTSUGAR_VERSION])dnl ++AC_REQUIRE([LTVERSION_VERSION])dnl ++AC_REQUIRE([LTOBSOLETE_VERSION])dnl ++m4_require([_LT_PROG_LTMAIN])dnl ++ ++_LT_SHELL_INIT([SHELL=${CONFIG_SHELL-/bin/sh}]) ++ ++dnl Parse OPTIONS ++_LT_SET_OPTIONS([$0], [$1]) ++ ++# This can be used to rebuild libtool when needed ++LIBTOOL_DEPS=$ltmain ++ ++# Always use our own libtool. ++LIBTOOL='$(SHELL) $(top_builddir)/libtool' ++AC_SUBST(LIBTOOL)dnl ++ ++_LT_SETUP ++ ++# Only expand once: ++m4_define([LT_INIT]) ++])# LT_INIT ++ ++# Old names: ++AU_ALIAS([AC_PROG_LIBTOOL], [LT_INIT]) ++AU_ALIAS([AM_PROG_LIBTOOL], [LT_INIT]) ++dnl aclocal-1.4 backwards compatibility: ++dnl AC_DEFUN([AC_PROG_LIBTOOL], []) ++dnl AC_DEFUN([AM_PROG_LIBTOOL], []) ++ ++ ++# _LT_PREPARE_CC_BASENAME ++# ----------------------- ++m4_defun([_LT_PREPARE_CC_BASENAME], [ ++# Calculate cc_basename. 
Skip known compiler wrappers and cross-prefix. ++func_cc_basename () ++{ ++ for cc_temp in @S|@*""; do ++ case $cc_temp in ++ compile | *[[\\/]]compile | ccache | *[[\\/]]ccache ) ;; ++ distcc | *[[\\/]]distcc | purify | *[[\\/]]purify ) ;; ++ \-*) ;; ++ *) break;; ++ esac ++ done ++ func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` ++} ++])# _LT_PREPARE_CC_BASENAME ++ ++ ++# _LT_CC_BASENAME(CC) ++# ------------------- ++# It would be clearer to call AC_REQUIREs from _LT_PREPARE_CC_BASENAME, ++# but that macro is also expanded into generated libtool script, which ++# arranges for $SED and $ECHO to be set by different means. ++m4_defun([_LT_CC_BASENAME], ++[m4_require([_LT_PREPARE_CC_BASENAME])dnl ++AC_REQUIRE([_LT_DECL_SED])dnl ++AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH])dnl ++func_cc_basename $1 ++cc_basename=$func_cc_basename_result ++]) ++ ++ ++# _LT_FILEUTILS_DEFAULTS ++# ---------------------- ++# It is okay to use these file commands and assume they have been set ++# sensibly after 'm4_require([_LT_FILEUTILS_DEFAULTS])'. 
++m4_defun([_LT_FILEUTILS_DEFAULTS], ++[: ${CP="cp -f"} ++: ${MV="mv -f"} ++: ${RM="rm -f"} ++])# _LT_FILEUTILS_DEFAULTS ++ ++ ++# _LT_SETUP ++# --------- ++m4_defun([_LT_SETUP], ++[AC_REQUIRE([AC_CANONICAL_HOST])dnl ++AC_REQUIRE([AC_CANONICAL_BUILD])dnl ++AC_REQUIRE([_LT_PREPARE_SED_QUOTE_VARS])dnl ++AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH])dnl ++ ++_LT_DECL([], [PATH_SEPARATOR], [1], [The PATH separator for the build system])dnl ++dnl ++_LT_DECL([], [host_alias], [0], [The host system])dnl ++_LT_DECL([], [host], [0])dnl ++_LT_DECL([], [host_os], [0])dnl ++dnl ++_LT_DECL([], [build_alias], [0], [The build system])dnl ++_LT_DECL([], [build], [0])dnl ++_LT_DECL([], [build_os], [0])dnl ++dnl ++AC_REQUIRE([AC_PROG_CC])dnl ++AC_REQUIRE([LT_PATH_LD])dnl ++AC_REQUIRE([LT_PATH_NM])dnl ++dnl ++AC_REQUIRE([AC_PROG_LN_S])dnl ++test -z "$LN_S" && LN_S="ln -s" ++_LT_DECL([], [LN_S], [1], [Whether we need soft or hard links])dnl ++dnl ++AC_REQUIRE([LT_CMD_MAX_LEN])dnl ++_LT_DECL([objext], [ac_objext], [0], [Object file suffix (normally "o")])dnl ++_LT_DECL([], [exeext], [0], [Executable file suffix (normally "")])dnl ++dnl ++m4_require([_LT_FILEUTILS_DEFAULTS])dnl ++m4_require([_LT_CHECK_SHELL_FEATURES])dnl ++m4_require([_LT_PATH_CONVERSION_FUNCTIONS])dnl ++m4_require([_LT_CMD_RELOAD])dnl ++m4_require([_LT_DECL_FILECMD])dnl ++m4_require([_LT_CHECK_MAGIC_METHOD])dnl ++m4_require([_LT_CHECK_SHAREDLIB_FROM_LINKLIB])dnl ++m4_require([_LT_CMD_OLD_ARCHIVE])dnl ++m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl ++m4_require([_LT_WITH_SYSROOT])dnl ++m4_require([_LT_CMD_TRUNCATE])dnl ++ ++_LT_CONFIG_LIBTOOL_INIT([ ++# See if we are running on zsh, and set the options that allow our ++# commands through without removal of \ escapes INIT. 
++if test -n "\${ZSH_VERSION+set}"; then ++ setopt NO_GLOB_SUBST ++fi ++]) ++if test -n "${ZSH_VERSION+set}"; then ++ setopt NO_GLOB_SUBST ++fi ++ ++_LT_CHECK_OBJDIR ++ ++m4_require([_LT_TAG_COMPILER])dnl ++ ++case $host_os in ++aix3*) ++ # AIX sometimes has problems with the GCC collect2 program. For some ++ # reason, if we set the COLLECT_NAMES environment variable, the problems ++ # vanish in a puff of smoke. ++ if test set != "${COLLECT_NAMES+set}"; then ++ COLLECT_NAMES= ++ export COLLECT_NAMES ++ fi ++ ;; ++esac ++ ++# Global variables: ++ofile=libtool ++can_build_shared=yes ++ ++# All known linkers require a '.a' archive for static linking (except MSVC and ++# ICC, which need '.lib'). ++libext=a ++ ++with_gnu_ld=$lt_cv_prog_gnu_ld ++ ++old_CC=$CC ++old_CFLAGS=$CFLAGS ++ ++# Set sane defaults for various variables ++test -z "$CC" && CC=cc ++test -z "$LTCC" && LTCC=$CC ++test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS ++test -z "$LD" && LD=ld ++test -z "$ac_objext" && ac_objext=o ++ ++_LT_CC_BASENAME([$compiler]) ++ ++# Only perform the check for file, if the check method requires it ++test -z "$MAGIC_CMD" && MAGIC_CMD=file ++case $deplibs_check_method in ++file_magic*) ++ if test "$file_magic_cmd" = '$MAGIC_CMD'; then ++ _LT_PATH_MAGIC ++ fi ++ ;; ++esac ++ ++# Use C for the default configuration in the libtool script ++LT_SUPPORTED_TAG([CC]) ++_LT_LANG_C_CONFIG ++_LT_LANG_DEFAULT_CONFIG ++_LT_CONFIG_COMMANDS ++])# _LT_SETUP ++ ++ ++# _LT_PREPARE_SED_QUOTE_VARS ++# -------------------------- ++# Define a few sed substitution that help us do robust quoting. ++m4_defun([_LT_PREPARE_SED_QUOTE_VARS], ++[# Backslashify metacharacters that are still active within ++# double-quoted strings. ++sed_quote_subst='s/\([["`$\\]]\)/\\\1/g' ++ ++# Same as above, but do not quote variable references. ++double_quote_subst='s/\([["`\\]]\)/\\\1/g' ++ ++# Sed substitution to delay expansion of an escaped shell variable in a ++# double_quote_subst'ed string. 
++delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' ++ ++# Sed substitution to delay expansion of an escaped single quote. ++delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g' ++ ++# Sed substitution to avoid accidental globbing in evaled expressions ++no_glob_subst='s/\*/\\\*/g' ++]) ++ ++# _LT_PROG_LTMAIN ++# --------------- ++# Note that this code is called both from 'configure', and 'config.status' ++# now that we use AC_CONFIG_COMMANDS to generate libtool. Notably, ++# 'config.status' has no value for ac_aux_dir unless we are using Automake, ++# so we pass a copy along to make sure it has a sensible value anyway. ++m4_defun([_LT_PROG_LTMAIN], ++[m4_ifdef([AC_REQUIRE_AUX_FILE], [AC_REQUIRE_AUX_FILE([ltmain.sh])])dnl ++_LT_CONFIG_LIBTOOL_INIT([ac_aux_dir='$ac_aux_dir']) ++ltmain=$ac_aux_dir/ltmain.sh ++])# _LT_PROG_LTMAIN ++ ++ ++ ++# So that we can recreate a full libtool script including additional ++# tags, we accumulate the chunks of code to send to AC_CONFIG_COMMANDS ++# in macros and then make a single call at the end using the 'libtool' ++# label. ++ ++ ++# _LT_CONFIG_LIBTOOL_INIT([INIT-COMMANDS]) ++# ---------------------------------------- ++# Register INIT-COMMANDS to be passed to AC_CONFIG_COMMANDS later. ++m4_define([_LT_CONFIG_LIBTOOL_INIT], ++[m4_ifval([$1], ++ [m4_append([_LT_OUTPUT_LIBTOOL_INIT], ++ [$1 ++])])]) ++ ++# Initialize. ++m4_define([_LT_OUTPUT_LIBTOOL_INIT]) ++ ++ ++# _LT_CONFIG_LIBTOOL([COMMANDS]) ++# ------------------------------ ++# Register COMMANDS to be passed to AC_CONFIG_COMMANDS later. ++m4_define([_LT_CONFIG_LIBTOOL], ++[m4_ifval([$1], ++ [m4_append([_LT_OUTPUT_LIBTOOL_COMMANDS], ++ [$1 ++])])]) ++ ++# Initialize. 
++m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS]) ++ ++ ++# _LT_CONFIG_SAVE_COMMANDS([COMMANDS], [INIT_COMMANDS]) ++# ----------------------------------------------------- ++m4_defun([_LT_CONFIG_SAVE_COMMANDS], ++[_LT_CONFIG_LIBTOOL([$1]) ++_LT_CONFIG_LIBTOOL_INIT([$2]) ++]) ++ ++ ++# _LT_FORMAT_COMMENT([COMMENT]) ++# ----------------------------- ++# Add leading comment marks to the start of each line, and a trailing ++# full-stop to the whole comment if one is not present already. ++m4_define([_LT_FORMAT_COMMENT], ++[m4_ifval([$1], [ ++m4_bpatsubst([m4_bpatsubst([$1], [^ *], [# ])], ++ [['`$\]], [\\\&])]m4_bmatch([$1], [[!?.]$], [], [.]) ++)]) ++ ++ ++ ++ ++ ++# _LT_DECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION], [IS-TAGGED?]) ++# ------------------------------------------------------------------- ++# CONFIGNAME is the name given to the value in the libtool script. ++# VARNAME is the (base) name used in the configure script. ++# VALUE may be 0, 1 or 2 for a computed quote escaped value based on ++# VARNAME. Any other value will be used directly. 
++m4_define([_LT_DECL], ++[lt_if_append_uniq([lt_decl_varnames], [$2], [, ], ++ [lt_dict_add_subkey([lt_decl_dict], [$2], [libtool_name], ++ [m4_ifval([$1], [$1], [$2])]) ++ lt_dict_add_subkey([lt_decl_dict], [$2], [value], [$3]) ++ m4_ifval([$4], ++ [lt_dict_add_subkey([lt_decl_dict], [$2], [description], [$4])]) ++ lt_dict_add_subkey([lt_decl_dict], [$2], ++ [tagged?], [m4_ifval([$5], [yes], [no])])]) ++]) ++ ++ ++# _LT_TAGDECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION]) ++# -------------------------------------------------------- ++m4_define([_LT_TAGDECL], [_LT_DECL([$1], [$2], [$3], [$4], [yes])]) ++ ++ ++# lt_decl_tag_varnames([SEPARATOR], [VARNAME1...]) ++# ------------------------------------------------ ++m4_define([lt_decl_tag_varnames], ++[_lt_decl_filter([tagged?], [yes], $@)]) ++ ++ ++# _lt_decl_filter(SUBKEY, VALUE, [SEPARATOR], [VARNAME1..]) ++# --------------------------------------------------------- ++m4_define([_lt_decl_filter], ++[m4_case([$#], ++ [0], [m4_fatal([$0: too few arguments: $#])], ++ [1], [m4_fatal([$0: too few arguments: $#: $1])], ++ [2], [lt_dict_filter([lt_decl_dict], [$1], [$2], [], lt_decl_varnames)], ++ [3], [lt_dict_filter([lt_decl_dict], [$1], [$2], [$3], lt_decl_varnames)], ++ [lt_dict_filter([lt_decl_dict], $@)])[]dnl ++]) ++ ++ ++# lt_decl_quote_varnames([SEPARATOR], [VARNAME1...]) ++# -------------------------------------------------- ++m4_define([lt_decl_quote_varnames], ++[_lt_decl_filter([value], [1], $@)]) ++ ++ ++# lt_decl_dquote_varnames([SEPARATOR], [VARNAME1...]) ++# --------------------------------------------------- ++m4_define([lt_decl_dquote_varnames], ++[_lt_decl_filter([value], [2], $@)]) ++ ++ ++# lt_decl_varnames_tagged([SEPARATOR], [VARNAME1...]) ++# --------------------------------------------------- ++m4_define([lt_decl_varnames_tagged], ++[m4_assert([$# <= 2])dnl ++_$0(m4_quote(m4_default([$1], [[, ]])), ++ m4_ifval([$2], [[$2]], [m4_dquote(lt_decl_tag_varnames)]), ++ 
m4_split(m4_normalize(m4_quote(_LT_TAGS)), [ ]))]) ++m4_define([_lt_decl_varnames_tagged], ++[m4_ifval([$3], [lt_combine([$1], [$2], [_], $3)])]) ++ ++ ++# lt_decl_all_varnames([SEPARATOR], [VARNAME1...]) ++# ------------------------------------------------ ++m4_define([lt_decl_all_varnames], ++[_$0(m4_quote(m4_default([$1], [[, ]])), ++ m4_if([$2], [], ++ m4_quote(lt_decl_varnames), ++ m4_quote(m4_shift($@))))[]dnl ++]) ++m4_define([_lt_decl_all_varnames], ++[lt_join($@, lt_decl_varnames_tagged([$1], ++ lt_decl_tag_varnames([[, ]], m4_shift($@))))dnl ++]) ++ ++ ++# _LT_CONFIG_STATUS_DECLARE([VARNAME]) ++# ------------------------------------ ++# Quote a variable value, and forward it to 'config.status' so that its ++# declaration there will have the same value as in 'configure'. VARNAME ++# must have a single quote delimited value for this to work. ++m4_define([_LT_CONFIG_STATUS_DECLARE], ++[$1='`$ECHO "$][$1" | $SED "$delay_single_quote_subst"`']) ++ ++ ++# _LT_CONFIG_STATUS_DECLARATIONS ++# ------------------------------ ++# We delimit libtool config variables with single quotes, so when ++# we write them to config.status, we have to be sure to quote all ++# embedded single quotes properly. 
In configure, this macro expands ++# each variable declared with _LT_DECL (and _LT_TAGDECL) into: ++# ++# ='`$ECHO "$" | $SED "$delay_single_quote_subst"`' ++m4_defun([_LT_CONFIG_STATUS_DECLARATIONS], ++[m4_foreach([_lt_var], m4_quote(lt_decl_all_varnames), ++ [m4_n([_LT_CONFIG_STATUS_DECLARE(_lt_var)])])]) ++ ++ ++# _LT_LIBTOOL_TAGS ++# ---------------- ++# Output comment and list of tags supported by the script ++m4_defun([_LT_LIBTOOL_TAGS], ++[_LT_FORMAT_COMMENT([The names of the tagged configurations supported by this script])dnl ++available_tags='_LT_TAGS'dnl ++]) ++ ++ ++# _LT_LIBTOOL_DECLARE(VARNAME, [TAG]) ++# ----------------------------------- ++# Extract the dictionary values for VARNAME (optionally with TAG) and ++# expand to a commented shell variable setting: ++# ++# # Some comment about what VAR is for. ++# visible_name=$lt_internal_name ++m4_define([_LT_LIBTOOL_DECLARE], ++[_LT_FORMAT_COMMENT(m4_quote(lt_dict_fetch([lt_decl_dict], [$1], ++ [description])))[]dnl ++m4_pushdef([_libtool_name], ++ m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [libtool_name])))[]dnl ++m4_case(m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [value])), ++ [0], [_libtool_name=[$]$1], ++ [1], [_libtool_name=$lt_[]$1], ++ [2], [_libtool_name=$lt_[]$1], ++ [_libtool_name=lt_dict_fetch([lt_decl_dict], [$1], [value])])[]dnl ++m4_ifval([$2], [_$2])[]m4_popdef([_libtool_name])[]dnl ++]) ++ ++ ++# _LT_LIBTOOL_CONFIG_VARS ++# ----------------------- ++# Produce commented declarations of non-tagged libtool config variables ++# suitable for insertion in the LIBTOOL CONFIG section of the 'libtool' ++# script. Tagged libtool config variables (even for the LIBTOOL CONFIG ++# section) are produced by _LT_LIBTOOL_TAG_VARS. 
++m4_defun([_LT_LIBTOOL_CONFIG_VARS], ++[m4_foreach([_lt_var], ++ m4_quote(_lt_decl_filter([tagged?], [no], [], lt_decl_varnames)), ++ [m4_n([_LT_LIBTOOL_DECLARE(_lt_var)])])]) ++ ++ ++# _LT_LIBTOOL_TAG_VARS(TAG) ++# ------------------------- ++m4_define([_LT_LIBTOOL_TAG_VARS], ++[m4_foreach([_lt_var], m4_quote(lt_decl_tag_varnames), ++ [m4_n([_LT_LIBTOOL_DECLARE(_lt_var, [$1])])])]) ++ ++ ++# _LT_TAGVAR(VARNAME, [TAGNAME]) ++# ------------------------------ ++m4_define([_LT_TAGVAR], [m4_ifval([$2], [$1_$2], [$1])]) ++ ++ ++# _LT_CONFIG_COMMANDS ++# ------------------- ++# Send accumulated output to $CONFIG_STATUS. Thanks to the lists of ++# variables for single and double quote escaping we saved from calls ++# to _LT_DECL, we can put quote escaped variables declarations ++# into 'config.status', and then the shell code to quote escape them in ++# for loops in 'config.status'. Finally, any additional code accumulated ++# from calls to _LT_CONFIG_LIBTOOL_INIT is expanded. ++m4_defun([_LT_CONFIG_COMMANDS], ++[AC_PROVIDE_IFELSE([LT_OUTPUT], ++ dnl If the libtool generation code has been placed in $CONFIG_LT, ++ dnl instead of duplicating it all over again into config.status, ++ dnl then we will have config.status run $CONFIG_LT later, so it ++ dnl needs to know what name is stored there: ++ [AC_CONFIG_COMMANDS([libtool], ++ [$SHELL $CONFIG_LT || AS_EXIT(1)], [CONFIG_LT='$CONFIG_LT'])], ++ dnl If the libtool generation code is destined for config.status, ++ dnl expand the accumulated commands and init code now: ++ [AC_CONFIG_COMMANDS([libtool], ++ [_LT_OUTPUT_LIBTOOL_COMMANDS], [_LT_OUTPUT_LIBTOOL_COMMANDS_INIT])]) ++])#_LT_CONFIG_COMMANDS ++ ++ ++# Initialize. ++m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS_INIT], ++[ ++ ++# The HP-UX ksh and POSIX shell print the target directory to stdout ++# if CDPATH is set. 
++(unset CDPATH) >/dev/null 2>&1 && unset CDPATH ++ ++sed_quote_subst='$sed_quote_subst' ++double_quote_subst='$double_quote_subst' ++delay_variable_subst='$delay_variable_subst' ++_LT_CONFIG_STATUS_DECLARATIONS ++LTCC='$LTCC' ++LTCFLAGS='$LTCFLAGS' ++compiler='$compiler_DEFAULT' ++ ++# A function that is used when there is no print builtin or printf. ++func_fallback_echo () ++{ ++ eval 'cat <<_LTECHO_EOF ++\$[]1 ++_LTECHO_EOF' ++} ++ ++# Quote evaled strings. ++for var in lt_decl_all_varnames([[ \ ++]], lt_decl_quote_varnames); do ++ case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in ++ *[[\\\\\\\`\\"\\\$]]*) ++ eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED \\"\\\$sed_quote_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes ++ ;; ++ *) ++ eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" ++ ;; ++ esac ++done ++ ++# Double-quote double-evaled strings. ++for var in lt_decl_all_varnames([[ \ ++]], lt_decl_dquote_varnames); do ++ case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in ++ *[[\\\\\\\`\\"\\\$]]*) ++ eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes ++ ;; ++ *) ++ eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" ++ ;; ++ esac ++done ++ ++_LT_OUTPUT_LIBTOOL_INIT ++]) ++ ++# _LT_GENERATED_FILE_INIT(FILE, [COMMENT]) ++# ------------------------------------ ++# Generate a child script FILE with all initialization necessary to ++# reuse the environment learned by the parent script, and make the ++# file executable. If COMMENT is supplied, it is inserted after the ++# '#!' sequence but before initialization text begins. After this ++# macro, additional text can be appended to FILE to form the body of ++# the child script. The macro ends with non-zero status if the ++# file could not be fully written (such as if the disk is full). 
++m4_ifdef([AS_INIT_GENERATED], ++[m4_defun([_LT_GENERATED_FILE_INIT],[AS_INIT_GENERATED($@)])], ++[m4_defun([_LT_GENERATED_FILE_INIT], ++[m4_require([AS_PREPARE])]dnl ++[m4_pushdef([AS_MESSAGE_LOG_FD])]dnl ++[lt_write_fail=0 ++cat >$1 <<_ASEOF || lt_write_fail=1 ++#! $SHELL ++# Generated by $as_me. ++$2 ++SHELL=\${CONFIG_SHELL-$SHELL} ++export SHELL ++_ASEOF ++cat >>$1 <<\_ASEOF || lt_write_fail=1 ++AS_SHELL_SANITIZE ++_AS_PREPARE ++exec AS_MESSAGE_FD>&1 ++_ASEOF ++test 0 = "$lt_write_fail" && chmod +x $1[]dnl ++m4_popdef([AS_MESSAGE_LOG_FD])])])# _LT_GENERATED_FILE_INIT ++ ++# LT_OUTPUT ++# --------- ++# This macro allows early generation of the libtool script (before ++# AC_OUTPUT is called), incase it is used in configure for compilation ++# tests. ++AC_DEFUN([LT_OUTPUT], ++[: ${CONFIG_LT=./config.lt} ++AC_MSG_NOTICE([creating $CONFIG_LT]) ++_LT_GENERATED_FILE_INIT(["$CONFIG_LT"], ++[# Run this file to recreate a libtool stub with the current configuration.]) ++ ++cat >>"$CONFIG_LT" <<\_LTEOF ++lt_cl_silent=false ++exec AS_MESSAGE_LOG_FD>>config.log ++{ ++ echo ++ AS_BOX([Running $as_me.]) ++} >&AS_MESSAGE_LOG_FD ++ ++lt_cl_help="\ ++'$as_me' creates a local libtool stub from the current configuration, ++for use in further configure time tests before the real libtool is ++generated. ++ ++Usage: $[0] [[OPTIONS]] ++ ++ -h, --help print this help, then exit ++ -V, --version print version number, then exit ++ -q, --quiet do not print progress messages ++ -d, --debug don't remove temporary files ++ ++Report bugs to ." ++ ++lt_cl_version="\ ++m4_ifset([AC_PACKAGE_NAME], [AC_PACKAGE_NAME ])config.lt[]dnl ++m4_ifset([AC_PACKAGE_VERSION], [ AC_PACKAGE_VERSION]) ++configured by $[0], generated by m4_PACKAGE_STRING. ++ ++Copyright (C) 2011 Free Software Foundation, Inc. ++This config.lt script is free software; the Free Software Foundation ++gives unlimited permision to copy, distribute and modify it." 
++ ++while test 0 != $[#] ++do ++ case $[1] in ++ --version | --v* | -V ) ++ echo "$lt_cl_version"; exit 0 ;; ++ --help | --h* | -h ) ++ echo "$lt_cl_help"; exit 0 ;; ++ --debug | --d* | -d ) ++ debug=: ;; ++ --quiet | --q* | --silent | --s* | -q ) ++ lt_cl_silent=: ;; ++ ++ -*) AC_MSG_ERROR([unrecognized option: $[1] ++Try '$[0] --help' for more information.]) ;; ++ ++ *) AC_MSG_ERROR([unrecognized argument: $[1] ++Try '$[0] --help' for more information.]) ;; ++ esac ++ shift ++done ++ ++if $lt_cl_silent; then ++ exec AS_MESSAGE_FD>/dev/null ++fi ++_LTEOF ++ ++cat >>"$CONFIG_LT" <<_LTEOF ++_LT_OUTPUT_LIBTOOL_COMMANDS_INIT ++_LTEOF ++ ++cat >>"$CONFIG_LT" <<\_LTEOF ++AC_MSG_NOTICE([creating $ofile]) ++_LT_OUTPUT_LIBTOOL_COMMANDS ++AS_EXIT(0) ++_LTEOF ++chmod +x "$CONFIG_LT" ++ ++# configure is writing to config.log, but config.lt does its own redirection, ++# appending to config.log, which fails on DOS, as config.log is still kept ++# open by configure. Here we exec the FD to /dev/null, effectively closing ++# config.log, so it can be properly (re)opened and appended to by config.lt. ++lt_cl_success=: ++test yes = "$silent" && ++ lt_config_lt_args="$lt_config_lt_args --quiet" ++exec AS_MESSAGE_LOG_FD>/dev/null ++$SHELL "$CONFIG_LT" $lt_config_lt_args || lt_cl_success=false ++exec AS_MESSAGE_LOG_FD>>config.log ++$lt_cl_success || AS_EXIT(1) ++])# LT_OUTPUT ++ ++ ++# _LT_CONFIG(TAG) ++# --------------- ++# If TAG is the built-in tag, create an initial libtool script with a ++# default configuration from the untagged config vars. Otherwise add code ++# to config.status for appending the configuration named by TAG from the ++# matching tagged config vars. ++m4_defun([_LT_CONFIG], ++[m4_require([_LT_FILEUTILS_DEFAULTS])dnl ++_LT_CONFIG_SAVE_COMMANDS([ ++ m4_define([_LT_TAG], m4_if([$1], [], [C], [$1]))dnl ++ m4_if(_LT_TAG, [C], [ ++ # See if we are running on zsh, and set the options that allow our ++ # commands through without removal of \ escapes. 
++ if test -n "${ZSH_VERSION+set}"; then ++ setopt NO_GLOB_SUBST ++ fi ++ ++ cfgfile=${ofile}T ++ trap "$RM \"$cfgfile\"; exit 1" 1 2 15 ++ $RM "$cfgfile" ++ ++ cat <<_LT_EOF >> "$cfgfile" ++#! $SHELL ++# Generated automatically by $as_me ($PACKAGE) $VERSION ++# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: ++# NOTE: Changes made to this file will be lost: look at ltmain.sh. ++ ++# Provide generalized library-building support services. ++# Written by Gordon Matzigkeit, 1996 ++ ++_LT_COPYING ++_LT_LIBTOOL_TAGS ++ ++# Configured defaults for sys_lib_dlsearch_path munging. ++: \${LT_SYS_LIBRARY_PATH="$configure_time_lt_sys_library_path"} ++ ++# ### BEGIN LIBTOOL CONFIG ++_LT_LIBTOOL_CONFIG_VARS ++_LT_LIBTOOL_TAG_VARS ++# ### END LIBTOOL CONFIG ++ ++_LT_EOF ++ ++ cat <<'_LT_EOF' >> "$cfgfile" ++ ++# ### BEGIN FUNCTIONS SHARED WITH CONFIGURE ++ ++_LT_PREPARE_MUNGE_PATH_LIST ++_LT_PREPARE_CC_BASENAME ++ ++# ### END FUNCTIONS SHARED WITH CONFIGURE ++ ++_LT_EOF ++ ++ case $host_os in ++ aix3*) ++ cat <<\_LT_EOF >> "$cfgfile" ++# AIX sometimes has problems with the GCC collect2 program. For some ++# reason, if we set the COLLECT_NAMES environment variable, the problems ++# vanish in a puff of smoke. ++if test set != "${COLLECT_NAMES+set}"; then ++ COLLECT_NAMES= ++ export COLLECT_NAMES ++fi ++_LT_EOF ++ ;; ++ esac ++ ++ _LT_PROG_LTMAIN ++ ++ # We use sed instead of cat because bash on DJGPP gets confused if ++ # if finds mixed CR/LF and LF-only lines. Since sed operates in ++ # text mode, it properly converts lines to CR/LF. This bash problem ++ # is reportedly fixed, but why not run on old versions too? ++ $SED '$q' "$ltmain" >> "$cfgfile" \ ++ || (rm -f "$cfgfile"; exit 1) ++ ++ mv -f "$cfgfile" "$ofile" || ++ (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") ++ chmod +x "$ofile" ++], ++[cat <<_LT_EOF >> "$ofile" ++ ++dnl Unfortunately we have to use $1 here, since _LT_TAG is not expanded ++dnl in a comment (ie after a #). 
++# ### BEGIN LIBTOOL TAG CONFIG: $1 ++_LT_LIBTOOL_TAG_VARS(_LT_TAG) ++# ### END LIBTOOL TAG CONFIG: $1 ++_LT_EOF ++])dnl /m4_if ++], ++[m4_if([$1], [], [ ++ PACKAGE='$PACKAGE' ++ VERSION='$VERSION' ++ RM='$RM' ++ ofile='$ofile'], []) ++])dnl /_LT_CONFIG_SAVE_COMMANDS ++])# _LT_CONFIG ++ ++ ++# LT_SUPPORTED_TAG(TAG) ++# --------------------- ++# Trace this macro to discover what tags are supported by the libtool ++# --tag option, using: ++# autoconf --trace 'LT_SUPPORTED_TAG:$1' ++AC_DEFUN([LT_SUPPORTED_TAG], []) ++ ++ ++# C support is built-in for now ++m4_define([_LT_LANG_C_enabled], []) ++m4_define([_LT_TAGS], []) ++ ++ ++# LT_LANG(LANG) ++# ------------- ++# Enable libtool support for the given language if not already enabled. ++AC_DEFUN([LT_LANG], ++[AC_BEFORE([$0], [LT_OUTPUT])dnl ++m4_case([$1], ++ [C], [_LT_LANG(C)], ++ [C++], [_LT_LANG(CXX)], ++ [Go], [_LT_LANG(GO)], ++ [Java], [_LT_LANG(GCJ)], ++ [Fortran 77], [_LT_LANG(F77)], ++ [Fortran], [_LT_LANG(FC)], ++ [Windows Resource], [_LT_LANG(RC)], ++ [m4_ifdef([_LT_LANG_]$1[_CONFIG], ++ [_LT_LANG($1)], ++ [m4_fatal([$0: unsupported language: "$1"])])])dnl ++])# LT_LANG ++ ++ ++# _LT_LANG(LANGNAME) ++# ------------------ ++m4_defun([_LT_LANG], ++[m4_ifdef([_LT_LANG_]$1[_enabled], [], ++ [LT_SUPPORTED_TAG([$1])dnl ++ m4_append([_LT_TAGS], [$1 ])dnl ++ m4_define([_LT_LANG_]$1[_enabled], [])dnl ++ _LT_LANG_$1_CONFIG($1)])dnl ++])# _LT_LANG ++ ++ ++m4_ifndef([AC_PROG_GO], [ ++# NOTE: This macro has been submitted for inclusion into # ++# GNU Autoconf as AC_PROG_GO. When it is available in # ++# a released version of Autoconf we should remove this # ++# macro and use it instead. 
# ++m4_defun([AC_PROG_GO], ++[AC_LANG_PUSH(Go)dnl ++AC_ARG_VAR([GOC], [Go compiler command])dnl ++AC_ARG_VAR([GOFLAGS], [Go compiler flags])dnl ++_AC_ARG_VAR_LDFLAGS()dnl ++AC_CHECK_TOOL(GOC, gccgo) ++if test -z "$GOC"; then ++ if test -n "$ac_tool_prefix"; then ++ AC_CHECK_PROG(GOC, [${ac_tool_prefix}gccgo], [${ac_tool_prefix}gccgo]) ++ fi ++fi ++if test -z "$GOC"; then ++ AC_CHECK_PROG(GOC, gccgo, gccgo, false) ++fi ++])#m4_defun ++])#m4_ifndef ++ ++ ++# _LT_LANG_DEFAULT_CONFIG ++# ----------------------- ++m4_defun([_LT_LANG_DEFAULT_CONFIG], ++[AC_PROVIDE_IFELSE([AC_PROG_CXX], ++ [LT_LANG(CXX)], ++ [m4_define([AC_PROG_CXX], defn([AC_PROG_CXX])[LT_LANG(CXX)])]) ++ ++AC_PROVIDE_IFELSE([AC_PROG_F77], ++ [LT_LANG(F77)], ++ [m4_define([AC_PROG_F77], defn([AC_PROG_F77])[LT_LANG(F77)])]) ++ ++AC_PROVIDE_IFELSE([AC_PROG_FC], ++ [LT_LANG(FC)], ++ [m4_define([AC_PROG_FC], defn([AC_PROG_FC])[LT_LANG(FC)])]) ++ ++dnl The call to [A][M_PROG_GCJ] is quoted like that to stop aclocal ++dnl pulling things in needlessly. 
++AC_PROVIDE_IFELSE([AC_PROG_GCJ], ++ [LT_LANG(GCJ)], ++ [AC_PROVIDE_IFELSE([A][M_PROG_GCJ], ++ [LT_LANG(GCJ)], ++ [AC_PROVIDE_IFELSE([LT_PROG_GCJ], ++ [LT_LANG(GCJ)], ++ [m4_ifdef([AC_PROG_GCJ], ++ [m4_define([AC_PROG_GCJ], defn([AC_PROG_GCJ])[LT_LANG(GCJ)])]) ++ m4_ifdef([A][M_PROG_GCJ], ++ [m4_define([A][M_PROG_GCJ], defn([A][M_PROG_GCJ])[LT_LANG(GCJ)])]) ++ m4_ifdef([LT_PROG_GCJ], ++ [m4_define([LT_PROG_GCJ], defn([LT_PROG_GCJ])[LT_LANG(GCJ)])])])])]) ++ ++AC_PROVIDE_IFELSE([AC_PROG_GO], ++ [LT_LANG(GO)], ++ [m4_define([AC_PROG_GO], defn([AC_PROG_GO])[LT_LANG(GO)])]) ++ ++AC_PROVIDE_IFELSE([LT_PROG_RC], ++ [LT_LANG(RC)], ++ [m4_define([LT_PROG_RC], defn([LT_PROG_RC])[LT_LANG(RC)])]) ++])# _LT_LANG_DEFAULT_CONFIG ++ ++# Obsolete macros: ++AU_DEFUN([AC_LIBTOOL_CXX], [LT_LANG(C++)]) ++AU_DEFUN([AC_LIBTOOL_F77], [LT_LANG(Fortran 77)]) ++AU_DEFUN([AC_LIBTOOL_FC], [LT_LANG(Fortran)]) ++AU_DEFUN([AC_LIBTOOL_GCJ], [LT_LANG(Java)]) ++AU_DEFUN([AC_LIBTOOL_RC], [LT_LANG(Windows Resource)]) ++dnl aclocal-1.4 backwards compatibility: ++dnl AC_DEFUN([AC_LIBTOOL_CXX], []) ++dnl AC_DEFUN([AC_LIBTOOL_F77], []) ++dnl AC_DEFUN([AC_LIBTOOL_FC], []) ++dnl AC_DEFUN([AC_LIBTOOL_GCJ], []) ++dnl AC_DEFUN([AC_LIBTOOL_RC], []) ++ ++ ++# _LT_TAG_COMPILER ++# ---------------- ++m4_defun([_LT_TAG_COMPILER], ++[AC_REQUIRE([AC_PROG_CC])dnl ++ ++_LT_DECL([LTCC], [CC], [1], [A C compiler])dnl ++_LT_DECL([LTCFLAGS], [CFLAGS], [1], [LTCC compiler flags])dnl ++_LT_TAGDECL([CC], [compiler], [1], [A language specific compiler])dnl ++_LT_TAGDECL([with_gcc], [GCC], [0], [Is the compiler the GNU compiler?])dnl ++ ++# If no C compiler was specified, use CC. ++LTCC=${LTCC-"$CC"} ++ ++# If no C compiler flags were specified, use CFLAGS. ++LTCFLAGS=${LTCFLAGS-"$CFLAGS"} ++ ++# Allow CC to be a program name with arguments. 
++compiler=$CC ++])# _LT_TAG_COMPILER ++ ++ ++# _LT_COMPILER_BOILERPLATE ++# ------------------------ ++# Check for compiler boilerplate output or warnings with ++# the simple compiler test code. ++m4_defun([_LT_COMPILER_BOILERPLATE], ++[m4_require([_LT_DECL_SED])dnl ++ac_outfile=conftest.$ac_objext ++echo "$lt_simple_compile_test_code" >conftest.$ac_ext ++eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err ++_lt_compiler_boilerplate=`cat conftest.err` ++$RM conftest* ++])# _LT_COMPILER_BOILERPLATE ++ ++ ++# _LT_LINKER_BOILERPLATE ++# ---------------------- ++# Check for linker boilerplate output or warnings with ++# the simple link test code. ++m4_defun([_LT_LINKER_BOILERPLATE], ++[m4_require([_LT_DECL_SED])dnl ++ac_outfile=conftest.$ac_objext ++echo "$lt_simple_link_test_code" >conftest.$ac_ext ++eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err ++_lt_linker_boilerplate=`cat conftest.err` ++$RM -r conftest* ++])# _LT_LINKER_BOILERPLATE ++ ++# _LT_REQUIRED_DARWIN_CHECKS ++# ------------------------- ++m4_defun_once([_LT_REQUIRED_DARWIN_CHECKS],[ ++ case $host_os in ++ rhapsody* | darwin*) ++ AC_CHECK_TOOL([DSYMUTIL], [dsymutil], [:]) ++ AC_CHECK_TOOL([NMEDIT], [nmedit], [:]) ++ AC_CHECK_TOOL([LIPO], [lipo], [:]) ++ AC_CHECK_TOOL([OTOOL], [otool], [:]) ++ AC_CHECK_TOOL([OTOOL64], [otool64], [:]) ++ _LT_DECL([], [DSYMUTIL], [1], ++ [Tool to manipulate archived DWARF debug symbol files on Mac OS X]) ++ _LT_DECL([], [NMEDIT], [1], ++ [Tool to change global to local symbols on Mac OS X]) ++ _LT_DECL([], [LIPO], [1], ++ [Tool to manipulate fat objects and archives on Mac OS X]) ++ _LT_DECL([], [OTOOL], [1], ++ [ldd/readelf like tool for Mach-O binaries on Mac OS X]) ++ _LT_DECL([], [OTOOL64], [1], ++ [ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4]) ++ ++ AC_CACHE_CHECK([for -single_module linker flag],[lt_cv_apple_cc_single_mod], ++ [lt_cv_apple_cc_single_mod=no ++ if test -z "$LT_MULTI_MODULE"; then ++ # By 
default we will add the -single_module flag. You can override ++ # by either setting the environment variable LT_MULTI_MODULE ++ # non-empty at configure time, or by adding -multi_module to the ++ # link flags. ++ rm -rf libconftest.dylib* ++ echo "int foo(void){return 1;}" > conftest.c ++ echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ ++-dynamiclib -Wl,-single_module conftest.c" >&AS_MESSAGE_LOG_FD ++ $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ ++ -dynamiclib -Wl,-single_module conftest.c 2>conftest.err ++ _lt_result=$? ++ # If there is a non-empty error log, and "single_module" ++ # appears in it, assume the flag caused a linker warning ++ if test -s conftest.err && $GREP single_module conftest.err; then ++ cat conftest.err >&AS_MESSAGE_LOG_FD ++ # Otherwise, if the output was created with a 0 exit code from ++ # the compiler, it worked. ++ elif test -f libconftest.dylib && test 0 = "$_lt_result"; then ++ lt_cv_apple_cc_single_mod=yes ++ else ++ cat conftest.err >&AS_MESSAGE_LOG_FD ++ fi ++ rm -rf libconftest.dylib* ++ rm -f conftest.* ++ fi]) ++ ++ AC_CACHE_CHECK([for -exported_symbols_list linker flag], ++ [lt_cv_ld_exported_symbols_list], ++ [lt_cv_ld_exported_symbols_list=no ++ save_LDFLAGS=$LDFLAGS ++ echo "_main" > conftest.sym ++ LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym" ++ AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])], ++ [lt_cv_ld_exported_symbols_list=yes], ++ [lt_cv_ld_exported_symbols_list=no]) ++ LDFLAGS=$save_LDFLAGS ++ ]) ++ ++ AC_CACHE_CHECK([for -force_load linker flag],[lt_cv_ld_force_load], ++ [lt_cv_ld_force_load=no ++ cat > conftest.c << _LT_EOF ++int forced_loaded() { return 2;} ++_LT_EOF ++ echo "$LTCC $LTCFLAGS -c -o conftest.o conftest.c" >&AS_MESSAGE_LOG_FD ++ $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&AS_MESSAGE_LOG_FD ++ echo "$AR $AR_FLAGS libconftest.a conftest.o" >&AS_MESSAGE_LOG_FD ++ $AR $AR_FLAGS libconftest.a conftest.o 2>&AS_MESSAGE_LOG_FD ++ echo "$RANLIB libconftest.a" >&AS_MESSAGE_LOG_FD ++ $RANLIB 
libconftest.a 2>&AS_MESSAGE_LOG_FD ++ cat > conftest.c << _LT_EOF ++int main() { return 0;} ++_LT_EOF ++ echo "$LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a" >&AS_MESSAGE_LOG_FD ++ $LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a 2>conftest.err ++ _lt_result=$? ++ if test -s conftest.err && $GREP force_load conftest.err; then ++ cat conftest.err >&AS_MESSAGE_LOG_FD ++ elif test -f conftest && test 0 = "$_lt_result" && $GREP forced_load conftest >/dev/null 2>&1; then ++ lt_cv_ld_force_load=yes ++ else ++ cat conftest.err >&AS_MESSAGE_LOG_FD ++ fi ++ rm -f conftest.err libconftest.a conftest conftest.c ++ rm -rf conftest.dSYM ++ ]) ++ case $host_os in ++ rhapsody* | darwin1.[[012]]) ++ _lt_dar_allow_undefined='$wl-undefined ${wl}suppress' ;; ++ darwin1.*) ++ _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;; ++ darwin*) ++ case $MACOSX_DEPLOYMENT_TARGET,$host in ++ 10.[[012]],*|,*powerpc*-darwin[[5-8]]*) ++ _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;; ++ *) ++ _lt_dar_allow_undefined='$wl-undefined ${wl}dynamic_lookup' ;; ++ esac ++ ;; ++ esac ++ if test yes = "$lt_cv_apple_cc_single_mod"; then ++ _lt_dar_single_mod='$single_module' ++ fi ++ if test yes = "$lt_cv_ld_exported_symbols_list"; then ++ _lt_dar_export_syms=' $wl-exported_symbols_list,$output_objdir/$libname-symbols.expsym' ++ else ++ _lt_dar_export_syms='~$NMEDIT -s $output_objdir/$libname-symbols.expsym $lib' ++ fi ++ if test : != "$DSYMUTIL" && test no = "$lt_cv_ld_force_load"; then ++ _lt_dsymutil='~$DSYMUTIL $lib || :' ++ else ++ _lt_dsymutil= ++ fi ++ ;; ++ esac ++]) ++ ++ ++# _LT_DARWIN_LINKER_FEATURES([TAG]) ++# --------------------------------- ++# Checks for linker and compiler features on darwin ++m4_defun([_LT_DARWIN_LINKER_FEATURES], ++[ ++ m4_require([_LT_REQUIRED_DARWIN_CHECKS]) ++ _LT_TAGVAR(archive_cmds_need_lc, $1)=no ++ _LT_TAGVAR(hardcode_direct, $1)=no ++ 
_LT_TAGVAR(hardcode_automatic, $1)=yes ++ _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported ++ if test yes = "$lt_cv_ld_force_load"; then ++ _LT_TAGVAR(whole_archive_flag_spec, $1)='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience $wl-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' ++ m4_case([$1], [F77], [_LT_TAGVAR(compiler_needs_object, $1)=yes], ++ [FC], [_LT_TAGVAR(compiler_needs_object, $1)=yes]) ++ else ++ _LT_TAGVAR(whole_archive_flag_spec, $1)='' ++ fi ++ _LT_TAGVAR(link_all_deplibs, $1)=yes ++ _LT_TAGVAR(allow_undefined_flag, $1)=$_lt_dar_allow_undefined ++ case $cc_basename in ++ ifort*|nagfor*) _lt_dar_can_shared=yes ;; ++ *) _lt_dar_can_shared=$GCC ;; ++ esac ++ if test yes = "$_lt_dar_can_shared"; then ++ output_verbose_link_cmd=func_echo_all ++ _LT_TAGVAR(archive_cmds, $1)="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dsymutil" ++ _LT_TAGVAR(module_cmds, $1)="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dsymutil" ++ _LT_TAGVAR(archive_expsym_cmds, $1)="$SED 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dar_export_syms$_lt_dsymutil" ++ _LT_TAGVAR(module_expsym_cmds, $1)="$SED -e 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dar_export_syms$_lt_dsymutil" ++ m4_if([$1], [CXX], ++[ if test yes != "$lt_cv_apple_cc_single_mod"; then ++ _LT_TAGVAR(archive_cmds, $1)="\$CC -r -keep_private_externs -nostdlib -o \$lib-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$lib-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname 
\$verstring$_lt_dsymutil" ++ _LT_TAGVAR(archive_expsym_cmds, $1)="$SED 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \$lib-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$lib-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring$_lt_dar_export_syms$_lt_dsymutil" ++ fi ++],[]) ++ else ++ _LT_TAGVAR(ld_shlibs, $1)=no ++ fi ++]) ++ ++# _LT_SYS_MODULE_PATH_AIX([TAGNAME]) ++# ---------------------------------- ++# Links a minimal program and checks the executable ++# for the system default hardcoded library path. In most cases, ++# this is /usr/lib:/lib, but when the MPI compilers are used ++# the location of the communication and MPI libs are included too. ++# If we don't find anything, use the default library path according ++# to the aix ld manual. ++# Store the results from the different compilers for each TAGNAME. ++# Allow to override them for all tags through lt_cv_aix_libpath. ++m4_defun([_LT_SYS_MODULE_PATH_AIX], ++[m4_require([_LT_DECL_SED])dnl ++if test set = "${lt_cv_aix_libpath+set}"; then ++ aix_libpath=$lt_cv_aix_libpath ++else ++ AC_CACHE_VAL([_LT_TAGVAR([lt_cv_aix_libpath_], [$1])], ++ [AC_LINK_IFELSE([AC_LANG_PROGRAM],[ ++ lt_aix_libpath_sed='[ ++ /Import File Strings/,/^$/ { ++ /^0/ { ++ s/^0 *\([^ ]*\) *$/\1/ ++ p ++ } ++ }]' ++ _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ # Check for a 64-bit object if we didn't find anything. 
++ if test -z "$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])"; then ++ _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ fi],[]) ++ if test -z "$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])"; then ++ _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=/usr/lib:/lib ++ fi ++ ]) ++ aix_libpath=$_LT_TAGVAR([lt_cv_aix_libpath_], [$1]) ++fi ++])# _LT_SYS_MODULE_PATH_AIX ++ ++ ++# _LT_SHELL_INIT(ARG) ++# ------------------- ++m4_define([_LT_SHELL_INIT], ++[m4_divert_text([M4SH-INIT], [$1 ++])])# _LT_SHELL_INIT ++ ++ ++ ++# _LT_PROG_ECHO_BACKSLASH ++# ----------------------- ++# Find how we can fake an echo command that does not interpret backslash. ++# In particular, with Autoconf 2.60 or later we add some code to the start ++# of the generated configure script that will find a shell with a builtin ++# printf (that we can use as an echo command). ++m4_defun([_LT_PROG_ECHO_BACKSLASH], ++[ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' ++ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO ++ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO ++ ++AC_MSG_CHECKING([how to print strings]) ++# Test print first, because it will be a builtin if present. ++if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ ++ test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then ++ ECHO='print -r --' ++elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then ++ ECHO='printf %s\n' ++else ++ # Use this function as a fallback that always works. ++ func_fallback_echo () ++ { ++ eval 'cat <<_LTECHO_EOF ++$[]1 ++_LTECHO_EOF' ++ } ++ ECHO='func_fallback_echo' ++fi ++ ++# func_echo_all arg... ++# Invoke $ECHO with all args, space-separated. 
++func_echo_all () ++{ ++ $ECHO "$*" ++} ++ ++case $ECHO in ++ printf*) AC_MSG_RESULT([printf]) ;; ++ print*) AC_MSG_RESULT([print -r]) ;; ++ *) AC_MSG_RESULT([cat]) ;; ++esac ++ ++m4_ifdef([_AS_DETECT_SUGGESTED], ++[_AS_DETECT_SUGGESTED([ ++ test -n "${ZSH_VERSION+set}${BASH_VERSION+set}" || ( ++ ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' ++ ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO ++ ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO ++ PATH=/empty FPATH=/empty; export PATH FPATH ++ test "X`printf %s $ECHO`" = "X$ECHO" \ ++ || test "X`print -r -- $ECHO`" = "X$ECHO" )])]) ++ ++_LT_DECL([], [SHELL], [1], [Shell to use when invoking shell scripts]) ++_LT_DECL([], [ECHO], [1], [An echo program that protects backslashes]) ++])# _LT_PROG_ECHO_BACKSLASH ++ ++ ++# _LT_WITH_SYSROOT ++# ---------------- ++AC_DEFUN([_LT_WITH_SYSROOT], ++[m4_require([_LT_DECL_SED])dnl ++AC_MSG_CHECKING([for sysroot]) ++AC_ARG_WITH([sysroot], ++[AS_HELP_STRING([--with-sysroot@<:@=DIR@:>@], ++ [Search for dependent libraries within DIR (or the compiler's sysroot ++ if not specified).])], ++[], [with_sysroot=no]) ++ ++dnl lt_sysroot will always be passed unquoted. We quote it here ++dnl in case the user passed a directory name. 
++lt_sysroot= ++case $with_sysroot in #( ++ yes) ++ if test yes = "$GCC"; then ++ lt_sysroot=`$CC --print-sysroot 2>/dev/null` ++ fi ++ ;; #( ++ /*) ++ lt_sysroot=`echo "$with_sysroot" | $SED -e "$sed_quote_subst"` ++ ;; #( ++ no|'') ++ ;; #( ++ *) ++ AC_MSG_RESULT([$with_sysroot]) ++ AC_MSG_ERROR([The sysroot must be an absolute path.]) ++ ;; ++esac ++ ++ AC_MSG_RESULT([${lt_sysroot:-no}]) ++_LT_DECL([], [lt_sysroot], [0], [The root where to search for ]dnl ++[dependent libraries, and where our libraries should be installed.])]) ++ ++# _LT_ENABLE_LOCK ++# --------------- ++m4_defun([_LT_ENABLE_LOCK], ++[AC_ARG_ENABLE([libtool-lock], ++ [AS_HELP_STRING([--disable-libtool-lock], ++ [avoid locking (might break parallel builds)])]) ++test no = "$enable_libtool_lock" || enable_libtool_lock=yes ++ ++# Some flags need to be propagated to the compiler or linker for good ++# libtool support. ++case $host in ++ia64-*-hpux*) ++ # Find out what ABI is being produced by ac_compile, and set mode ++ # options accordingly. ++ echo 'int i;' > conftest.$ac_ext ++ if AC_TRY_EVAL(ac_compile); then ++ case `$FILECMD conftest.$ac_objext` in ++ *ELF-32*) ++ HPUX_IA64_MODE=32 ++ ;; ++ *ELF-64*) ++ HPUX_IA64_MODE=64 ++ ;; ++ esac ++ fi ++ rm -rf conftest* ++ ;; ++*-*-irix6*) ++ # Find out what ABI is being produced by ac_compile, and set linker ++ # options accordingly. 
++ echo '[#]line '$LINENO' "configure"' > conftest.$ac_ext ++ if AC_TRY_EVAL(ac_compile); then ++ if test yes = "$lt_cv_prog_gnu_ld"; then ++ case `$FILECMD conftest.$ac_objext` in ++ *32-bit*) ++ LD="${LD-ld} -melf32bsmip" ++ ;; ++ *N32*) ++ LD="${LD-ld} -melf32bmipn32" ++ ;; ++ *64-bit*) ++ LD="${LD-ld} -melf64bmip" ++ ;; ++ esac ++ else ++ case `$FILECMD conftest.$ac_objext` in ++ *32-bit*) ++ LD="${LD-ld} -32" ++ ;; ++ *N32*) ++ LD="${LD-ld} -n32" ++ ;; ++ *64-bit*) ++ LD="${LD-ld} -64" ++ ;; ++ esac ++ fi ++ fi ++ rm -rf conftest* ++ ;; ++ ++mips64*-*linux*) ++ # Find out what ABI is being produced by ac_compile, and set linker ++ # options accordingly. ++ echo '[#]line '$LINENO' "configure"' > conftest.$ac_ext ++ if AC_TRY_EVAL(ac_compile); then ++ emul=elf ++ case `$FILECMD conftest.$ac_objext` in ++ *32-bit*) ++ emul="${emul}32" ++ ;; ++ *64-bit*) ++ emul="${emul}64" ++ ;; ++ esac ++ case `$FILECMD conftest.$ac_objext` in ++ *MSB*) ++ emul="${emul}btsmip" ++ ;; ++ *LSB*) ++ emul="${emul}ltsmip" ++ ;; ++ esac ++ case `$FILECMD conftest.$ac_objext` in ++ *N32*) ++ emul="${emul}n32" ++ ;; ++ esac ++ LD="${LD-ld} -m $emul" ++ fi ++ rm -rf conftest* ++ ;; ++ ++x86_64-*kfreebsd*-gnu|x86_64-*linux*|powerpc*-*linux*| \ ++s390*-*linux*|s390*-*tpf*|sparc*-*linux*) ++ # Find out what ABI is being produced by ac_compile, and set linker ++ # options accordingly. Note that the listed cases only cover the ++ # situations where additional linker options are needed (such as when ++ # doing 32-bit compilation for a host where ld defaults to 64-bit, or ++ # vice versa); the common cases where no linker options are needed do ++ # not appear in the list. 
++ echo 'int i;' > conftest.$ac_ext ++ if AC_TRY_EVAL(ac_compile); then ++ case `$FILECMD conftest.o` in ++ *32-bit*) ++ case $host in ++ x86_64-*kfreebsd*-gnu) ++ LD="${LD-ld} -m elf_i386_fbsd" ++ ;; ++ x86_64-*linux*) ++ case `$FILECMD conftest.o` in ++ *x86-64*) ++ LD="${LD-ld} -m elf32_x86_64" ++ ;; ++ *) ++ LD="${LD-ld} -m elf_i386" ++ ;; ++ esac ++ ;; ++ powerpc64le-*linux*) ++ LD="${LD-ld} -m elf32lppclinux" ++ ;; ++ powerpc64-*linux*) ++ LD="${LD-ld} -m elf32ppclinux" ++ ;; ++ s390x-*linux*) ++ LD="${LD-ld} -m elf_s390" ++ ;; ++ sparc64-*linux*) ++ LD="${LD-ld} -m elf32_sparc" ++ ;; ++ esac ++ ;; ++ *64-bit*) ++ case $host in ++ x86_64-*kfreebsd*-gnu) ++ LD="${LD-ld} -m elf_x86_64_fbsd" ++ ;; ++ x86_64-*linux*) ++ LD="${LD-ld} -m elf_x86_64" ++ ;; ++ powerpcle-*linux*) ++ LD="${LD-ld} -m elf64lppc" ++ ;; ++ powerpc-*linux*) ++ LD="${LD-ld} -m elf64ppc" ++ ;; ++ s390*-*linux*|s390*-*tpf*) ++ LD="${LD-ld} -m elf64_s390" ++ ;; ++ sparc*-*linux*) ++ LD="${LD-ld} -m elf64_sparc" ++ ;; ++ esac ++ ;; ++ esac ++ fi ++ rm -rf conftest* ++ ;; ++ ++*-*-sco3.2v5*) ++ # On SCO OpenServer 5, we need -belf to get full-featured binaries. ++ SAVE_CFLAGS=$CFLAGS ++ CFLAGS="$CFLAGS -belf" ++ AC_CACHE_CHECK([whether the C compiler needs -belf], lt_cv_cc_needs_belf, ++ [AC_LANG_PUSH(C) ++ AC_LINK_IFELSE([AC_LANG_PROGRAM([[]],[[]])],[lt_cv_cc_needs_belf=yes],[lt_cv_cc_needs_belf=no]) ++ AC_LANG_POP]) ++ if test yes != "$lt_cv_cc_needs_belf"; then ++ # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf ++ CFLAGS=$SAVE_CFLAGS ++ fi ++ ;; ++*-*solaris*) ++ # Find out what ABI is being produced by ac_compile, and set linker ++ # options accordingly. 
++ echo 'int i;' > conftest.$ac_ext ++ if AC_TRY_EVAL(ac_compile); then ++ case `$FILECMD conftest.o` in ++ *64-bit*) ++ case $lt_cv_prog_gnu_ld in ++ yes*) ++ case $host in ++ i?86-*-solaris*|x86_64-*-solaris*) ++ LD="${LD-ld} -m elf_x86_64" ++ ;; ++ sparc*-*-solaris*) ++ LD="${LD-ld} -m elf64_sparc" ++ ;; ++ esac ++ # GNU ld 2.21 introduced _sol2 emulations. Use them if available. ++ if ${LD-ld} -V | grep _sol2 >/dev/null 2>&1; then ++ LD=${LD-ld}_sol2 ++ fi ++ ;; ++ *) ++ if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then ++ LD="${LD-ld} -64" ++ fi ++ ;; ++ esac ++ ;; ++ esac ++ fi ++ rm -rf conftest* ++ ;; ++esac ++ ++need_locks=$enable_libtool_lock ++])# _LT_ENABLE_LOCK ++ ++ ++# _LT_PROG_AR ++# ----------- ++m4_defun([_LT_PROG_AR], ++[AC_CHECK_TOOLS(AR, [ar], false) ++: ${AR=ar} ++_LT_DECL([], [AR], [1], [The archiver]) ++ ++# Use ARFLAGS variable as AR's operation code to sync the variable naming with ++# Automake. If both AR_FLAGS and ARFLAGS are specified, AR_FLAGS should have ++# higher priority because thats what people were doing historically (setting ++# ARFLAGS for automake and AR_FLAGS for libtool). FIXME: Make the AR_FLAGS ++# variable obsoleted/removed. ++ ++test ${AR_FLAGS+y} || AR_FLAGS=${ARFLAGS-cr} ++lt_ar_flags=$AR_FLAGS ++_LT_DECL([], [lt_ar_flags], [0], [Flags to create an archive (by configure)]) ++ ++# Make AR_FLAGS overridable by 'make ARFLAGS='. Don't try to run-time override ++# by AR_FLAGS because that was never working and AR_FLAGS is about to die. ++_LT_DECL([], [AR_FLAGS], [\@S|@{ARFLAGS-"\@S|@lt_ar_flags"}], ++ [Flags to create an archive]) ++ ++AC_CACHE_CHECK([for archiver @FILE support], [lt_cv_ar_at_file], ++ [lt_cv_ar_at_file=no ++ AC_COMPILE_IFELSE([AC_LANG_PROGRAM], ++ [echo conftest.$ac_objext > conftest.lst ++ lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&AS_MESSAGE_LOG_FD' ++ AC_TRY_EVAL([lt_ar_try]) ++ if test 0 -eq "$ac_status"; then ++ # Ensure the archiver fails upon bogus file names. 
++ rm -f conftest.$ac_objext libconftest.a ++ AC_TRY_EVAL([lt_ar_try]) ++ if test 0 -ne "$ac_status"; then ++ lt_cv_ar_at_file=@ ++ fi ++ fi ++ rm -f conftest.* libconftest.a ++ ]) ++ ]) ++ ++if test no = "$lt_cv_ar_at_file"; then ++ archiver_list_spec= ++else ++ archiver_list_spec=$lt_cv_ar_at_file ++fi ++_LT_DECL([], [archiver_list_spec], [1], ++ [How to feed a file listing to the archiver]) ++])# _LT_PROG_AR ++ ++ ++# _LT_CMD_OLD_ARCHIVE ++# ------------------- ++m4_defun([_LT_CMD_OLD_ARCHIVE], ++[_LT_PROG_AR ++ ++AC_CHECK_TOOL(STRIP, strip, :) ++test -z "$STRIP" && STRIP=: ++_LT_DECL([], [STRIP], [1], [A symbol stripping program]) ++ ++AC_CHECK_TOOL(RANLIB, ranlib, :) ++test -z "$RANLIB" && RANLIB=: ++_LT_DECL([], [RANLIB], [1], ++ [Commands used to install an old-style archive]) ++ ++# Determine commands to create old-style static archives. ++old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs' ++old_postinstall_cmds='chmod 644 $oldlib' ++old_postuninstall_cmds= ++ ++if test -n "$RANLIB"; then ++ case $host_os in ++ bitrig* | openbsd*) ++ old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$tool_oldlib" ++ ;; ++ *) ++ old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$tool_oldlib" ++ ;; ++ esac ++ old_archive_cmds="$old_archive_cmds~\$RANLIB \$tool_oldlib" ++fi ++ ++case $host_os in ++ darwin*) ++ lock_old_archive_extraction=yes ;; ++ *) ++ lock_old_archive_extraction=no ;; ++esac ++_LT_DECL([], [old_postinstall_cmds], [2]) ++_LT_DECL([], [old_postuninstall_cmds], [2]) ++_LT_TAGDECL([], [old_archive_cmds], [2], ++ [Commands used to build an old-style archive]) ++_LT_DECL([], [lock_old_archive_extraction], [0], ++ [Whether to use a lock for old archive extraction]) ++])# _LT_CMD_OLD_ARCHIVE ++ ++ ++# _LT_COMPILER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS, ++# [OUTPUT-FILE], [ACTION-SUCCESS], [ACTION-FAILURE]) ++# ---------------------------------------------------------------- ++# Check whether the given compiler option works ++AC_DEFUN([_LT_COMPILER_OPTION], 
++[m4_require([_LT_FILEUTILS_DEFAULTS])dnl ++m4_require([_LT_DECL_SED])dnl ++AC_CACHE_CHECK([$1], [$2], ++ [$2=no ++ m4_if([$4], , [ac_outfile=conftest.$ac_objext], [ac_outfile=$4]) ++ echo "$lt_simple_compile_test_code" > conftest.$ac_ext ++ lt_compiler_flag="$3" ## exclude from sc_useless_quotes_in_assignment ++ # Insert the option either (1) after the last *FLAGS variable, or ++ # (2) before a word containing "conftest.", or (3) at the end. ++ # Note that $ac_compile itself does not contain backslashes and begins ++ # with a dollar sign (not a hyphen), so the echo should work correctly. ++ # The option is referenced via a variable to avoid confusing sed. ++ lt_compile=`echo "$ac_compile" | $SED \ ++ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ ++ -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \ ++ -e 's:$: $lt_compiler_flag:'` ++ (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&AS_MESSAGE_LOG_FD) ++ (eval "$lt_compile" 2>conftest.err) ++ ac_status=$? ++ cat conftest.err >&AS_MESSAGE_LOG_FD ++ echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD ++ if (exit $ac_status) && test -s "$ac_outfile"; then ++ # The compiler can only warn and ignore the option if not recognized ++ # So say no if there are warnings other than the usual output. ++ $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp ++ $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 ++ if test ! 
-s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then ++ $2=yes ++ fi ++ fi ++ $RM conftest* ++]) ++ ++if test yes = "[$]$2"; then ++ m4_if([$5], , :, [$5]) ++else ++ m4_if([$6], , :, [$6]) ++fi ++])# _LT_COMPILER_OPTION ++ ++# Old name: ++AU_ALIAS([AC_LIBTOOL_COMPILER_OPTION], [_LT_COMPILER_OPTION]) ++dnl aclocal-1.4 backwards compatibility: ++dnl AC_DEFUN([AC_LIBTOOL_COMPILER_OPTION], []) ++ ++ ++# _LT_LINKER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS, ++# [ACTION-SUCCESS], [ACTION-FAILURE]) ++# ---------------------------------------------------- ++# Check whether the given linker option works ++AC_DEFUN([_LT_LINKER_OPTION], ++[m4_require([_LT_FILEUTILS_DEFAULTS])dnl ++m4_require([_LT_DECL_SED])dnl ++AC_CACHE_CHECK([$1], [$2], ++ [$2=no ++ save_LDFLAGS=$LDFLAGS ++ LDFLAGS="$LDFLAGS $3" ++ echo "$lt_simple_link_test_code" > conftest.$ac_ext ++ if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then ++ # The linker can only warn and ignore the option if not recognized ++ # So say no if there are warnings ++ if test -s conftest.err; then ++ # Append any errors to the config.log. 
++ cat conftest.err 1>&AS_MESSAGE_LOG_FD ++ $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp ++ $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 ++ if diff conftest.exp conftest.er2 >/dev/null; then ++ $2=yes ++ fi ++ else ++ $2=yes ++ fi ++ fi ++ $RM -r conftest* ++ LDFLAGS=$save_LDFLAGS ++]) ++ ++if test yes = "[$]$2"; then ++ m4_if([$4], , :, [$4]) ++else ++ m4_if([$5], , :, [$5]) ++fi ++])# _LT_LINKER_OPTION ++ ++# Old name: ++AU_ALIAS([AC_LIBTOOL_LINKER_OPTION], [_LT_LINKER_OPTION]) ++dnl aclocal-1.4 backwards compatibility: ++dnl AC_DEFUN([AC_LIBTOOL_LINKER_OPTION], []) ++ ++ ++# LT_CMD_MAX_LEN ++#--------------- ++AC_DEFUN([LT_CMD_MAX_LEN], ++[AC_REQUIRE([AC_CANONICAL_HOST])dnl ++# find the maximum length of command line arguments ++AC_MSG_CHECKING([the maximum length of command line arguments]) ++AC_CACHE_VAL([lt_cv_sys_max_cmd_len], [dnl ++ i=0 ++ teststring=ABCD ++ ++ case $build_os in ++ msdosdjgpp*) ++ # On DJGPP, this test can blow up pretty badly due to problems in libc ++ # (any single argument exceeding 2000 bytes causes a buffer overrun ++ # during glob expansion). Even if it were fixed, the result of this ++ # check would be larger than it should be. ++ lt_cv_sys_max_cmd_len=12288; # 12K is about right ++ ;; ++ ++ gnu*) ++ # Under GNU Hurd, this test is not required because there is ++ # no limit to the length of command line arguments. ++ # Libtool will interpret -1 as no limit whatsoever ++ lt_cv_sys_max_cmd_len=-1; ++ ;; ++ ++ cygwin* | mingw* | cegcc*) ++ # On Win9x/ME, this test blows up -- it succeeds, but takes ++ # about 5 minutes as the teststring grows exponentially. ++ # Worse, since 9x/ME are not pre-emptively multitasking, ++ # you end up with a "frozen" computer, even though with patience ++ # the test eventually succeeds (with a max line length of 256k). ++ # Instead, let's just punt: use the minimum linelength reported by ++ # all of the supported platforms: 8192 (on NT/2K/XP). 
++ lt_cv_sys_max_cmd_len=8192; ++ ;; ++ ++ mint*) ++ # On MiNT this can take a long time and run out of memory. ++ lt_cv_sys_max_cmd_len=8192; ++ ;; ++ ++ amigaos*) ++ # On AmigaOS with pdksh, this test takes hours, literally. ++ # So we just punt and use a minimum line length of 8192. ++ lt_cv_sys_max_cmd_len=8192; ++ ;; ++ ++ bitrig* | darwin* | dragonfly* | freebsd* | midnightbsd* | netbsd* | openbsd*) ++ # This has been around since 386BSD, at least. Likely further. ++ if test -x /sbin/sysctl; then ++ lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` ++ elif test -x /usr/sbin/sysctl; then ++ lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` ++ else ++ lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs ++ fi ++ # And add a safety zone ++ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` ++ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` ++ ;; ++ ++ interix*) ++ # We know the value 262144 and hardcode it with a safety zone (like BSD) ++ lt_cv_sys_max_cmd_len=196608 ++ ;; ++ ++ os2*) ++ # The test takes a long time on OS/2. ++ lt_cv_sys_max_cmd_len=8192 ++ ;; ++ ++ osf*) ++ # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure ++ # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not ++ # nice to cause kernel panics so lets avoid the loop below. ++ # First set a reasonable default. 
++ lt_cv_sys_max_cmd_len=16384 ++ # ++ if test -x /sbin/sysconfig; then ++ case `/sbin/sysconfig -q proc exec_disable_arg_limit` in ++ *1*) lt_cv_sys_max_cmd_len=-1 ;; ++ esac ++ fi ++ ;; ++ sco3.2v5*) ++ lt_cv_sys_max_cmd_len=102400 ++ ;; ++ sysv5* | sco5v6* | sysv4.2uw2*) ++ kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null` ++ if test -n "$kargmax"; then ++ lt_cv_sys_max_cmd_len=`echo $kargmax | $SED 's/.*[[ ]]//'` ++ else ++ lt_cv_sys_max_cmd_len=32768 ++ fi ++ ;; ++ *) ++ lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null` ++ if test -n "$lt_cv_sys_max_cmd_len" && \ ++ test undefined != "$lt_cv_sys_max_cmd_len"; then ++ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` ++ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` ++ else ++ # Make teststring a little bigger before we do anything with it. ++ # a 1K string should be a reasonable start. ++ for i in 1 2 3 4 5 6 7 8; do ++ teststring=$teststring$teststring ++ done ++ SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} ++ # If test is not a shell built-in, we'll probably end up computing a ++ # maximum length that is only half of the actual maximum length, but ++ # we can't tell. ++ while { test X`env echo "$teststring$teststring" 2>/dev/null` \ ++ = "X$teststring$teststring"; } >/dev/null 2>&1 && ++ test 17 != "$i" # 1/2 MB should be enough ++ do ++ i=`expr $i + 1` ++ teststring=$teststring$teststring ++ done ++ # Only check the string length outside the loop. ++ lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1` ++ teststring= ++ # Add a significant safety factor because C++ compilers can tack on ++ # massive amounts of additional arguments before passing them to the ++ # linker. It appears as though 1/2 is a usable value. 
++ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2` ++ fi ++ ;; ++ esac ++]) ++if test -n "$lt_cv_sys_max_cmd_len"; then ++ AC_MSG_RESULT($lt_cv_sys_max_cmd_len) ++else ++ AC_MSG_RESULT(none) ++fi ++max_cmd_len=$lt_cv_sys_max_cmd_len ++_LT_DECL([], [max_cmd_len], [0], ++ [What is the maximum length of a command?]) ++])# LT_CMD_MAX_LEN ++ ++# Old name: ++AU_ALIAS([AC_LIBTOOL_SYS_MAX_CMD_LEN], [LT_CMD_MAX_LEN]) ++dnl aclocal-1.4 backwards compatibility: ++dnl AC_DEFUN([AC_LIBTOOL_SYS_MAX_CMD_LEN], []) ++ ++ ++# _LT_HEADER_DLFCN ++# ---------------- ++m4_defun([_LT_HEADER_DLFCN], ++[AC_CHECK_HEADERS([dlfcn.h], [], [], [AC_INCLUDES_DEFAULT])dnl ++])# _LT_HEADER_DLFCN ++ ++ ++# _LT_TRY_DLOPEN_SELF (ACTION-IF-TRUE, ACTION-IF-TRUE-W-USCORE, ++# ACTION-IF-FALSE, ACTION-IF-CROSS-COMPILING) ++# ---------------------------------------------------------------- ++m4_defun([_LT_TRY_DLOPEN_SELF], ++[m4_require([_LT_HEADER_DLFCN])dnl ++if test yes = "$cross_compiling"; then : ++ [$4] ++else ++ lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 ++ lt_status=$lt_dlunknown ++ cat > conftest.$ac_ext <<_LT_EOF ++[#line $LINENO "configure" ++#include "confdefs.h" ++ ++#if HAVE_DLFCN_H ++#include ++#endif ++ ++#include ++ ++#ifdef RTLD_GLOBAL ++# define LT_DLGLOBAL RTLD_GLOBAL ++#else ++# ifdef DL_GLOBAL ++# define LT_DLGLOBAL DL_GLOBAL ++# else ++# define LT_DLGLOBAL 0 ++# endif ++#endif ++ ++/* We may have to define LT_DLLAZY_OR_NOW in the command line if we ++ find out it does not work in some platform. 
*/ ++#ifndef LT_DLLAZY_OR_NOW ++# ifdef RTLD_LAZY ++# define LT_DLLAZY_OR_NOW RTLD_LAZY ++# else ++# ifdef DL_LAZY ++# define LT_DLLAZY_OR_NOW DL_LAZY ++# else ++# ifdef RTLD_NOW ++# define LT_DLLAZY_OR_NOW RTLD_NOW ++# else ++# ifdef DL_NOW ++# define LT_DLLAZY_OR_NOW DL_NOW ++# else ++# define LT_DLLAZY_OR_NOW 0 ++# endif ++# endif ++# endif ++# endif ++#endif ++ ++/* When -fvisibility=hidden is used, assume the code has been annotated ++ correspondingly for the symbols needed. */ ++#if defined __GNUC__ && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) ++int fnord () __attribute__((visibility("default"))); ++#endif ++ ++int fnord () { return 42; } ++int main () ++{ ++ void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); ++ int status = $lt_dlunknown; ++ ++ if (self) ++ { ++ if (dlsym (self,"fnord")) status = $lt_dlno_uscore; ++ else ++ { ++ if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; ++ else puts (dlerror ()); ++ } ++ /* dlclose (self); */ ++ } ++ else ++ puts (dlerror ()); ++ ++ return status; ++}] ++_LT_EOF ++ if AC_TRY_EVAL(ac_link) && test -s "conftest$ac_exeext" 2>/dev/null; then ++ (./conftest; exit; ) >&AS_MESSAGE_LOG_FD 2>/dev/null ++ lt_status=$? 
++ case x$lt_status in ++ x$lt_dlno_uscore) $1 ;; ++ x$lt_dlneed_uscore) $2 ;; ++ x$lt_dlunknown|x*) $3 ;; ++ esac ++ else : ++ # compilation failed ++ $3 ++ fi ++fi ++rm -fr conftest* ++])# _LT_TRY_DLOPEN_SELF ++ ++ ++# LT_SYS_DLOPEN_SELF ++# ------------------ ++AC_DEFUN([LT_SYS_DLOPEN_SELF], ++[m4_require([_LT_HEADER_DLFCN])dnl ++if test yes != "$enable_dlopen"; then ++ enable_dlopen=unknown ++ enable_dlopen_self=unknown ++ enable_dlopen_self_static=unknown ++else ++ lt_cv_dlopen=no ++ lt_cv_dlopen_libs= ++ ++ case $host_os in ++ beos*) ++ lt_cv_dlopen=load_add_on ++ lt_cv_dlopen_libs= ++ lt_cv_dlopen_self=yes ++ ;; ++ ++ mingw* | pw32* | cegcc*) ++ lt_cv_dlopen=LoadLibrary ++ lt_cv_dlopen_libs= ++ ;; ++ ++ cygwin*) ++ lt_cv_dlopen=dlopen ++ lt_cv_dlopen_libs= ++ ;; ++ ++ darwin*) ++ # if libdl is installed we need to link against it ++ AC_CHECK_LIB([dl], [dlopen], ++ [lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl],[ ++ lt_cv_dlopen=dyld ++ lt_cv_dlopen_libs= ++ lt_cv_dlopen_self=yes ++ ]) ++ ;; ++ ++ tpf*) ++ # Don't try to run any link tests for TPF. We know it's impossible ++ # because TPF is a cross-compiler, and we know how we open DSOs. 
++ lt_cv_dlopen=dlopen ++ lt_cv_dlopen_libs= ++ lt_cv_dlopen_self=no ++ ;; ++ ++ *) ++ AC_CHECK_FUNC([shl_load], ++ [lt_cv_dlopen=shl_load], ++ [AC_CHECK_LIB([dld], [shl_load], ++ [lt_cv_dlopen=shl_load lt_cv_dlopen_libs=-ldld], ++ [AC_CHECK_FUNC([dlopen], ++ [lt_cv_dlopen=dlopen], ++ [AC_CHECK_LIB([dl], [dlopen], ++ [lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl], ++ [AC_CHECK_LIB([svld], [dlopen], ++ [lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-lsvld], ++ [AC_CHECK_LIB([dld], [dld_link], ++ [lt_cv_dlopen=dld_link lt_cv_dlopen_libs=-ldld]) ++ ]) ++ ]) ++ ]) ++ ]) ++ ]) ++ ;; ++ esac ++ ++ if test no = "$lt_cv_dlopen"; then ++ enable_dlopen=no ++ else ++ enable_dlopen=yes ++ fi ++ ++ case $lt_cv_dlopen in ++ dlopen) ++ save_CPPFLAGS=$CPPFLAGS ++ test yes = "$ac_cv_header_dlfcn_h" && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" ++ ++ save_LDFLAGS=$LDFLAGS ++ wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" ++ ++ save_LIBS=$LIBS ++ LIBS="$lt_cv_dlopen_libs $LIBS" ++ ++ AC_CACHE_CHECK([whether a program can dlopen itself], ++ lt_cv_dlopen_self, [dnl ++ _LT_TRY_DLOPEN_SELF( ++ lt_cv_dlopen_self=yes, lt_cv_dlopen_self=yes, ++ lt_cv_dlopen_self=no, lt_cv_dlopen_self=cross) ++ ]) ++ ++ if test yes = "$lt_cv_dlopen_self"; then ++ wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\" ++ AC_CACHE_CHECK([whether a statically linked program can dlopen itself], ++ lt_cv_dlopen_self_static, [dnl ++ _LT_TRY_DLOPEN_SELF( ++ lt_cv_dlopen_self_static=yes, lt_cv_dlopen_self_static=yes, ++ lt_cv_dlopen_self_static=no, lt_cv_dlopen_self_static=cross) ++ ]) ++ fi ++ ++ CPPFLAGS=$save_CPPFLAGS ++ LDFLAGS=$save_LDFLAGS ++ LIBS=$save_LIBS ++ ;; ++ esac ++ ++ case $lt_cv_dlopen_self in ++ yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; ++ *) enable_dlopen_self=unknown ;; ++ esac ++ ++ case $lt_cv_dlopen_self_static in ++ yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; ++ *) enable_dlopen_self_static=unknown ;; ++ esac ++fi 
++_LT_DECL([dlopen_support], [enable_dlopen], [0], ++ [Whether dlopen is supported]) ++_LT_DECL([dlopen_self], [enable_dlopen_self], [0], ++ [Whether dlopen of programs is supported]) ++_LT_DECL([dlopen_self_static], [enable_dlopen_self_static], [0], ++ [Whether dlopen of statically linked programs is supported]) ++])# LT_SYS_DLOPEN_SELF ++ ++# Old name: ++AU_ALIAS([AC_LIBTOOL_DLOPEN_SELF], [LT_SYS_DLOPEN_SELF]) ++dnl aclocal-1.4 backwards compatibility: ++dnl AC_DEFUN([AC_LIBTOOL_DLOPEN_SELF], []) ++ ++ ++# _LT_COMPILER_C_O([TAGNAME]) ++# --------------------------- ++# Check to see if options -c and -o are simultaneously supported by compiler. ++# This macro does not hard code the compiler like AC_PROG_CC_C_O. ++m4_defun([_LT_COMPILER_C_O], ++[m4_require([_LT_DECL_SED])dnl ++m4_require([_LT_FILEUTILS_DEFAULTS])dnl ++m4_require([_LT_TAG_COMPILER])dnl ++AC_CACHE_CHECK([if $compiler supports -c -o file.$ac_objext], ++ [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)], ++ [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=no ++ $RM -r conftest 2>/dev/null ++ mkdir conftest ++ cd conftest ++ mkdir out ++ echo "$lt_simple_compile_test_code" > conftest.$ac_ext ++ ++ lt_compiler_flag="-o out/conftest2.$ac_objext" ++ # Insert the option either (1) after the last *FLAGS variable, or ++ # (2) before a word containing "conftest.", or (3) at the end. ++ # Note that $ac_compile itself does not contain backslashes and begins ++ # with a dollar sign (not a hyphen), so the echo should work correctly. ++ lt_compile=`echo "$ac_compile" | $SED \ ++ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ ++ -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \ ++ -e 's:$: $lt_compiler_flag:'` ++ (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&AS_MESSAGE_LOG_FD) ++ (eval "$lt_compile" 2>out/conftest.err) ++ ac_status=$? ++ cat out/conftest.err >&AS_MESSAGE_LOG_FD ++ echo "$as_me:$LINENO: \$? 
= $ac_status" >&AS_MESSAGE_LOG_FD ++ if (exit $ac_status) && test -s out/conftest2.$ac_objext ++ then ++ # The compiler can only warn and ignore the option if not recognized ++ # So say no if there are warnings ++ $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp ++ $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 ++ if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then ++ _LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes ++ fi ++ fi ++ chmod u+w . 2>&AS_MESSAGE_LOG_FD ++ $RM conftest* ++ # SGI C++ compiler will create directory out/ii_files/ for ++ # template instantiation ++ test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files ++ $RM out/* && rmdir out ++ cd .. ++ $RM -r conftest ++ $RM conftest* ++]) ++_LT_TAGDECL([compiler_c_o], [lt_cv_prog_compiler_c_o], [1], ++ [Does compiler simultaneously support -c and -o options?]) ++])# _LT_COMPILER_C_O ++ ++ ++# _LT_COMPILER_FILE_LOCKS([TAGNAME]) ++# ---------------------------------- ++# Check to see if we can do hard links to lock some files if needed ++m4_defun([_LT_COMPILER_FILE_LOCKS], ++[m4_require([_LT_ENABLE_LOCK])dnl ++m4_require([_LT_FILEUTILS_DEFAULTS])dnl ++_LT_COMPILER_C_O([$1]) ++ ++hard_links=nottested ++if test no = "$_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)" && test no != "$need_locks"; then ++ # do not overwrite the value of need_locks provided by the user ++ AC_MSG_CHECKING([if we can lock with hard links]) ++ hard_links=yes ++ $RM conftest* ++ ln conftest.a conftest.b 2>/dev/null && hard_links=no ++ touch conftest.a ++ ln conftest.a conftest.b 2>&5 || hard_links=no ++ ln conftest.a conftest.b 2>/dev/null && hard_links=no ++ AC_MSG_RESULT([$hard_links]) ++ if test no = "$hard_links"; then ++ AC_MSG_WARN(['$CC' does not support '-c -o', so 'make -j' may be unsafe]) ++ need_locks=warn ++ fi ++else ++ need_locks=no ++fi ++_LT_DECL([], [need_locks], [1], [Must we lock files when doing compilation?]) ++])# _LT_COMPILER_FILE_LOCKS ++ ++ ++# 
_LT_CHECK_OBJDIR ++# ---------------- ++m4_defun([_LT_CHECK_OBJDIR], ++[AC_CACHE_CHECK([for objdir], [lt_cv_objdir], ++[rm -f .libs 2>/dev/null ++mkdir .libs 2>/dev/null ++if test -d .libs; then ++ lt_cv_objdir=.libs ++else ++ # MS-DOS does not allow filenames that begin with a dot. ++ lt_cv_objdir=_libs ++fi ++rmdir .libs 2>/dev/null]) ++objdir=$lt_cv_objdir ++_LT_DECL([], [objdir], [0], ++ [The name of the directory that contains temporary libtool files])dnl ++m4_pattern_allow([LT_OBJDIR])dnl ++AC_DEFINE_UNQUOTED([LT_OBJDIR], "$lt_cv_objdir/", ++ [Define to the sub-directory where libtool stores uninstalled libraries.]) ++])# _LT_CHECK_OBJDIR ++ ++ ++# _LT_LINKER_HARDCODE_LIBPATH([TAGNAME]) ++# -------------------------------------- ++# Check hardcoding attributes. ++m4_defun([_LT_LINKER_HARDCODE_LIBPATH], ++[AC_MSG_CHECKING([how to hardcode library paths into programs]) ++_LT_TAGVAR(hardcode_action, $1)= ++if test -n "$_LT_TAGVAR(hardcode_libdir_flag_spec, $1)" || ++ test -n "$_LT_TAGVAR(runpath_var, $1)" || ++ test yes = "$_LT_TAGVAR(hardcode_automatic, $1)"; then ++ ++ # We can hardcode non-existent directories. ++ if test no != "$_LT_TAGVAR(hardcode_direct, $1)" && ++ # If the only mechanism to avoid hardcoding is shlibpath_var, we ++ # have to relink, otherwise we might link with an installed library ++ # when we should be linking with a yet-to-be-installed one ++ ## test no != "$_LT_TAGVAR(hardcode_shlibpath_var, $1)" && ++ test no != "$_LT_TAGVAR(hardcode_minus_L, $1)"; then ++ # Linking always hardcodes the temporary library directory. ++ _LT_TAGVAR(hardcode_action, $1)=relink ++ else ++ # We can link without hardcoding, and we can hardcode nonexisting dirs. ++ _LT_TAGVAR(hardcode_action, $1)=immediate ++ fi ++else ++ # We cannot hardcode anything, or else we can only hardcode existing ++ # directories. 
++ _LT_TAGVAR(hardcode_action, $1)=unsupported ++fi ++AC_MSG_RESULT([$_LT_TAGVAR(hardcode_action, $1)]) ++ ++if test relink = "$_LT_TAGVAR(hardcode_action, $1)" || ++ test yes = "$_LT_TAGVAR(inherit_rpath, $1)"; then ++ # Fast installation is not supported ++ enable_fast_install=no ++elif test yes = "$shlibpath_overrides_runpath" || ++ test no = "$enable_shared"; then ++ # Fast installation is not necessary ++ enable_fast_install=needless ++fi ++_LT_TAGDECL([], [hardcode_action], [0], ++ [How to hardcode a shared library path into an executable]) ++])# _LT_LINKER_HARDCODE_LIBPATH ++ ++ ++# _LT_CMD_STRIPLIB ++# ---------------- ++m4_defun([_LT_CMD_STRIPLIB], ++[m4_require([_LT_DECL_EGREP]) ++striplib= ++old_striplib= ++AC_MSG_CHECKING([whether stripping libraries is possible]) ++if test -z "$STRIP"; then ++ AC_MSG_RESULT([no]) ++else ++ if $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then ++ old_striplib="$STRIP --strip-debug" ++ striplib="$STRIP --strip-unneeded" ++ AC_MSG_RESULT([yes]) ++ else ++ case $host_os in ++ darwin*) ++ # FIXME - insert some real tests, host_os isn't really good enough ++ striplib="$STRIP -x" ++ old_striplib="$STRIP -S" ++ AC_MSG_RESULT([yes]) ++ ;; ++ freebsd*) ++ if $STRIP -V 2>&1 | $GREP "elftoolchain" >/dev/null; then ++ old_striplib="$STRIP --strip-debug" ++ striplib="$STRIP --strip-unneeded" ++ AC_MSG_RESULT([yes]) ++ else ++ AC_MSG_RESULT([no]) ++ fi ++ ;; ++ *) ++ AC_MSG_RESULT([no]) ++ ;; ++ esac ++ fi ++fi ++_LT_DECL([], [old_striplib], [1], [Commands to strip libraries]) ++_LT_DECL([], [striplib], [1]) ++])# _LT_CMD_STRIPLIB ++ ++ ++# _LT_PREPARE_MUNGE_PATH_LIST ++# --------------------------- ++# Make sure func_munge_path_list() is defined correctly. 
++m4_defun([_LT_PREPARE_MUNGE_PATH_LIST], ++[[# func_munge_path_list VARIABLE PATH ++# ----------------------------------- ++# VARIABLE is name of variable containing _space_ separated list of ++# directories to be munged by the contents of PATH, which is string ++# having a format: ++# "DIR[:DIR]:" ++# string "DIR[ DIR]" will be prepended to VARIABLE ++# ":DIR[:DIR]" ++# string "DIR[ DIR]" will be appended to VARIABLE ++# "DIRP[:DIRP]::[DIRA:]DIRA" ++# string "DIRP[ DIRP]" will be prepended to VARIABLE and string ++# "DIRA[ DIRA]" will be appended to VARIABLE ++# "DIR[:DIR]" ++# VARIABLE will be replaced by "DIR[ DIR]" ++func_munge_path_list () ++{ ++ case x@S|@2 in ++ x) ++ ;; ++ *:) ++ eval @S|@1=\"`$ECHO @S|@2 | $SED 's/:/ /g'` \@S|@@S|@1\" ++ ;; ++ x:*) ++ eval @S|@1=\"\@S|@@S|@1 `$ECHO @S|@2 | $SED 's/:/ /g'`\" ++ ;; ++ *::*) ++ eval @S|@1=\"\@S|@@S|@1\ `$ECHO @S|@2 | $SED -e 's/.*:://' -e 's/:/ /g'`\" ++ eval @S|@1=\"`$ECHO @S|@2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \@S|@@S|@1\" ++ ;; ++ *) ++ eval @S|@1=\"`$ECHO @S|@2 | $SED 's/:/ /g'`\" ++ ;; ++ esac ++} ++]])# _LT_PREPARE_PATH_LIST ++ ++ ++# _LT_SYS_DYNAMIC_LINKER([TAG]) ++# ----------------------------- ++# PORTME Fill in your ld.so characteristics ++m4_defun([_LT_SYS_DYNAMIC_LINKER], ++[AC_REQUIRE([AC_CANONICAL_HOST])dnl ++m4_require([_LT_DECL_EGREP])dnl ++m4_require([_LT_FILEUTILS_DEFAULTS])dnl ++m4_require([_LT_DECL_OBJDUMP])dnl ++m4_require([_LT_DECL_SED])dnl ++m4_require([_LT_CHECK_SHELL_FEATURES])dnl ++m4_require([_LT_PREPARE_MUNGE_PATH_LIST])dnl ++AC_MSG_CHECKING([dynamic linker characteristics]) ++m4_if([$1], ++ [], [ ++if test yes = "$GCC"; then ++ case $host_os in ++ darwin*) lt_awk_arg='/^libraries:/,/LR/' ;; ++ *) lt_awk_arg='/^libraries:/' ;; ++ esac ++ case $host_os in ++ mingw* | cegcc*) lt_sed_strip_eq='s|=\([[A-Za-z]]:\)|\1|g' ;; ++ *) lt_sed_strip_eq='s|=/|/|g' ;; ++ esac ++ lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e $lt_sed_strip_eq` ++ 
case $lt_search_path_spec in ++ *\;*) ++ # if the path contains ";" then we assume it to be the separator ++ # otherwise default to the standard path separator (i.e. ":") - it is ++ # assumed that no part of a normal pathname contains ";" but that should ++ # okay in the real world where ";" in dirpaths is itself problematic. ++ lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED 's/;/ /g'` ++ ;; ++ *) ++ lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED "s/$PATH_SEPARATOR/ /g"` ++ ;; ++ esac ++ # Ok, now we have the path, separated by spaces, we can step through it ++ # and add multilib dir if necessary... ++ lt_tmp_lt_search_path_spec= ++ lt_multi_os_dir=/`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null` ++ # ...but if some path component already ends with the multilib dir we assume ++ # that all is fine and trust -print-search-dirs as is (GCC 4.2? or newer). ++ case "$lt_multi_os_dir; $lt_search_path_spec " in ++ "/; "* | "/.; "* | "/./; "* | *"$lt_multi_os_dir "* | *"$lt_multi_os_dir/ "*) ++ lt_multi_os_dir= ++ ;; ++ esac ++ for lt_sys_path in $lt_search_path_spec; do ++ if test -d "$lt_sys_path$lt_multi_os_dir"; then ++ lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path$lt_multi_os_dir" ++ elif test -n "$lt_multi_os_dir"; then ++ test -d "$lt_sys_path" && \ ++ lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path" ++ fi ++ done ++ lt_search_path_spec=`$ECHO "$lt_tmp_lt_search_path_spec" | awk ' ++BEGIN {RS = " "; FS = "/|\n";} { ++ lt_foo = ""; ++ lt_count = 0; ++ for (lt_i = NF; lt_i > 0; lt_i--) { ++ if ($lt_i != "" && $lt_i != ".") { ++ if ($lt_i == "..") { ++ lt_count++; ++ } else { ++ if (lt_count == 0) { ++ lt_foo = "/" $lt_i lt_foo; ++ } else { ++ lt_count--; ++ } ++ } ++ } ++ } ++ if (lt_foo != "") { lt_freq[[lt_foo]]++; } ++ if (lt_freq[[lt_foo]] == 1) { print lt_foo; } ++}'` ++ # AWK program above erroneously prepends '/' to C:/dos/paths ++ # for these hosts. 
++ case $host_os in ++ mingw* | cegcc*) lt_search_path_spec=`$ECHO "$lt_search_path_spec" |\ ++ $SED 's|/\([[A-Za-z]]:\)|\1|g'` ;; ++ esac ++ sys_lib_search_path_spec=`$ECHO "$lt_search_path_spec" | $lt_NL2SP` ++else ++ sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" ++fi]) ++library_names_spec= ++libname_spec='lib$name' ++soname_spec= ++shrext_cmds=.so ++postinstall_cmds= ++postuninstall_cmds= ++finish_cmds= ++finish_eval= ++shlibpath_var= ++shlibpath_overrides_runpath=unknown ++version_type=none ++dynamic_linker="$host_os ld.so" ++sys_lib_dlsearch_path_spec="/lib /usr/lib" ++need_lib_prefix=unknown ++hardcode_into_libs=no ++ ++# when you set need_version to no, make sure it does not cause -set_version ++# flags to be left without arguments ++need_version=unknown ++ ++AC_ARG_VAR([LT_SYS_LIBRARY_PATH], ++[User-defined run-time library search path.]) ++ ++case $host_os in ++aix3*) ++ version_type=linux # correct to gnu/linux during the next big refactor ++ library_names_spec='$libname$release$shared_ext$versuffix $libname.a' ++ shlibpath_var=LIBPATH ++ ++ # AIX 3 has no versioning support, so we append a major version to the name. ++ soname_spec='$libname$release$shared_ext$major' ++ ;; ++ ++aix[[4-9]]*) ++ version_type=linux # correct to gnu/linux during the next big refactor ++ need_lib_prefix=no ++ need_version=no ++ hardcode_into_libs=yes ++ if test ia64 = "$host_cpu"; then ++ # AIX 5 supports IA64 ++ library_names_spec='$libname$release$shared_ext$major $libname$release$shared_ext$versuffix $libname$shared_ext' ++ shlibpath_var=LD_LIBRARY_PATH ++ else ++ # With GCC up to 2.95.x, collect2 would create an import file ++ # for dependence libraries. The import file would start with ++ # the line '#! .'. This would cause the generated library to ++ # depend on '.', always an invalid library. This was fixed in ++ # development snapshots of GCC prior to 3.0. 
++ case $host_os in ++ aix4 | aix4.[[01]] | aix4.[[01]].*) ++ if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' ++ echo ' yes ' ++ echo '#endif'; } | $CC -E - | $GREP yes > /dev/null; then ++ : ++ else ++ can_build_shared=no ++ fi ++ ;; ++ esac ++ # Using Import Files as archive members, it is possible to support ++ # filename-based versioning of shared library archives on AIX. While ++ # this would work for both with and without runtime linking, it will ++ # prevent static linking of such archives. So we do filename-based ++ # shared library versioning with .so extension only, which is used ++ # when both runtime linking and shared linking is enabled. ++ # Unfortunately, runtime linking may impact performance, so we do ++ # not want this to be the default eventually. Also, we use the ++ # versioned .so libs for executables only if there is the -brtl ++ # linker flag in LDFLAGS as well, or --with-aix-soname=svr4 only. ++ # To allow for filename-based versioning support, we need to create ++ # libNAME.so.V as an archive file, containing: ++ # *) an Import File, referring to the versioned filename of the ++ # archive as well as the shared archive member, telling the ++ # bitwidth (32 or 64) of that shared object, and providing the ++ # list of exported symbols of that shared object, eventually ++ # decorated with the 'weak' keyword ++ # *) the shared object with the F_LOADONLY flag set, to really avoid ++ # it being seen by the linker. ++ # At run time we better use the real file rather than another symlink, ++ # but for link time we create the symlink libNAME.so -> libNAME.so.V ++ ++ case $with_aix_soname,$aix_use_runtimelinking in ++ # AIX (on Power*) has no versioning support, so currently we cannot hardcode correct ++ # soname into executable. Probably we can add versioning support to ++ # collect2, so additional links can be useful in future. 
++ aix,yes) # traditional libtool ++ dynamic_linker='AIX unversionable lib.so' ++ # If using run time linking (on AIX 4.2 or later) use lib.so ++ # instead of lib.a to let people know that these are not ++ # typical AIX shared libraries. ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ ;; ++ aix,no) # traditional AIX only ++ dynamic_linker='AIX lib.a[(]lib.so.V[)]' ++ # We preserve .a as extension for shared libraries through AIX4.2 ++ # and later when we are not doing run time linking. ++ library_names_spec='$libname$release.a $libname.a' ++ soname_spec='$libname$release$shared_ext$major' ++ ;; ++ svr4,*) # full svr4 only ++ dynamic_linker="AIX lib.so.V[(]$shared_archive_member_spec.o[)]" ++ library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' ++ # We do not specify a path in Import Files, so LIBPATH fires. ++ shlibpath_overrides_runpath=yes ++ ;; ++ *,yes) # both, prefer svr4 ++ dynamic_linker="AIX lib.so.V[(]$shared_archive_member_spec.o[)], lib.a[(]lib.so.V[)]" ++ library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' ++ # unpreferred sharedlib libNAME.a needs extra handling ++ postinstall_cmds='test -n "$linkname" || linkname="$realname"~func_stripname "" ".so" "$linkname"~$install_shared_prog "$dir/$func_stripname_result.$libext" "$destdir/$func_stripname_result.$libext"~test -z "$tstripme" || test -z "$striplib" || $striplib "$destdir/$func_stripname_result.$libext"' ++ postuninstall_cmds='for n in $library_names $old_library; do :; done~func_stripname "" ".so" "$n"~test "$func_stripname_result" = "$n" || func_append rmfiles " $odir/$func_stripname_result.$libext"' ++ # We do not specify a path in Import Files, so LIBPATH fires. 
++ shlibpath_overrides_runpath=yes ++ ;; ++ *,no) # both, prefer aix ++ dynamic_linker="AIX lib.a[(]lib.so.V[)], lib.so.V[(]$shared_archive_member_spec.o[)]" ++ library_names_spec='$libname$release.a $libname.a' ++ soname_spec='$libname$release$shared_ext$major' ++ # unpreferred sharedlib libNAME.so.V and symlink libNAME.so need extra handling ++ postinstall_cmds='test -z "$dlname" || $install_shared_prog $dir/$dlname $destdir/$dlname~test -z "$tstripme" || test -z "$striplib" || $striplib $destdir/$dlname~test -n "$linkname" || linkname=$realname~func_stripname "" ".a" "$linkname"~(cd "$destdir" && $LN_S -f $dlname $func_stripname_result.so)' ++ postuninstall_cmds='test -z "$dlname" || func_append rmfiles " $odir/$dlname"~for n in $old_library $library_names; do :; done~func_stripname "" ".a" "$n"~func_append rmfiles " $odir/$func_stripname_result.so"' ++ ;; ++ esac ++ shlibpath_var=LIBPATH ++ fi ++ ;; ++ ++amigaos*) ++ case $host_cpu in ++ powerpc) ++ # Since July 2007 AmigaOS4 officially supports .so libraries. ++ # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ ;; ++ m68k) ++ library_names_spec='$libname.ixlibrary $libname.a' ++ # Create ${libname}_ixlibrary.a entries in /sys/libs. 
++ finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([[^/]]*\)\.ixlibrary$%\1%'\''`; $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' ++ ;; ++ esac ++ ;; ++ ++beos*) ++ library_names_spec='$libname$shared_ext' ++ dynamic_linker="$host_os ld.so" ++ shlibpath_var=LIBRARY_PATH ++ ;; ++ ++bsdi[[45]]*) ++ version_type=linux # correct to gnu/linux during the next big refactor ++ need_version=no ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' ++ shlibpath_var=LD_LIBRARY_PATH ++ sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" ++ sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" ++ # the default ld.so.conf also contains /usr/contrib/lib and ++ # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow ++ # libtool to hard-code these into programs ++ ;; ++ ++cygwin* | mingw* | pw32* | cegcc*) ++ version_type=windows ++ shrext_cmds=.dll ++ need_version=no ++ need_lib_prefix=no ++ ++ case $GCC,$cc_basename in ++ yes,*) ++ # gcc ++ library_names_spec='$libname.dll.a' ++ # DLL is installed to $(libdir)/../bin by postinstall_cmds ++ postinstall_cmds='base_file=`basename \$file`~ ++ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ ++ dldir=$destdir/`dirname \$dlpath`~ ++ test -d \$dldir || mkdir -p \$dldir~ ++ $install_prog $dir/$dlname \$dldir/$dlname~ ++ chmod a+x \$dldir/$dlname~ ++ if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then ++ eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; ++ fi' ++ postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; echo \$dlname'\''`~ ++ dlpath=$dir/\$dldll~ ++ $RM \$dlpath' ++ shlibpath_overrides_runpath=yes ++ ++ case $host_os in ++ cygwin*) ++ # Cygwin DLLs use 'cyg' prefix rather than 'lib' ++ soname_spec='`echo $libname | $SED -e 's/^lib/cyg/'``echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext' ++m4_if([$1], [],[ ++ sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/lib/w32api"]) ++ ;; ++ mingw* | cegcc*) ++ # MinGW DLLs use traditional 'lib' prefix ++ soname_spec='$libname`echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext' ++ ;; ++ pw32*) ++ # pw32 DLLs use 'pw' prefix rather than 'lib' ++ library_names_spec='`echo $libname | $SED -e 's/^lib/pw/'``echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext' ++ ;; ++ esac ++ dynamic_linker='Win32 ld.exe' ++ ;; ++ ++ *,cl* | *,icl*) ++ # Native MSVC or ICC ++ libname_spec='$name' ++ soname_spec='$libname`echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext' ++ library_names_spec='$libname.dll.lib' ++ ++ case $build_os in ++ mingw*) ++ sys_lib_search_path_spec= ++ lt_save_ifs=$IFS ++ IFS=';' ++ for lt_path in $LIB ++ do ++ IFS=$lt_save_ifs ++ # Let DOS variable expansion print the short 8.3 style file name. ++ lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` ++ sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" ++ done ++ IFS=$lt_save_ifs ++ # Convert to MSYS style. ++ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's|\\\\|/|g' -e 's| \\([[a-zA-Z]]\\):| /\\1|g' -e 's|^ ||'` ++ ;; ++ cygwin*) ++ # Convert to unix form, then to dos form, then back to unix form ++ # but this time dos style (no spaces!) so that the unix form looks ++ # like /cygdrive/c/PROGRA~1:/cygdr... 
++ sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` ++ sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` ++ sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` ++ ;; ++ *) ++ sys_lib_search_path_spec=$LIB ++ if $ECHO "$sys_lib_search_path_spec" | [$GREP ';[c-zC-Z]:/' >/dev/null]; then ++ # It is most probably a Windows format PATH. ++ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` ++ else ++ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` ++ fi ++ # FIXME: find the short name or the path components, as spaces are ++ # common. (e.g. "Program Files" -> "PROGRA~1") ++ ;; ++ esac ++ ++ # DLL is installed to $(libdir)/../bin by postinstall_cmds ++ postinstall_cmds='base_file=`basename \$file`~ ++ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ ++ dldir=$destdir/`dirname \$dlpath`~ ++ test -d \$dldir || mkdir -p \$dldir~ ++ $install_prog $dir/$dlname \$dldir/$dlname' ++ postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ ++ dlpath=$dir/\$dldll~ ++ $RM \$dlpath' ++ shlibpath_overrides_runpath=yes ++ dynamic_linker='Win32 link.exe' ++ ;; ++ ++ *) ++ # Assume MSVC and ICC wrapper ++ library_names_spec='$libname`echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext $libname.lib' ++ dynamic_linker='Win32 ld.exe' ++ ;; ++ esac ++ # FIXME: first we should search . 
and the directory the executable is in ++ shlibpath_var=PATH ++ ;; ++ ++darwin* | rhapsody*) ++ dynamic_linker="$host_os dyld" ++ version_type=darwin ++ need_lib_prefix=no ++ need_version=no ++ library_names_spec='$libname$release$major$shared_ext $libname$shared_ext' ++ soname_spec='$libname$release$major$shared_ext' ++ shlibpath_overrides_runpath=yes ++ shlibpath_var=DYLD_LIBRARY_PATH ++ shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' ++m4_if([$1], [],[ ++ sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib"]) ++ sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' ++ ;; ++ ++dgux*) ++ version_type=linux # correct to gnu/linux during the next big refactor ++ need_lib_prefix=no ++ need_version=no ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ shlibpath_var=LD_LIBRARY_PATH ++ ;; ++ ++freebsd* | dragonfly* | midnightbsd*) ++ # DragonFly does not have aout. When/if they implement a new ++ # versioning mechanism, adjust this. 
++ if test -x /usr/bin/objformat; then ++ objformat=`/usr/bin/objformat` ++ else ++ case $host_os in ++ freebsd[[23]].*) objformat=aout ;; ++ *) objformat=elf ;; ++ esac ++ fi ++ version_type=freebsd-$objformat ++ case $version_type in ++ freebsd-elf*) ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ need_version=no ++ need_lib_prefix=no ++ ;; ++ freebsd-*) ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' ++ need_version=yes ++ ;; ++ esac ++ shlibpath_var=LD_LIBRARY_PATH ++ case $host_os in ++ freebsd2.*) ++ shlibpath_overrides_runpath=yes ++ ;; ++ freebsd3.[[01]]* | freebsdelf3.[[01]]*) ++ shlibpath_overrides_runpath=yes ++ hardcode_into_libs=yes ++ ;; ++ freebsd3.[[2-9]]* | freebsdelf3.[[2-9]]* | \ ++ freebsd4.[[0-5]] | freebsdelf4.[[0-5]] | freebsd4.1.1 | freebsdelf4.1.1) ++ shlibpath_overrides_runpath=no ++ hardcode_into_libs=yes ++ ;; ++ *) # from 4.6 on, and DragonFly ++ shlibpath_overrides_runpath=yes ++ hardcode_into_libs=yes ++ ;; ++ esac ++ ;; ++ ++haiku*) ++ version_type=linux # correct to gnu/linux during the next big refactor ++ need_lib_prefix=no ++ need_version=no ++ dynamic_linker="$host_os runtime_loader" ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ shlibpath_var=LIBRARY_PATH ++ shlibpath_overrides_runpath=no ++ sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' ++ hardcode_into_libs=yes ++ ;; ++ ++hpux9* | hpux10* | hpux11*) ++ # Give a soname corresponding to the major version so that dld.sl refuses to ++ # link against other versions. 
++ version_type=sunos ++ need_lib_prefix=no ++ need_version=no ++ case $host_cpu in ++ ia64*) ++ shrext_cmds='.so' ++ hardcode_into_libs=yes ++ dynamic_linker="$host_os dld.so" ++ shlibpath_var=LD_LIBRARY_PATH ++ shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ if test 32 = "$HPUX_IA64_MODE"; then ++ sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" ++ sys_lib_dlsearch_path_spec=/usr/lib/hpux32 ++ else ++ sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" ++ sys_lib_dlsearch_path_spec=/usr/lib/hpux64 ++ fi ++ ;; ++ hppa*64*) ++ shrext_cmds='.sl' ++ hardcode_into_libs=yes ++ dynamic_linker="$host_os dld.sl" ++ shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH ++ shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" ++ sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ++ ;; ++ *) ++ shrext_cmds='.sl' ++ dynamic_linker="$host_os dld.sl" ++ shlibpath_var=SHLIB_PATH ++ shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ ;; ++ esac ++ # HP-UX runs *really* slowly unless shared libraries are mode 555, ... 
++ postinstall_cmds='chmod 555 $lib' ++ # or fails outright, so override atomically: ++ install_override_mode=555 ++ ;; ++ ++interix[[3-9]]*) ++ version_type=linux # correct to gnu/linux during the next big refactor ++ need_lib_prefix=no ++ need_version=no ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' ++ shlibpath_var=LD_LIBRARY_PATH ++ shlibpath_overrides_runpath=no ++ hardcode_into_libs=yes ++ ;; ++ ++irix5* | irix6* | nonstopux*) ++ case $host_os in ++ nonstopux*) version_type=nonstopux ;; ++ *) ++ if test yes = "$lt_cv_prog_gnu_ld"; then ++ version_type=linux # correct to gnu/linux during the next big refactor ++ else ++ version_type=irix ++ fi ;; ++ esac ++ need_lib_prefix=no ++ need_version=no ++ soname_spec='$libname$release$shared_ext$major' ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$release$shared_ext $libname$shared_ext' ++ case $host_os in ++ irix5* | nonstopux*) ++ libsuff= shlibsuff= ++ ;; ++ *) ++ case $LD in # libtool.m4 will add one of these switches to LD ++ *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") ++ libsuff= shlibsuff= libmagic=32-bit;; ++ *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") ++ libsuff=32 shlibsuff=N32 libmagic=N32;; ++ *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") ++ libsuff=64 shlibsuff=64 libmagic=64-bit;; ++ *) libsuff= shlibsuff= libmagic=never-match;; ++ esac ++ ;; ++ esac ++ shlibpath_var=LD_LIBRARY${shlibsuff}_PATH ++ shlibpath_overrides_runpath=no ++ sys_lib_search_path_spec="/usr/lib$libsuff /lib$libsuff /usr/local/lib$libsuff" ++ sys_lib_dlsearch_path_spec="/usr/lib$libsuff /lib$libsuff" ++ hardcode_into_libs=yes ++ ;; ++ ++# No shared lib support for Linux oldld, aout, or coff. 
++linux*oldld* | linux*aout* | linux*coff*) ++ dynamic_linker=no ++ ;; ++ ++linux*android*) ++ version_type=none # Android doesn't support versioned libraries. ++ need_lib_prefix=no ++ need_version=no ++ library_names_spec='$libname$release$shared_ext' ++ soname_spec='$libname$release$shared_ext' ++ finish_cmds= ++ shlibpath_var=LD_LIBRARY_PATH ++ shlibpath_overrides_runpath=yes ++ ++ # This implies no fast_install, which is unacceptable. ++ # Some rework will be needed to allow for fast_install ++ # before this can be enabled. ++ hardcode_into_libs=yes ++ ++ dynamic_linker='Android linker' ++ # Don't embed -rpath directories since the linker doesn't support them. ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' ++ ;; ++ ++# This must be glibc/ELF. ++linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) ++ version_type=linux # correct to gnu/linux during the next big refactor ++ need_lib_prefix=no ++ need_version=no ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' ++ shlibpath_var=LD_LIBRARY_PATH ++ shlibpath_overrides_runpath=no ++ ++ # Some binutils ld are patched to set DT_RUNPATH ++ AC_CACHE_VAL([lt_cv_shlibpath_overrides_runpath], ++ [lt_cv_shlibpath_overrides_runpath=no ++ save_LDFLAGS=$LDFLAGS ++ save_libdir=$libdir ++ eval "libdir=/foo; wl=\"$_LT_TAGVAR(lt_prog_compiler_wl, $1)\"; \ ++ LDFLAGS=\"\$LDFLAGS $_LT_TAGVAR(hardcode_libdir_flag_spec, $1)\"" ++ AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])], ++ [AS_IF([ ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null], ++ [lt_cv_shlibpath_overrides_runpath=yes])]) ++ LDFLAGS=$save_LDFLAGS ++ libdir=$save_libdir ++ ]) ++ shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath ++ ++ # This implies no fast_install, which is unacceptable. 
++ # Some rework will be needed to allow for fast_install ++ # before this can be enabled. ++ hardcode_into_libs=yes ++ ++ # Add ABI-specific directories to the system library path. ++ sys_lib_dlsearch_path_spec="/lib64 /usr/lib64 /lib /usr/lib" ++ ++ # Ideally, we could use ldconfig to report *all* directores which are ++ # searched for libraries, however this is still not possible. Aside from not ++ # being certain /sbin/ldconfig is available, command ++ # 'ldconfig -N -X -v | grep ^/' on 64bit Fedora does not report /usr/lib64, ++ # even though it is searched at run-time. Try to do the best guess by ++ # appending ld.so.conf contents (and includes) to the search path. ++ if test -f /etc/ld.so.conf; then ++ lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \[$]2)); skip = 1; } { if (!skip) print \[$]0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '` ++ sys_lib_dlsearch_path_spec="$sys_lib_dlsearch_path_spec $lt_ld_extra" ++ fi ++ ++ # We used to test for /lib/ld.so.1 and disable shared libraries on ++ # powerpc, because MkLinux only supported shared libraries with the ++ # GNU dynamic linker. Since this was broken with cross compilers, ++ # most powerpc-linux boxes support dynamic linking these days and ++ # people can always --disable-shared, the test was removed, and we ++ # assume the GNU/Linux dynamic linker is in use. 
++ dynamic_linker='GNU/Linux ld.so' ++ ;; ++ ++netbsd*) ++ version_type=sunos ++ need_lib_prefix=no ++ need_version=no ++ if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' ++ finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' ++ dynamic_linker='NetBSD (a.out) ld.so' ++ else ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ dynamic_linker='NetBSD ld.elf_so' ++ fi ++ shlibpath_var=LD_LIBRARY_PATH ++ shlibpath_overrides_runpath=yes ++ hardcode_into_libs=yes ++ ;; ++ ++newsos6) ++ version_type=linux # correct to gnu/linux during the next big refactor ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ shlibpath_var=LD_LIBRARY_PATH ++ shlibpath_overrides_runpath=yes ++ ;; ++ ++*nto* | *qnx*) ++ version_type=qnx ++ need_lib_prefix=no ++ need_version=no ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ shlibpath_var=LD_LIBRARY_PATH ++ shlibpath_overrides_runpath=no ++ hardcode_into_libs=yes ++ dynamic_linker='ldqnx.so' ++ ;; ++ ++openbsd* | bitrig*) ++ version_type=sunos ++ sys_lib_dlsearch_path_spec=/usr/lib ++ need_lib_prefix=no ++ if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then ++ need_version=no ++ else ++ need_version=yes ++ fi ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' ++ finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' ++ shlibpath_var=LD_LIBRARY_PATH ++ shlibpath_overrides_runpath=yes ++ ;; ++ ++os2*) ++ libname_spec='$name' ++ version_type=windows ++ shrext_cmds=.dll ++ need_version=no ++ need_lib_prefix=no ++ # OS/2 can only load a DLL with a base name of 8 characters or less. 
++ soname_spec='`test -n "$os2dllname" && libname="$os2dllname"; ++ v=$($ECHO $release$versuffix | tr -d .-); ++ n=$($ECHO $libname | cut -b -$((8 - ${#v})) | tr . _); ++ $ECHO $n$v`$shared_ext' ++ library_names_spec='${libname}_dll.$libext' ++ dynamic_linker='OS/2 ld.exe' ++ shlibpath_var=BEGINLIBPATH ++ sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" ++ sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ++ postinstall_cmds='base_file=`basename \$file`~ ++ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; $ECHO \$dlname'\''`~ ++ dldir=$destdir/`dirname \$dlpath`~ ++ test -d \$dldir || mkdir -p \$dldir~ ++ $install_prog $dir/$dlname \$dldir/$dlname~ ++ chmod a+x \$dldir/$dlname~ ++ if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then ++ eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; ++ fi' ++ postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; $ECHO \$dlname'\''`~ ++ dlpath=$dir/\$dldll~ ++ $RM \$dlpath' ++ ;; ++ ++osf3* | osf4* | osf5*) ++ version_type=osf ++ need_lib_prefix=no ++ need_version=no ++ soname_spec='$libname$release$shared_ext$major' ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ shlibpath_var=LD_LIBRARY_PATH ++ sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" ++ sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ++ ;; ++ ++rdos*) ++ dynamic_linker=no ++ ;; ++ ++solaris*) ++ version_type=linux # correct to gnu/linux during the next big refactor ++ need_lib_prefix=no ++ need_version=no ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ shlibpath_var=LD_LIBRARY_PATH ++ shlibpath_overrides_runpath=yes ++ hardcode_into_libs=yes ++ # ldd complains unless libraries are executable ++ postinstall_cmds='chmod +x $lib' ++ ;; ++ ++sunos4*) ++ version_type=sunos ++ 
library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' ++ finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' ++ shlibpath_var=LD_LIBRARY_PATH ++ shlibpath_overrides_runpath=yes ++ if test yes = "$with_gnu_ld"; then ++ need_lib_prefix=no ++ fi ++ need_version=yes ++ ;; ++ ++sysv4 | sysv4.3*) ++ version_type=linux # correct to gnu/linux during the next big refactor ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ shlibpath_var=LD_LIBRARY_PATH ++ case $host_vendor in ++ sni) ++ shlibpath_overrides_runpath=no ++ need_lib_prefix=no ++ runpath_var=LD_RUN_PATH ++ ;; ++ siemens) ++ need_lib_prefix=no ++ ;; ++ motorola) ++ need_lib_prefix=no ++ need_version=no ++ shlibpath_overrides_runpath=no ++ sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' ++ ;; ++ esac ++ ;; ++ ++sysv4*MP*) ++ if test -d /usr/nec; then ++ version_type=linux # correct to gnu/linux during the next big refactor ++ library_names_spec='$libname$shared_ext.$versuffix $libname$shared_ext.$major $libname$shared_ext' ++ soname_spec='$libname$shared_ext.$major' ++ shlibpath_var=LD_LIBRARY_PATH ++ fi ++ ;; ++ ++sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) ++ version_type=sco ++ need_lib_prefix=no ++ need_version=no ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ shlibpath_var=LD_LIBRARY_PATH ++ shlibpath_overrides_runpath=yes ++ hardcode_into_libs=yes ++ if test yes = "$with_gnu_ld"; then ++ sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' ++ else ++ sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' ++ case $host_os in ++ sco3.2v5*) ++ sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" ++ ;; ++ esac ++ fi ++ sys_lib_dlsearch_path_spec='/usr/lib' ++ ;; ++ ++tpf*) ++ # TPF is a 
cross-target only. Preferred cross-host = GNU/Linux. ++ version_type=linux # correct to gnu/linux during the next big refactor ++ need_lib_prefix=no ++ need_version=no ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ shlibpath_var=LD_LIBRARY_PATH ++ shlibpath_overrides_runpath=no ++ hardcode_into_libs=yes ++ ;; ++ ++uts4*) ++ version_type=linux # correct to gnu/linux during the next big refactor ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ shlibpath_var=LD_LIBRARY_PATH ++ ;; ++ ++*) ++ dynamic_linker=no ++ ;; ++esac ++AC_MSG_RESULT([$dynamic_linker]) ++test no = "$dynamic_linker" && can_build_shared=no ++ ++variables_saved_for_relink="PATH $shlibpath_var $runpath_var" ++if test yes = "$GCC"; then ++ variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" ++fi ++ ++if test set = "${lt_cv_sys_lib_search_path_spec+set}"; then ++ sys_lib_search_path_spec=$lt_cv_sys_lib_search_path_spec ++fi ++ ++if test set = "${lt_cv_sys_lib_dlsearch_path_spec+set}"; then ++ sys_lib_dlsearch_path_spec=$lt_cv_sys_lib_dlsearch_path_spec ++fi ++ ++# remember unaugmented sys_lib_dlsearch_path content for libtool script decls... ++configure_time_dlsearch_path=$sys_lib_dlsearch_path_spec ++ ++# ... 
but it needs LT_SYS_LIBRARY_PATH munging for other configure-time code ++func_munge_path_list sys_lib_dlsearch_path_spec "$LT_SYS_LIBRARY_PATH" ++ ++# to be used as default LT_SYS_LIBRARY_PATH value in generated libtool ++configure_time_lt_sys_library_path=$LT_SYS_LIBRARY_PATH ++ ++_LT_DECL([], [variables_saved_for_relink], [1], ++ [Variables whose values should be saved in libtool wrapper scripts and ++ restored at link time]) ++_LT_DECL([], [need_lib_prefix], [0], ++ [Do we need the "lib" prefix for modules?]) ++_LT_DECL([], [need_version], [0], [Do we need a version for libraries?]) ++_LT_DECL([], [version_type], [0], [Library versioning type]) ++_LT_DECL([], [runpath_var], [0], [Shared library runtime path variable]) ++_LT_DECL([], [shlibpath_var], [0],[Shared library path variable]) ++_LT_DECL([], [shlibpath_overrides_runpath], [0], ++ [Is shlibpath searched before the hard-coded library search path?]) ++_LT_DECL([], [libname_spec], [1], [Format of library name prefix]) ++_LT_DECL([], [library_names_spec], [1], ++ [[List of archive names. First name is the real one, the rest are links. 
++ The last name is the one that the linker finds with -lNAME]]) ++_LT_DECL([], [soname_spec], [1], ++ [[The coded name of the library, if different from the real name]]) ++_LT_DECL([], [install_override_mode], [1], ++ [Permission mode override for installation of shared libraries]) ++_LT_DECL([], [postinstall_cmds], [2], ++ [Command to use after installation of a shared archive]) ++_LT_DECL([], [postuninstall_cmds], [2], ++ [Command to use after uninstallation of a shared archive]) ++_LT_DECL([], [finish_cmds], [2], ++ [Commands used to finish a libtool library installation in a directory]) ++_LT_DECL([], [finish_eval], [1], ++ [[As "finish_cmds", except a single script fragment to be evaled but ++ not shown]]) ++_LT_DECL([], [hardcode_into_libs], [0], ++ [Whether we should hardcode library paths into libraries]) ++_LT_DECL([], [sys_lib_search_path_spec], [2], ++ [Compile-time system search path for libraries]) ++_LT_DECL([sys_lib_dlsearch_path_spec], [configure_time_dlsearch_path], [2], ++ [Detected run-time system search path for libraries]) ++_LT_DECL([], [configure_time_lt_sys_library_path], [2], ++ [Explicit LT_SYS_LIBRARY_PATH set during ./configure time]) ++])# _LT_SYS_DYNAMIC_LINKER ++ ++ ++# _LT_PATH_TOOL_PREFIX(TOOL) ++# -------------------------- ++# find a file program that can recognize shared library ++AC_DEFUN([_LT_PATH_TOOL_PREFIX], ++[m4_require([_LT_DECL_EGREP])dnl ++AC_MSG_CHECKING([for $1]) ++AC_CACHE_VAL(lt_cv_path_MAGIC_CMD, ++[case $MAGIC_CMD in ++[[\\/*] | ?:[\\/]*]) ++ lt_cv_path_MAGIC_CMD=$MAGIC_CMD # Let the user override the test with a path. ++ ;; ++*) ++ lt_save_MAGIC_CMD=$MAGIC_CMD ++ lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR ++dnl $ac_dummy forces splitting on constant user-supplied paths. ++dnl POSIX.2 word splitting is done only on the output of word expansions, ++dnl not every word. This closes a longstanding sh security hole. 
++ ac_dummy="m4_if([$2], , $PATH, [$2])" ++ for ac_dir in $ac_dummy; do ++ IFS=$lt_save_ifs ++ test -z "$ac_dir" && ac_dir=. ++ if test -f "$ac_dir/$1"; then ++ lt_cv_path_MAGIC_CMD=$ac_dir/"$1" ++ if test -n "$file_magic_test_file"; then ++ case $deplibs_check_method in ++ "file_magic "*) ++ file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` ++ MAGIC_CMD=$lt_cv_path_MAGIC_CMD ++ if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | ++ $EGREP "$file_magic_regex" > /dev/null; then ++ : ++ else ++ cat <<_LT_EOF 1>&2 ++ ++*** Warning: the command libtool uses to detect shared libraries, ++*** $file_magic_cmd, produces output that libtool cannot recognize. ++*** The result is that libtool may fail to recognize shared libraries ++*** as such. This will affect the creation of libtool libraries that ++*** depend on shared libraries, but programs linked with such libtool ++*** libraries will work regardless of this problem. Nevertheless, you ++*** may want to report the problem to your system manager and/or to ++*** bug-libtool@gnu.org ++ ++_LT_EOF ++ fi ;; ++ esac ++ fi ++ break ++ fi ++ done ++ IFS=$lt_save_ifs ++ MAGIC_CMD=$lt_save_MAGIC_CMD ++ ;; ++esac]) ++MAGIC_CMD=$lt_cv_path_MAGIC_CMD ++if test -n "$MAGIC_CMD"; then ++ AC_MSG_RESULT($MAGIC_CMD) ++else ++ AC_MSG_RESULT(no) ++fi ++_LT_DECL([], [MAGIC_CMD], [0], ++ [Used to examine libraries when file_magic_cmd begins with "file"])dnl ++])# _LT_PATH_TOOL_PREFIX ++ ++# Old name: ++AU_ALIAS([AC_PATH_TOOL_PREFIX], [_LT_PATH_TOOL_PREFIX]) ++dnl aclocal-1.4 backwards compatibility: ++dnl AC_DEFUN([AC_PATH_TOOL_PREFIX], []) ++ ++ ++# _LT_PATH_MAGIC ++# -------------- ++# find a file program that can recognize a shared library ++m4_defun([_LT_PATH_MAGIC], ++[_LT_PATH_TOOL_PREFIX(${ac_tool_prefix}file, /usr/bin$PATH_SEPARATOR$PATH) ++if test -z "$lt_cv_path_MAGIC_CMD"; then ++ if test -n "$ac_tool_prefix"; then ++ _LT_PATH_TOOL_PREFIX(file, /usr/bin$PATH_SEPARATOR$PATH) ++ else ++ MAGIC_CMD=: ++ fi 
++fi ++])# _LT_PATH_MAGIC ++ ++ ++# LT_PATH_LD ++# ---------- ++# find the pathname to the GNU or non-GNU linker ++AC_DEFUN([LT_PATH_LD], ++[AC_REQUIRE([AC_PROG_CC])dnl ++AC_REQUIRE([AC_CANONICAL_HOST])dnl ++AC_REQUIRE([AC_CANONICAL_BUILD])dnl ++m4_require([_LT_DECL_SED])dnl ++m4_require([_LT_DECL_EGREP])dnl ++m4_require([_LT_PROG_ECHO_BACKSLASH])dnl ++ ++AC_ARG_WITH([gnu-ld], ++ [AS_HELP_STRING([--with-gnu-ld], ++ [assume the C compiler uses GNU ld @<:@default=no@:>@])], ++ [test no = "$withval" || with_gnu_ld=yes], ++ [with_gnu_ld=no])dnl ++ ++ac_prog=ld ++if test yes = "$GCC"; then ++ # Check if gcc -print-prog-name=ld gives a path. ++ AC_MSG_CHECKING([for ld used by $CC]) ++ case $host in ++ *-*-mingw*) ++ # gcc leaves a trailing carriage return, which upsets mingw ++ ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; ++ *) ++ ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; ++ esac ++ case $ac_prog in ++ # Accept absolute paths. ++ [[\\/]]* | ?:[[\\/]]*) ++ re_direlt='/[[^/]][[^/]]*/\.\./' ++ # Canonicalize the pathname of ld ++ ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` ++ while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do ++ ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` ++ done ++ test -z "$LD" && LD=$ac_prog ++ ;; ++ "") ++ # If it fails, then pretend we aren't using GCC. ++ ac_prog=ld ++ ;; ++ *) ++ # If it is relative, then search for the first ld in PATH. ++ with_gnu_ld=unknown ++ ;; ++ esac ++elif test yes = "$with_gnu_ld"; then ++ AC_MSG_CHECKING([for GNU ld]) ++else ++ AC_MSG_CHECKING([for non-GNU ld]) ++fi ++AC_CACHE_VAL(lt_cv_path_LD, ++[if test -z "$LD"; then ++ lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR ++ for ac_dir in $PATH; do ++ IFS=$lt_save_ifs ++ test -z "$ac_dir" && ac_dir=. ++ if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then ++ lt_cv_path_LD=$ac_dir/$ac_prog ++ # Check to see if the program is GNU ld. I'd rather use --version, ++ # but apparently some variants of GNU ld only accept -v. 
++ # Break only if it was the GNU/non-GNU ld that we prefer. ++ case `"$lt_cv_path_LD" -v 2>&1 &1 conftest.i ++cat conftest.i conftest.i >conftest2.i ++: ${lt_DD:=$DD} ++AC_PATH_PROGS_FEATURE_CHECK([lt_DD], [dd], ++[if "$ac_path_lt_DD" bs=32 count=1 conftest.out 2>/dev/null; then ++ cmp -s conftest.i conftest.out \ ++ && ac_cv_path_lt_DD="$ac_path_lt_DD" ac_path_lt_DD_found=: ++fi]) ++rm -f conftest.i conftest2.i conftest.out]) ++])# _LT_PATH_DD ++ ++ ++# _LT_CMD_TRUNCATE ++# ---------------- ++# find command to truncate a binary pipe ++m4_defun([_LT_CMD_TRUNCATE], ++[m4_require([_LT_PATH_DD]) ++AC_CACHE_CHECK([how to truncate binary pipes], [lt_cv_truncate_bin], ++[printf 0123456789abcdef0123456789abcdef >conftest.i ++cat conftest.i conftest.i >conftest2.i ++lt_cv_truncate_bin= ++if "$ac_cv_path_lt_DD" bs=32 count=1 conftest.out 2>/dev/null; then ++ cmp -s conftest.i conftest.out \ ++ && lt_cv_truncate_bin="$ac_cv_path_lt_DD bs=4096 count=1" ++fi ++rm -f conftest.i conftest2.i conftest.out ++test -z "$lt_cv_truncate_bin" && lt_cv_truncate_bin="$SED -e 4q"]) ++_LT_DECL([lt_truncate_bin], [lt_cv_truncate_bin], [1], ++ [Command to truncate a binary pipe]) ++])# _LT_CMD_TRUNCATE ++ ++ ++# _LT_CHECK_MAGIC_METHOD ++# ---------------------- ++# how to check for library dependencies ++# -- PORTME fill in with the dynamic library characteristics ++m4_defun([_LT_CHECK_MAGIC_METHOD], ++[m4_require([_LT_DECL_EGREP]) ++m4_require([_LT_DECL_OBJDUMP]) ++AC_CACHE_CHECK([how to recognize dependent libraries], ++lt_cv_deplibs_check_method, ++[lt_cv_file_magic_cmd='$MAGIC_CMD' ++lt_cv_file_magic_test_file= ++lt_cv_deplibs_check_method='unknown' ++# Need to set the preceding variable on all platforms that support ++# interlibrary dependencies. ++# 'none' -- dependencies not supported. ++# 'unknown' -- same as none, but documents that we really don't know. ++# 'pass_all' -- all dependencies passed with no checks. ++# 'test_compile' -- check by making test program. 
++# 'file_magic [[regex]]' -- check by looking for files in library path ++# that responds to the $file_magic_cmd with a given extended regex. ++# If you have 'file' or equivalent on your system and you're not sure ++# whether 'pass_all' will *always* work, you probably want this one. ++ ++case $host_os in ++aix[[4-9]]*) ++ lt_cv_deplibs_check_method=pass_all ++ ;; ++ ++beos*) ++ lt_cv_deplibs_check_method=pass_all ++ ;; ++ ++bsdi[[45]]*) ++ lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib)' ++ lt_cv_file_magic_cmd='$FILECMD -L' ++ lt_cv_file_magic_test_file=/shlib/libc.so ++ ;; ++ ++cygwin*) ++ # func_win32_libid is a shell function defined in ltmain.sh ++ lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' ++ lt_cv_file_magic_cmd='func_win32_libid' ++ ;; ++ ++mingw* | pw32*) ++ # Base MSYS/MinGW do not provide the 'file' command needed by ++ # func_win32_libid shell function, so use a weaker test based on 'objdump', ++ # unless we find 'file', for example because we are cross-compiling. ++ if ( file / ) >/dev/null 2>&1; then ++ lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' ++ lt_cv_file_magic_cmd='func_win32_libid' ++ else ++ # Keep this pattern in sync with the one in func_win32_libid. ++ lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' ++ lt_cv_file_magic_cmd='$OBJDUMP -f' ++ fi ++ ;; ++ ++cegcc*) ++ # use the weaker test based on 'objdump'. See mingw*. ++ lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?' ++ lt_cv_file_magic_cmd='$OBJDUMP -f' ++ ;; ++ ++darwin* | rhapsody*) ++ lt_cv_deplibs_check_method=pass_all ++ ;; ++ ++freebsd* | dragonfly* | midnightbsd*) ++ if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then ++ case $host_cpu in ++ i*86 ) ++ # Not sure whether the presence of OpenBSD here was a mistake. ++ # Let's accept both of them until this is cleared up. 
++ lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[[3-9]]86 (compact )?demand paged shared library' ++ lt_cv_file_magic_cmd=$FILECMD ++ lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` ++ ;; ++ esac ++ else ++ lt_cv_deplibs_check_method=pass_all ++ fi ++ ;; ++ ++haiku*) ++ lt_cv_deplibs_check_method=pass_all ++ ;; ++ ++hpux10.20* | hpux11*) ++ lt_cv_file_magic_cmd=$FILECMD ++ case $host_cpu in ++ ia64*) ++ lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|ELF-[[0-9]][[0-9]]) shared object file - IA64' ++ lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so ++ ;; ++ hppa*64*) ++ [lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF[ -][0-9][0-9])(-bit)?( [LM]SB)? shared object( file)?[, -]* PA-RISC [0-9]\.[0-9]'] ++ lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl ++ ;; ++ *) ++ lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|PA-RISC[[0-9]]\.[[0-9]]) shared library' ++ lt_cv_file_magic_test_file=/usr/lib/libc.sl ++ ;; ++ esac ++ ;; ++ ++interix[[3-9]]*) ++ # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here ++ lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|\.a)$' ++ ;; ++ ++irix5* | irix6* | nonstopux*) ++ case $LD in ++ *-32|*"-32 ") libmagic=32-bit;; ++ *-n32|*"-n32 ") libmagic=N32;; ++ *-64|*"-64 ") libmagic=64-bit;; ++ *) libmagic=never-match;; ++ esac ++ lt_cv_deplibs_check_method=pass_all ++ ;; ++ ++# This must be glibc/ELF. 
++linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) ++ lt_cv_deplibs_check_method=pass_all ++ ;; ++ ++netbsd*) ++ if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then ++ lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$' ++ else ++ lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|_pic\.a)$' ++ fi ++ ;; ++ ++newos6*) ++ lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (executable|dynamic lib)' ++ lt_cv_file_magic_cmd=$FILECMD ++ lt_cv_file_magic_test_file=/usr/lib/libnls.so ++ ;; ++ ++*nto* | *qnx*) ++ lt_cv_deplibs_check_method=pass_all ++ ;; ++ ++openbsd* | bitrig*) ++ if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then ++ lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|\.so|_pic\.a)$' ++ else ++ lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$' ++ fi ++ ;; ++ ++osf3* | osf4* | osf5*) ++ lt_cv_deplibs_check_method=pass_all ++ ;; ++ ++rdos*) ++ lt_cv_deplibs_check_method=pass_all ++ ;; ++ ++solaris*) ++ lt_cv_deplibs_check_method=pass_all ++ ;; ++ ++sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) ++ lt_cv_deplibs_check_method=pass_all ++ ;; ++ ++sysv4 | sysv4.3*) ++ case $host_vendor in ++ motorola) ++ lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib) M[[0-9]][[0-9]]* Version [[0-9]]' ++ lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` ++ ;; ++ ncr) ++ lt_cv_deplibs_check_method=pass_all ++ ;; ++ sequent) ++ lt_cv_file_magic_cmd='/bin/file' ++ lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB (shared object|dynamic lib )' ++ ;; ++ sni) ++ lt_cv_file_magic_cmd='/bin/file' ++ lt_cv_deplibs_check_method="file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB dynamic lib" ++ lt_cv_file_magic_test_file=/lib/libc.so ++ ;; ++ siemens) ++ lt_cv_deplibs_check_method=pass_all ++ ;; ++ pc) ++ lt_cv_deplibs_check_method=pass_all ++ ;; ++ 
esac ++ ;; ++ ++tpf*) ++ lt_cv_deplibs_check_method=pass_all ++ ;; ++os2*) ++ lt_cv_deplibs_check_method=pass_all ++ ;; ++esac ++]) ++ ++file_magic_glob= ++want_nocaseglob=no ++if test "$build" = "$host"; then ++ case $host_os in ++ mingw* | pw32*) ++ if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then ++ want_nocaseglob=yes ++ else ++ file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[[\1]]\/[[\1]]\/g;/g"` ++ fi ++ ;; ++ esac ++fi ++ ++file_magic_cmd=$lt_cv_file_magic_cmd ++deplibs_check_method=$lt_cv_deplibs_check_method ++test -z "$deplibs_check_method" && deplibs_check_method=unknown ++ ++_LT_DECL([], [deplibs_check_method], [1], ++ [Method to check whether dependent libraries are shared objects]) ++_LT_DECL([], [file_magic_cmd], [1], ++ [Command to use when deplibs_check_method = "file_magic"]) ++_LT_DECL([], [file_magic_glob], [1], ++ [How to find potential files when deplibs_check_method = "file_magic"]) ++_LT_DECL([], [want_nocaseglob], [1], ++ [Find potential files using nocaseglob when deplibs_check_method = "file_magic"]) ++])# _LT_CHECK_MAGIC_METHOD ++ ++ ++# LT_PATH_NM ++# ---------- ++# find the pathname to a BSD- or MS-compatible name lister ++AC_DEFUN([LT_PATH_NM], ++[AC_REQUIRE([AC_PROG_CC])dnl ++AC_CACHE_CHECK([for BSD- or MS-compatible name lister (nm)], lt_cv_path_NM, ++[if test -n "$NM"; then ++ # Let the user override the test. ++ lt_cv_path_NM=$NM ++else ++ lt_nm_to_check=${ac_tool_prefix}nm ++ if test -n "$ac_tool_prefix" && test "$build" = "$host"; then ++ lt_nm_to_check="$lt_nm_to_check nm" ++ fi ++ for lt_tmp_nm in $lt_nm_to_check; do ++ lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR ++ for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do ++ IFS=$lt_save_ifs ++ test -z "$ac_dir" && ac_dir=. ++ tmp_nm=$ac_dir/$lt_tmp_nm ++ if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext"; then ++ # Check to see if the nm accepts a BSD-compat flag. 
++ # Adding the 'sed 1q' prevents false positives on HP-UX, which says: ++ # nm: unknown option "B" ignored ++ # Tru64's nm complains that /dev/null is an invalid object file ++ # MSYS converts /dev/null to NUL, MinGW nm treats NUL as empty ++ case $build_os in ++ mingw*) lt_bad_file=conftest.nm/nofile ;; ++ *) lt_bad_file=/dev/null ;; ++ esac ++ case `"$tmp_nm" -B $lt_bad_file 2>&1 | $SED '1q'` in ++ *$lt_bad_file* | *'Invalid file or object type'*) ++ lt_cv_path_NM="$tmp_nm -B" ++ break 2 ++ ;; ++ *) ++ case `"$tmp_nm" -p /dev/null 2>&1 | $SED '1q'` in ++ */dev/null*) ++ lt_cv_path_NM="$tmp_nm -p" ++ break 2 ++ ;; ++ *) ++ lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but ++ continue # so that we can try to find one that supports BSD flags ++ ;; ++ esac ++ ;; ++ esac ++ fi ++ done ++ IFS=$lt_save_ifs ++ done ++ : ${lt_cv_path_NM=no} ++fi]) ++if test no != "$lt_cv_path_NM"; then ++ NM=$lt_cv_path_NM ++else ++ # Didn't find any BSD compatible name lister, look for dumpbin. ++ if test -n "$DUMPBIN"; then : ++ # Let the user override the test. 
++ else ++ AC_CHECK_TOOLS(DUMPBIN, [dumpbin "link -dump"], :) ++ case `$DUMPBIN -symbols -headers /dev/null 2>&1 | $SED '1q'` in ++ *COFF*) ++ DUMPBIN="$DUMPBIN -symbols -headers" ++ ;; ++ *) ++ DUMPBIN=: ++ ;; ++ esac ++ fi ++ AC_SUBST([DUMPBIN]) ++ if test : != "$DUMPBIN"; then ++ NM=$DUMPBIN ++ fi ++fi ++test -z "$NM" && NM=nm ++AC_SUBST([NM]) ++_LT_DECL([], [NM], [1], [A BSD- or MS-compatible name lister])dnl ++ ++AC_CACHE_CHECK([the name lister ($NM) interface], [lt_cv_nm_interface], ++ [lt_cv_nm_interface="BSD nm" ++ echo "int some_variable = 0;" > conftest.$ac_ext ++ (eval echo "\"\$as_me:$LINENO: $ac_compile\"" >&AS_MESSAGE_LOG_FD) ++ (eval "$ac_compile" 2>conftest.err) ++ cat conftest.err >&AS_MESSAGE_LOG_FD ++ (eval echo "\"\$as_me:$LINENO: $NM \\\"conftest.$ac_objext\\\"\"" >&AS_MESSAGE_LOG_FD) ++ (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out) ++ cat conftest.err >&AS_MESSAGE_LOG_FD ++ (eval echo "\"\$as_me:$LINENO: output\"" >&AS_MESSAGE_LOG_FD) ++ cat conftest.out >&AS_MESSAGE_LOG_FD ++ if $GREP 'External.*some_variable' conftest.out > /dev/null; then ++ lt_cv_nm_interface="MS dumpbin" ++ fi ++ rm -f conftest*]) ++])# LT_PATH_NM ++ ++# Old names: ++AU_ALIAS([AM_PROG_NM], [LT_PATH_NM]) ++AU_ALIAS([AC_PROG_NM], [LT_PATH_NM]) ++dnl aclocal-1.4 backwards compatibility: ++dnl AC_DEFUN([AM_PROG_NM], []) ++dnl AC_DEFUN([AC_PROG_NM], []) ++ ++# _LT_CHECK_SHAREDLIB_FROM_LINKLIB ++# -------------------------------- ++# how to determine the name of the shared library ++# associated with a specific link library. 
++# -- PORTME fill in with the dynamic library characteristics ++m4_defun([_LT_CHECK_SHAREDLIB_FROM_LINKLIB], ++[m4_require([_LT_DECL_EGREP]) ++m4_require([_LT_DECL_OBJDUMP]) ++m4_require([_LT_DECL_DLLTOOL]) ++AC_CACHE_CHECK([how to associate runtime and link libraries], ++lt_cv_sharedlib_from_linklib_cmd, ++[lt_cv_sharedlib_from_linklib_cmd='unknown' ++ ++case $host_os in ++cygwin* | mingw* | pw32* | cegcc*) ++ # two different shell functions defined in ltmain.sh; ++ # decide which one to use based on capabilities of $DLLTOOL ++ case `$DLLTOOL --help 2>&1` in ++ *--identify-strict*) ++ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib ++ ;; ++ *) ++ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback ++ ;; ++ esac ++ ;; ++*) ++ # fallback: assume linklib IS sharedlib ++ lt_cv_sharedlib_from_linklib_cmd=$ECHO ++ ;; ++esac ++]) ++sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd ++test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO ++ ++_LT_DECL([], [sharedlib_from_linklib_cmd], [1], ++ [Command to associate shared and link libraries]) ++])# _LT_CHECK_SHAREDLIB_FROM_LINKLIB ++ ++ ++# _LT_PATH_MANIFEST_TOOL ++# ---------------------- ++# locate the manifest tool ++m4_defun([_LT_PATH_MANIFEST_TOOL], ++[AC_CHECK_TOOL(MANIFEST_TOOL, mt, :) ++test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt ++AC_CACHE_CHECK([if $MANIFEST_TOOL is a manifest tool], [lt_cv_path_mainfest_tool], ++ [lt_cv_path_mainfest_tool=no ++ echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&AS_MESSAGE_LOG_FD ++ $MANIFEST_TOOL '-?' 
2>conftest.err > conftest.out ++ cat conftest.err >&AS_MESSAGE_LOG_FD ++ if $GREP 'Manifest Tool' conftest.out > /dev/null; then ++ lt_cv_path_mainfest_tool=yes ++ fi ++ rm -f conftest*]) ++if test yes != "$lt_cv_path_mainfest_tool"; then ++ MANIFEST_TOOL=: ++fi ++_LT_DECL([], [MANIFEST_TOOL], [1], [Manifest tool])dnl ++])# _LT_PATH_MANIFEST_TOOL ++ ++ ++# _LT_DLL_DEF_P([FILE]) ++# --------------------- ++# True iff FILE is a Windows DLL '.def' file. ++# Keep in sync with func_dll_def_p in the libtool script ++AC_DEFUN([_LT_DLL_DEF_P], ++[dnl ++ test DEF = "`$SED -n dnl ++ -e '\''s/^[[ ]]*//'\'' dnl Strip leading whitespace ++ -e '\''/^\(;.*\)*$/d'\'' dnl Delete empty lines and comments ++ -e '\''s/^\(EXPORTS\|LIBRARY\)\([[ ]].*\)*$/DEF/p'\'' dnl ++ -e q dnl Only consider the first "real" line ++ $1`" dnl ++])# _LT_DLL_DEF_P ++ ++ ++# LT_LIB_M ++# -------- ++# check for math library ++AC_DEFUN([LT_LIB_M], ++[AC_REQUIRE([AC_CANONICAL_HOST])dnl ++LIBM= ++case $host in ++*-*-beos* | *-*-cegcc* | *-*-cygwin* | *-*-haiku* | *-*-pw32* | *-*-darwin*) ++ # These system don't have libm, or don't need it ++ ;; ++*-ncr-sysv4.3*) ++ AC_CHECK_LIB(mw, _mwvalidcheckl, LIBM=-lmw) ++ AC_CHECK_LIB(m, cos, LIBM="$LIBM -lm") ++ ;; ++*) ++ AC_CHECK_LIB(m, cos, LIBM=-lm) ++ ;; ++esac ++AC_SUBST([LIBM]) ++])# LT_LIB_M ++ ++# Old name: ++AU_ALIAS([AC_CHECK_LIBM], [LT_LIB_M]) ++dnl aclocal-1.4 backwards compatibility: ++dnl AC_DEFUN([AC_CHECK_LIBM], []) ++ ++ ++# _LT_COMPILER_NO_RTTI([TAGNAME]) ++# ------------------------------- ++m4_defun([_LT_COMPILER_NO_RTTI], ++[m4_require([_LT_TAG_COMPILER])dnl ++ ++_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)= ++ ++if test yes = "$GCC"; then ++ case $cc_basename in ++ nvcc*) ++ _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -Xcompiler -fno-builtin' ;; ++ *) ++ _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' ;; ++ esac ++ ++ _LT_COMPILER_OPTION([if $compiler supports -fno-rtti -fno-exceptions], ++ 
lt_cv_prog_compiler_rtti_exceptions, ++ [-fno-rtti -fno-exceptions], [], ++ [_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)="$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1) -fno-rtti -fno-exceptions"]) ++fi ++_LT_TAGDECL([no_builtin_flag], [lt_prog_compiler_no_builtin_flag], [1], ++ [Compiler flag to turn off builtin functions]) ++])# _LT_COMPILER_NO_RTTI ++ ++ ++# _LT_CMD_GLOBAL_SYMBOLS ++# ---------------------- ++m4_defun([_LT_CMD_GLOBAL_SYMBOLS], ++[AC_REQUIRE([AC_CANONICAL_HOST])dnl ++AC_REQUIRE([AC_PROG_CC])dnl ++AC_REQUIRE([AC_PROG_AWK])dnl ++AC_REQUIRE([LT_PATH_NM])dnl ++AC_REQUIRE([LT_PATH_LD])dnl ++m4_require([_LT_DECL_SED])dnl ++m4_require([_LT_DECL_EGREP])dnl ++m4_require([_LT_TAG_COMPILER])dnl ++ ++# Check for command to grab the raw symbol name followed by C symbol from nm. ++AC_MSG_CHECKING([command to parse $NM output from $compiler object]) ++AC_CACHE_VAL([lt_cv_sys_global_symbol_pipe], ++[ ++# These are sane defaults that work on at least a few old systems. ++# [They come from Ultrix. What could be older than Ultrix?!! ;)] ++ ++# Character class describing NM global symbol codes. ++symcode='[[BCDEGRST]]' ++ ++# Regexp to match symbols that can be accessed directly from C. ++sympat='\([[_A-Za-z]][[_A-Za-z0-9]]*\)' ++ ++# Define system-specific variables. ++case $host_os in ++aix*) ++ symcode='[[BCDT]]' ++ ;; ++cygwin* | mingw* | pw32* | cegcc*) ++ symcode='[[ABCDGISTW]]' ++ ;; ++hpux*) ++ if test ia64 = "$host_cpu"; then ++ symcode='[[ABCDEGRST]]' ++ fi ++ ;; ++irix* | nonstopux*) ++ symcode='[[BCDEGRST]]' ++ ;; ++osf*) ++ symcode='[[BCDEGQRST]]' ++ ;; ++solaris*) ++ symcode='[[BDRT]]' ++ ;; ++sco3.2v5*) ++ symcode='[[DT]]' ++ ;; ++sysv4.2uw2*) ++ symcode='[[DT]]' ++ ;; ++sysv5* | sco5v6* | unixware* | OpenUNIX*) ++ symcode='[[ABDT]]' ++ ;; ++sysv4) ++ symcode='[[DFNSTU]]' ++ ;; ++esac ++ ++# If we're using GNU nm, then use its standard symbol codes. 
++case `$NM -V 2>&1` in ++*GNU* | *'with BFD'*) ++ symcode='[[ABCDGIRSTW]]' ;; ++esac ++ ++if test "$lt_cv_nm_interface" = "MS dumpbin"; then ++ # Gets list of data symbols to import. ++ lt_cv_sys_global_symbol_to_import="$SED -n -e 's/^I .* \(.*\)$/\1/p'" ++ # Adjust the below global symbol transforms to fixup imported variables. ++ lt_cdecl_hook=" -e 's/^I .* \(.*\)$/extern __declspec(dllimport) char \1;/p'" ++ lt_c_name_hook=" -e 's/^I .* \(.*\)$/ {\"\1\", (void *) 0},/p'" ++ lt_c_name_lib_hook="\ ++ -e 's/^I .* \(lib.*\)$/ {\"\1\", (void *) 0},/p'\ ++ -e 's/^I .* \(.*\)$/ {\"lib\1\", (void *) 0},/p'" ++else ++ # Disable hooks by default. ++ lt_cv_sys_global_symbol_to_import= ++ lt_cdecl_hook= ++ lt_c_name_hook= ++ lt_c_name_lib_hook= ++fi ++ ++# Transform an extracted symbol line into a proper C declaration. ++# Some systems (esp. on ia64) link data and code symbols differently, ++# so use this general approach. ++lt_cv_sys_global_symbol_to_cdecl="$SED -n"\ ++$lt_cdecl_hook\ ++" -e 's/^T .* \(.*\)$/extern int \1();/p'"\ ++" -e 's/^$symcode$symcode* .* \(.*\)$/extern char \1;/p'" ++ ++# Transform an extracted symbol line into symbol name and symbol address ++lt_cv_sys_global_symbol_to_c_name_address="$SED -n"\ ++$lt_c_name_hook\ ++" -e 's/^: \(.*\) .*$/ {\"\1\", (void *) 0},/p'"\ ++" -e 's/^$symcode$symcode* .* \(.*\)$/ {\"\1\", (void *) \&\1},/p'" ++ ++# Transform an extracted symbol line into symbol name with lib prefix and ++# symbol address. ++lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="$SED -n"\ ++$lt_c_name_lib_hook\ ++" -e 's/^: \(.*\) .*$/ {\"\1\", (void *) 0},/p'"\ ++" -e 's/^$symcode$symcode* .* \(lib.*\)$/ {\"\1\", (void *) \&\1},/p'"\ ++" -e 's/^$symcode$symcode* .* \(.*\)$/ {\"lib\1\", (void *) \&\1},/p'" ++ ++# Handle CRLF in mingw tool chain ++opt_cr= ++case $build_os in ++mingw*) ++ opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp ++ ;; ++esac ++ ++# Try without a prefix underscore, then with it. 
++for ac_symprfx in "" "_"; do ++ ++ # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. ++ symxfrm="\\1 $ac_symprfx\\2 \\2" ++ ++ # Write the raw and C identifiers. ++ if test "$lt_cv_nm_interface" = "MS dumpbin"; then ++ # Fake it for dumpbin and say T for any non-static function, ++ # D for any global variable and I for any imported variable. ++ # Also find C++ and __fastcall symbols from MSVC++ or ICC, ++ # which start with @ or ?. ++ lt_cv_sys_global_symbol_pipe="$AWK ['"\ ++" {last_section=section; section=\$ 3};"\ ++" /^COFF SYMBOL TABLE/{for(i in hide) delete hide[i]};"\ ++" /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\ ++" /^ *Symbol name *: /{split(\$ 0,sn,\":\"); si=substr(sn[2],2)};"\ ++" /^ *Type *: code/{print \"T\",si,substr(si,length(prfx))};"\ ++" /^ *Type *: data/{print \"I\",si,substr(si,length(prfx))};"\ ++" \$ 0!~/External *\|/{next};"\ ++" / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\ ++" {if(hide[section]) next};"\ ++" {f=\"D\"}; \$ 0~/\(\).*\|/{f=\"T\"};"\ ++" {split(\$ 0,a,/\||\r/); split(a[2],s)};"\ ++" s[1]~/^[@?]/{print f,s[1],s[1]; next};"\ ++" s[1]~prfx {split(s[1],t,\"@\"); print f,t[1],substr(t[1],length(prfx))}"\ ++" ' prfx=^$ac_symprfx]" ++ else ++ lt_cv_sys_global_symbol_pipe="$SED -n -e 's/^.*[[ ]]\($symcode$symcode*\)[[ ]][[ ]]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" ++ fi ++ lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | $SED '/ __gnu_lto/d'" ++ ++ # Check to see that the pipe works correctly. ++ pipe_works=no ++ ++ rm -f conftest* ++ cat > conftest.$ac_ext <<_LT_EOF ++#ifdef __cplusplus ++extern "C" { ++#endif ++char nm_test_var; ++void nm_test_func(void); ++void nm_test_func(void){} ++#ifdef __cplusplus ++} ++#endif ++int main(){nm_test_var='a';nm_test_func();return(0);} ++_LT_EOF ++ ++ if AC_TRY_EVAL(ac_compile); then ++ # Now try to grab the symbols. 
++ nlist=conftest.nm ++ if AC_TRY_EVAL(NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist) && test -s "$nlist"; then ++ # Try sorting and uniquifying the output. ++ if sort "$nlist" | uniq > "$nlist"T; then ++ mv -f "$nlist"T "$nlist" ++ else ++ rm -f "$nlist"T ++ fi ++ ++ # Make sure that we snagged all the symbols we need. ++ if $GREP ' nm_test_var$' "$nlist" >/dev/null; then ++ if $GREP ' nm_test_func$' "$nlist" >/dev/null; then ++ cat <<_LT_EOF > conftest.$ac_ext ++/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ ++#if defined _WIN32 || defined __CYGWIN__ || defined _WIN32_WCE ++/* DATA imports from DLLs on WIN32 can't be const, because runtime ++ relocations are performed -- see ld's documentation on pseudo-relocs. */ ++# define LT@&t@_DLSYM_CONST ++#elif defined __osf__ ++/* This system does not cope well with relocations in const data. */ ++# define LT@&t@_DLSYM_CONST ++#else ++# define LT@&t@_DLSYM_CONST const ++#endif ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++_LT_EOF ++ # Now generate the symbol file. ++ eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext' ++ ++ cat <<_LT_EOF >> conftest.$ac_ext ++ ++/* The mapping between symbol names and symbols. */ ++LT@&t@_DLSYM_CONST struct { ++ const char *name; ++ void *address; ++} ++lt__PROGRAM__LTX_preloaded_symbols[[]] = ++{ ++ { "@PROGRAM@", (void *) 0 }, ++_LT_EOF ++ $SED "s/^$symcode$symcode* .* \(.*\)$/ {\"\1\", (void *) \&\1},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext ++ cat <<\_LT_EOF >> conftest.$ac_ext ++ {0, (void *) 0} ++}; ++ ++/* This works around a problem in FreeBSD linker */ ++#ifdef FREEBSD_WORKAROUND ++static const void *lt_preloaded_setup() { ++ return lt__PROGRAM__LTX_preloaded_symbols; ++} ++#endif ++ ++#ifdef __cplusplus ++} ++#endif ++_LT_EOF ++ # Now try linking the two files. 
++ mv conftest.$ac_objext conftstm.$ac_objext ++ lt_globsym_save_LIBS=$LIBS ++ lt_globsym_save_CFLAGS=$CFLAGS ++ LIBS=conftstm.$ac_objext ++ CFLAGS="$CFLAGS$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)" ++ if AC_TRY_EVAL(ac_link) && test -s conftest$ac_exeext; then ++ pipe_works=yes ++ fi ++ LIBS=$lt_globsym_save_LIBS ++ CFLAGS=$lt_globsym_save_CFLAGS ++ else ++ echo "cannot find nm_test_func in $nlist" >&AS_MESSAGE_LOG_FD ++ fi ++ else ++ echo "cannot find nm_test_var in $nlist" >&AS_MESSAGE_LOG_FD ++ fi ++ else ++ echo "cannot run $lt_cv_sys_global_symbol_pipe" >&AS_MESSAGE_LOG_FD ++ fi ++ else ++ echo "$progname: failed program was:" >&AS_MESSAGE_LOG_FD ++ cat conftest.$ac_ext >&5 ++ fi ++ rm -rf conftest* conftst* ++ ++ # Do not use the global_symbol_pipe unless it works. ++ if test yes = "$pipe_works"; then ++ break ++ else ++ lt_cv_sys_global_symbol_pipe= ++ fi ++done ++]) ++if test -z "$lt_cv_sys_global_symbol_pipe"; then ++ lt_cv_sys_global_symbol_to_cdecl= ++fi ++if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then ++ AC_MSG_RESULT(failed) ++else ++ AC_MSG_RESULT(ok) ++fi ++ ++# Response file support. 
++if test "$lt_cv_nm_interface" = "MS dumpbin"; then ++ nm_file_list_spec='@' ++elif $NM --help 2>/dev/null | grep '[[@]]FILE' >/dev/null; then ++ nm_file_list_spec='@' ++fi ++ ++_LT_DECL([global_symbol_pipe], [lt_cv_sys_global_symbol_pipe], [1], ++ [Take the output of nm and produce a listing of raw symbols and C names]) ++_LT_DECL([global_symbol_to_cdecl], [lt_cv_sys_global_symbol_to_cdecl], [1], ++ [Transform the output of nm in a proper C declaration]) ++_LT_DECL([global_symbol_to_import], [lt_cv_sys_global_symbol_to_import], [1], ++ [Transform the output of nm into a list of symbols to manually relocate]) ++_LT_DECL([global_symbol_to_c_name_address], ++ [lt_cv_sys_global_symbol_to_c_name_address], [1], ++ [Transform the output of nm in a C name address pair]) ++_LT_DECL([global_symbol_to_c_name_address_lib_prefix], ++ [lt_cv_sys_global_symbol_to_c_name_address_lib_prefix], [1], ++ [Transform the output of nm in a C name address pair when lib prefix is needed]) ++_LT_DECL([nm_interface], [lt_cv_nm_interface], [1], ++ [The name lister interface]) ++_LT_DECL([], [nm_file_list_spec], [1], ++ [Specify filename containing input files for $NM]) ++]) # _LT_CMD_GLOBAL_SYMBOLS ++ ++ ++# _LT_COMPILER_PIC([TAGNAME]) ++# --------------------------- ++m4_defun([_LT_COMPILER_PIC], ++[m4_require([_LT_TAG_COMPILER])dnl ++_LT_TAGVAR(lt_prog_compiler_wl, $1)= ++_LT_TAGVAR(lt_prog_compiler_pic, $1)= ++_LT_TAGVAR(lt_prog_compiler_static, $1)= ++ ++m4_if([$1], [CXX], [ ++ # C++ specific cases for pic, static, wl, etc. ++ if test yes = "$GXX"; then ++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' ++ ++ case $host_os in ++ aix*) ++ # All AIX code is PIC. 
++ if test ia64 = "$host_cpu"; then ++ # AIX 5 now supports IA64 processor ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ++ fi ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ++ ;; ++ ++ amigaos*) ++ case $host_cpu in ++ powerpc) ++ # see comment about AmigaOS4 .so support ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ++ ;; ++ m68k) ++ # FIXME: we need at least 68020 code to build shared libraries, but ++ # adding the '-m68020' flag to GCC prevents building anything better, ++ # like '-m68040'. ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' ++ ;; ++ esac ++ ;; ++ ++ beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) ++ # PIC is the default for these OSes. ++ ;; ++ mingw* | cygwin* | os2* | pw32* | cegcc*) ++ # This hack is so that the source file can tell whether it is being ++ # built for inclusion in a dll (and should export symbols for example). ++ # Although the cygwin gcc ignores -fPIC, still need this for old-style ++ # (--disable-auto-import) libraries ++ m4_if([$1], [GCJ], [], ++ [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) ++ case $host_os in ++ os2*) ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-static' ++ ;; ++ esac ++ ;; ++ darwin* | rhapsody*) ++ # PIC is the default on this platform ++ # Common symbols not allowed in MH_DYLIB files ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' ++ ;; ++ *djgpp*) ++ # DJGPP does not support shared libraries at all ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)= ++ ;; ++ haiku*) ++ # PIC is the default for Haiku. ++ # The "-static" flag exists, but is broken. ++ _LT_TAGVAR(lt_prog_compiler_static, $1)= ++ ;; ++ interix[[3-9]]*) ++ # Interix 3.x gcc -fpic/-fPIC options generate broken code. ++ # Instead, we relocate shared libraries at runtime. ++ ;; ++ sysv4*MP*) ++ if test -d /usr/nec; then ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic ++ fi ++ ;; ++ hpux*) ++ # PIC is the default for 64-bit PA HP-UX, but not for 32-bit ++ # PA HP-UX. 
On IA64 HP-UX, PIC is the default but the pic flag ++ # sets the default TLS model and affects inlining. ++ case $host_cpu in ++ hppa*64*) ++ ;; ++ *) ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ++ ;; ++ esac ++ ;; ++ *qnx* | *nto*) ++ # QNX uses GNU C++, but need to define -shared option too, otherwise ++ # it will coredump. ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' ++ ;; ++ *) ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ++ ;; ++ esac ++ else ++ case $host_os in ++ aix[[4-9]]*) ++ # All AIX code is PIC. ++ if test ia64 = "$host_cpu"; then ++ # AIX 5 now supports IA64 processor ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ++ else ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' ++ fi ++ ;; ++ chorus*) ++ case $cc_basename in ++ cxch68*) ++ # Green Hills C++ Compiler ++ # _LT_TAGVAR(lt_prog_compiler_static, $1)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a" ++ ;; ++ esac ++ ;; ++ mingw* | cygwin* | os2* | pw32* | cegcc*) ++ # This hack is so that the source file can tell whether it is being ++ # built for inclusion in a dll (and should export symbols for example). 
++ m4_if([$1], [GCJ], [], ++ [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) ++ ;; ++ dgux*) ++ case $cc_basename in ++ ec++*) ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' ++ ;; ++ ghcx*) ++ # Green Hills C++ Compiler ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' ++ ;; ++ *) ++ ;; ++ esac ++ ;; ++ freebsd* | dragonfly* | midnightbsd*) ++ # FreeBSD uses GNU C++ ++ ;; ++ hpux9* | hpux10* | hpux11*) ++ case $cc_basename in ++ CC*) ++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-a ${wl}archive' ++ if test ia64 != "$host_cpu"; then ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' ++ fi ++ ;; ++ aCC*) ++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-a ${wl}archive' ++ case $host_cpu in ++ hppa*64*|ia64*) ++ # +Z the default ++ ;; ++ *) ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' ++ ;; ++ esac ++ ;; ++ *) ++ ;; ++ esac ++ ;; ++ interix*) ++ # This is c89, which is MS Visual C++ (no shared libs) ++ # Anyone wants to do a port? ++ ;; ++ irix5* | irix6* | nonstopux*) ++ case $cc_basename in ++ CC*) ++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' ++ # CC pic flag -KPIC is the default. ++ ;; ++ *) ++ ;; ++ esac ++ ;; ++ linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) ++ case $cc_basename in ++ KCC*) ++ # KAI C++ Compiler ++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ++ ;; ++ ecpc* ) ++ # old Intel C++ for x86_64, which still supported -KPIC. ++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' ++ ;; ++ icpc* ) ++ # Intel C++, used to be incompatible with GCC. ++ # ICC 10 doesn't accept -KPIC any more. 
++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' ++ ;; ++ pgCC* | pgcpp*) ++ # Portland Group C++ compiler ++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ++ ;; ++ cxx*) ++ # Compaq C++ ++ # Make sure the PIC flag is empty. It appears that all Alpha ++ # Linux and Compaq Tru64 Unix objects are PIC. ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)= ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' ++ ;; ++ xlc* | xlC* | bgxl[[cC]]* | mpixl[[cC]]*) ++ # IBM XL 8.0, 9.0 on PPC and BlueGene ++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic' ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink' ++ ;; ++ *) ++ case `$CC -V 2>&1 | $SED 5q` in ++ *Sun\ C*) ++ # Sun C++ 5.9 ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' ++ ;; ++ esac ++ ;; ++ esac ++ ;; ++ lynxos*) ++ ;; ++ m88k*) ++ ;; ++ mvs*) ++ case $cc_basename in ++ cxx*) ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-W c,exportall' ++ ;; ++ *) ++ ;; ++ esac ++ ;; ++ netbsd*) ++ ;; ++ *qnx* | *nto*) ++ # QNX uses GNU C++, but need to define -shared option too, otherwise ++ # it will coredump. ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' ++ ;; ++ osf3* | osf4* | osf5*) ++ case $cc_basename in ++ KCC*) ++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' ++ ;; ++ RCC*) ++ # Rational C++ 2.4.1 ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' ++ ;; ++ cxx*) ++ # Digital/Compaq C++ ++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' ++ # Make sure the PIC flag is empty. It appears that all Alpha ++ # Linux and Compaq Tru64 Unix objects are PIC. 
++ _LT_TAGVAR(lt_prog_compiler_pic, $1)= ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' ++ ;; ++ *) ++ ;; ++ esac ++ ;; ++ psos*) ++ ;; ++ solaris*) ++ case $cc_basename in ++ CC* | sunCC*) ++ # Sun C++ 4.2, 5.x and Centerline C++ ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' ++ ;; ++ gcx*) ++ # Green Hills C++ Compiler ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' ++ ;; ++ *) ++ ;; ++ esac ++ ;; ++ sunos4*) ++ case $cc_basename in ++ CC*) ++ # Sun C++ 4.x ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ++ ;; ++ lcc*) ++ # Lucid ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' ++ ;; ++ *) ++ ;; ++ esac ++ ;; ++ sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) ++ case $cc_basename in ++ CC*) ++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ++ ;; ++ esac ++ ;; ++ tandem*) ++ case $cc_basename in ++ NCC*) ++ # NonStop-UX NCC 3.20 ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' ++ ;; ++ *) ++ ;; ++ esac ++ ;; ++ vxworks*) ++ ;; ++ *) ++ _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no ++ ;; ++ esac ++ fi ++], ++[ ++ if test yes = "$GCC"; then ++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' ++ ++ case $host_os in ++ aix*) ++ # All AIX code is PIC. 
++ if test ia64 = "$host_cpu"; then ++ # AIX 5 now supports IA64 processor ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ++ fi ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ++ ;; ++ ++ amigaos*) ++ case $host_cpu in ++ powerpc) ++ # see comment about AmigaOS4 .so support ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ++ ;; ++ m68k) ++ # FIXME: we need at least 68020 code to build shared libraries, but ++ # adding the '-m68020' flag to GCC prevents building anything better, ++ # like '-m68040'. ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' ++ ;; ++ esac ++ ;; ++ ++ beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) ++ # PIC is the default for these OSes. ++ ;; ++ ++ mingw* | cygwin* | pw32* | os2* | cegcc*) ++ # This hack is so that the source file can tell whether it is being ++ # built for inclusion in a dll (and should export symbols for example). ++ # Although the cygwin gcc ignores -fPIC, still need this for old-style ++ # (--disable-auto-import) libraries ++ m4_if([$1], [GCJ], [], ++ [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) ++ case $host_os in ++ os2*) ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-static' ++ ;; ++ esac ++ ;; ++ ++ darwin* | rhapsody*) ++ # PIC is the default on this platform ++ # Common symbols not allowed in MH_DYLIB files ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' ++ ;; ++ ++ haiku*) ++ # PIC is the default for Haiku. ++ # The "-static" flag exists, but is broken. ++ _LT_TAGVAR(lt_prog_compiler_static, $1)= ++ ;; ++ ++ hpux*) ++ # PIC is the default for 64-bit PA HP-UX, but not for 32-bit ++ # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag ++ # sets the default TLS model and affects inlining. ++ case $host_cpu in ++ hppa*64*) ++ # +Z the default ++ ;; ++ *) ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ++ ;; ++ esac ++ ;; ++ ++ interix[[3-9]]*) ++ # Interix 3.x gcc -fpic/-fPIC options generate broken code. 
++ # Instead, we relocate shared libraries at runtime. ++ ;; ++ ++ msdosdjgpp*) ++ # Just because we use GCC doesn't mean we suddenly get shared libraries ++ # on systems that don't support them. ++ _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no ++ enable_shared=no ++ ;; ++ ++ *nto* | *qnx*) ++ # QNX uses GNU C++, but need to define -shared option too, otherwise ++ # it will coredump. ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' ++ ;; ++ ++ sysv4*MP*) ++ if test -d /usr/nec; then ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic ++ fi ++ ;; ++ ++ *) ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ++ ;; ++ esac ++ ++ case $cc_basename in ++ nvcc*) # Cuda Compiler Driver 2.2 ++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Xlinker ' ++ if test -n "$_LT_TAGVAR(lt_prog_compiler_pic, $1)"; then ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)="-Xcompiler $_LT_TAGVAR(lt_prog_compiler_pic, $1)" ++ fi ++ ;; ++ esac ++ else ++ # PORTME Check for flag to pass linker flags through the system compiler. ++ case $host_os in ++ aix*) ++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' ++ if test ia64 = "$host_cpu"; then ++ # AIX 5 now supports IA64 processor ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ++ else ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' ++ fi ++ ;; ++ ++ darwin* | rhapsody*) ++ # PIC is the default on this platform ++ # Common symbols not allowed in MH_DYLIB files ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' ++ case $cc_basename in ++ nagfor*) ++ # NAG Fortran compiler ++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,-Wl,,' ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ++ ;; ++ esac ++ ;; ++ ++ mingw* | cygwin* | pw32* | os2* | cegcc*) ++ # This hack is so that the source file can tell whether it is being ++ # built for inclusion in a dll (and should export symbols for example). 
++ m4_if([$1], [GCJ], [], ++ [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) ++ case $host_os in ++ os2*) ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-static' ++ ;; ++ esac ++ ;; ++ ++ hpux9* | hpux10* | hpux11*) ++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' ++ # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but ++ # not for PA HP-UX. ++ case $host_cpu in ++ hppa*64*|ia64*) ++ # +Z the default ++ ;; ++ *) ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' ++ ;; ++ esac ++ # Is there a better lt_prog_compiler_static that works with the bundled CC? ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-a ${wl}archive' ++ ;; ++ ++ irix5* | irix6* | nonstopux*) ++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' ++ # PIC (with -KPIC) is the default. ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' ++ ;; ++ ++ linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) ++ case $cc_basename in ++ # old Intel for x86_64, which still supported -KPIC. ++ ecc*) ++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' ++ ;; ++ # icc used to be incompatible with GCC. ++ # ICC 10 doesn't accept -KPIC any more. ++ icc* | ifort*) ++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' ++ ;; ++ # Lahey Fortran 8.1. 
++ lf95*) ++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='--shared' ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='--static' ++ ;; ++ nagfor*) ++ # NAG Fortran compiler ++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,-Wl,,' ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ++ ;; ++ tcc*) ++ # Fabrice Bellard et al's Tiny C Compiler ++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' ++ ;; ++ pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) ++ # Portland Group compilers (*not* the Pentium gcc compiler, ++ # which looks to be a dead project) ++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ++ ;; ++ ccc*) ++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' ++ # All Alpha code is PIC. ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' ++ ;; ++ xl* | bgxl* | bgf* | mpixl*) ++ # IBM XL C 8.0/Fortran 10.1, 11.1 on PPC and BlueGene ++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic' ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink' ++ ;; ++ *) ++ case `$CC -V 2>&1 | $SED 5q` in ++ *Sun\ Ceres\ Fortran* | *Sun*Fortran*\ [[1-7]].* | *Sun*Fortran*\ 8.[[0-3]]*) ++ # Sun Fortran 8.3 passes all unrecognized flags to the linker ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='' ++ ;; ++ *Sun\ F* | *Sun*Fortran*) ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' ++ ;; ++ *Sun\ C*) ++ # Sun C 5.9 ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' ++ ;; ++ *Intel*\ 
[[CF]]*Compiler*) ++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' ++ ;; ++ *Portland\ Group*) ++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ++ ;; ++ esac ++ ;; ++ esac ++ ;; ++ ++ newsos6) ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ++ ;; ++ ++ *nto* | *qnx*) ++ # QNX uses GNU C++, but need to define -shared option too, otherwise ++ # it will coredump. ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' ++ ;; ++ ++ osf3* | osf4* | osf5*) ++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' ++ # All OSF/1 code is PIC. ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' ++ ;; ++ ++ rdos*) ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' ++ ;; ++ ++ solaris*) ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ++ case $cc_basename in ++ f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) ++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ';; ++ *) ++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,';; ++ esac ++ ;; ++ ++ sunos4*) ++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ++ ;; ++ ++ sysv4 | sysv4.2uw2* | sysv4.3*) ++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ++ ;; ++ ++ sysv4*MP*) ++ if test -d /usr/nec; then ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-Kconform_pic' ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ++ fi ++ ;; ++ ++ sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) ++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ++ ;; ++ ++ 
unicos*) ++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' ++ _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no ++ ;; ++ ++ uts4*) ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ++ ;; ++ ++ *) ++ _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no ++ ;; ++ esac ++ fi ++]) ++case $host_os in ++ # For platforms that do not support PIC, -DPIC is meaningless: ++ *djgpp*) ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)= ++ ;; ++ *) ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)="$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])" ++ ;; ++esac ++ ++AC_CACHE_CHECK([for $compiler option to produce PIC], ++ [_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)], ++ [_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)=$_LT_TAGVAR(lt_prog_compiler_pic, $1)]) ++_LT_TAGVAR(lt_prog_compiler_pic, $1)=$_LT_TAGVAR(lt_cv_prog_compiler_pic, $1) ++ ++# ++# Check to make sure the PIC flag actually works. ++# ++if test -n "$_LT_TAGVAR(lt_prog_compiler_pic, $1)"; then ++ _LT_COMPILER_OPTION([if $compiler PIC flag $_LT_TAGVAR(lt_prog_compiler_pic, $1) works], ++ [_LT_TAGVAR(lt_cv_prog_compiler_pic_works, $1)], ++ [$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])], [], ++ [case $_LT_TAGVAR(lt_prog_compiler_pic, $1) in ++ "" | " "*) ;; ++ *) _LT_TAGVAR(lt_prog_compiler_pic, $1)=" $_LT_TAGVAR(lt_prog_compiler_pic, $1)" ;; ++ esac], ++ [_LT_TAGVAR(lt_prog_compiler_pic, $1)= ++ _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no]) ++fi ++_LT_TAGDECL([pic_flag], [lt_prog_compiler_pic], [1], ++ [Additional compiler flags for building library objects]) ++ ++_LT_TAGDECL([wl], [lt_prog_compiler_wl], [1], ++ [How to pass a linker flag through the compiler]) ++# ++# Check to make sure the static flag actually works. 
++# ++wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1) eval lt_tmp_static_flag=\"$_LT_TAGVAR(lt_prog_compiler_static, $1)\" ++_LT_LINKER_OPTION([if $compiler static flag $lt_tmp_static_flag works], ++ _LT_TAGVAR(lt_cv_prog_compiler_static_works, $1), ++ $lt_tmp_static_flag, ++ [], ++ [_LT_TAGVAR(lt_prog_compiler_static, $1)=]) ++_LT_TAGDECL([link_static_flag], [lt_prog_compiler_static], [1], ++ [Compiler flag to prevent dynamic linking]) ++])# _LT_COMPILER_PIC ++ ++ ++# _LT_LINKER_SHLIBS([TAGNAME]) ++# ---------------------------- ++# See if the linker supports building shared libraries. ++m4_defun([_LT_LINKER_SHLIBS], ++[AC_REQUIRE([LT_PATH_LD])dnl ++AC_REQUIRE([LT_PATH_NM])dnl ++m4_require([_LT_PATH_MANIFEST_TOOL])dnl ++m4_require([_LT_FILEUTILS_DEFAULTS])dnl ++m4_require([_LT_DECL_EGREP])dnl ++m4_require([_LT_DECL_SED])dnl ++m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl ++m4_require([_LT_TAG_COMPILER])dnl ++AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) ++m4_if([$1], [CXX], [ ++ _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' ++ _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'] ++ case $host_os in ++ aix[[4-9]]*) ++ # If we're using GNU nm, then we don't want the "-C" option. ++ # -C means demangle to GNU nm, but means don't demangle to AIX nm. ++ # Without the "-l" option, or with the "-B" option, AIX nm treats ++ # weak defined symbols like other global defined symbols, whereas ++ # GNU nm marks them as "W". ++ # While the 'weak' keyword is ignored in the Export File, we need ++ # it in the Import File for the 'aix-soname' feature, so we have ++ # to replace the "-B" option with "-P" for AIX nm. 
++ if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then ++ _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && ([substr](\$ 3,1,1) != ".")) { if (\$ 2 == "W") { print \$ 3 " weak" } else { print \$ 3 } } }'\'' | sort -u > $export_symbols' ++ else ++ _LT_TAGVAR(export_symbols_cmds, $1)='`func_echo_all $NM | $SED -e '\''s/B\([[^B]]*\)$/P\1/'\''` -PCpgl $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "L") || (\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) && ([substr](\$ 1,1,1) != ".")) { if ((\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) { print \$ 1 " weak" } else { print \$ 1 } } }'\'' | sort -u > $export_symbols' ++ fi ++ ;; ++ pw32*) ++ _LT_TAGVAR(export_symbols_cmds, $1)=$ltdll_cmds ++ ;; ++ cygwin* | mingw* | cegcc*) ++ case $cc_basename in ++ cl* | icl*) ++ _LT_TAGVAR(exclude_expsyms, $1)='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' ++ ;; ++ *) ++ _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols' ++ _LT_TAGVAR(exclude_expsyms, $1)=['[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname'] ++ ;; ++ esac ++ ;; ++ *) ++ _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' ++ ;; ++ esac ++], [ ++ runpath_var= ++ _LT_TAGVAR(allow_undefined_flag, $1)= ++ _LT_TAGVAR(always_export_symbols, $1)=no ++ _LT_TAGVAR(archive_cmds, $1)= ++ _LT_TAGVAR(archive_expsym_cmds, $1)= ++ _LT_TAGVAR(compiler_needs_object, $1)=no ++ _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no ++ _LT_TAGVAR(export_dynamic_flag_spec, $1)= ++ _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience 
| $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' ++ _LT_TAGVAR(hardcode_automatic, $1)=no ++ _LT_TAGVAR(hardcode_direct, $1)=no ++ _LT_TAGVAR(hardcode_direct_absolute, $1)=no ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= ++ _LT_TAGVAR(hardcode_libdir_separator, $1)= ++ _LT_TAGVAR(hardcode_minus_L, $1)=no ++ _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported ++ _LT_TAGVAR(inherit_rpath, $1)=no ++ _LT_TAGVAR(link_all_deplibs, $1)=unknown ++ _LT_TAGVAR(module_cmds, $1)= ++ _LT_TAGVAR(module_expsym_cmds, $1)= ++ _LT_TAGVAR(old_archive_from_new_cmds, $1)= ++ _LT_TAGVAR(old_archive_from_expsyms_cmds, $1)= ++ _LT_TAGVAR(thread_safe_flag_spec, $1)= ++ _LT_TAGVAR(whole_archive_flag_spec, $1)= ++ # include_expsyms should be a list of space-separated symbols to be *always* ++ # included in the symbol list ++ _LT_TAGVAR(include_expsyms, $1)= ++ # exclude_expsyms can be an extended regexp of symbols to exclude ++ # it will be wrapped by ' (' and ')$', so one must not match beginning or ++ # end of line. Example: 'a|bc|.*d.*' will exclude the symbols 'a' and 'bc', ++ # as well as any symbol that contains 'd'. ++ _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'] ++ # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out ++ # platforms (ab)use it in PIC code, but their linkers get confused if ++ # the symbol is explicitly referenced. Since portable code cannot ++ # rely on this symbol name, it's probably fine to never include it in ++ # preloaded symbol tables. ++ # Exclude shared library initialization/finalization symbols. ++dnl Note also adjust exclude_expsyms for C++ above. ++ extract_expsyms_cmds= ++ ++ case $host_os in ++ cygwin* | mingw* | pw32* | cegcc*) ++ # FIXME: the MSVC++ and ICC port hasn't been tested in a loooong time ++ # When not using gcc, we currently assume that we are using ++ # Microsoft Visual C++ or Intel C++ Compiler. 
++ if test yes != "$GCC"; then ++ with_gnu_ld=no ++ fi ++ ;; ++ interix*) ++ # we just hope/assume this is gcc and not c89 (= MSVC++ or ICC) ++ with_gnu_ld=yes ++ ;; ++ openbsd* | bitrig*) ++ with_gnu_ld=no ++ ;; ++ esac ++ ++ _LT_TAGVAR(ld_shlibs, $1)=yes ++ ++ # On some targets, GNU ld is compatible enough with the native linker ++ # that we're better off using the native interface for both. ++ lt_use_gnu_ld_interface=no ++ if test yes = "$with_gnu_ld"; then ++ case $host_os in ++ aix*) ++ # The AIX port of GNU ld has always aspired to compatibility ++ # with the native linker. However, as the warning in the GNU ld ++ # block says, versions before 2.19.5* couldn't really create working ++ # shared libraries, regardless of the interface used. ++ case `$LD -v 2>&1` in ++ *\ \(GNU\ Binutils\)\ 2.19.5*) ;; ++ *\ \(GNU\ Binutils\)\ 2.[[2-9]]*) ;; ++ *\ \(GNU\ Binutils\)\ [[3-9]]*) ;; ++ *) ++ lt_use_gnu_ld_interface=yes ++ ;; ++ esac ++ ;; ++ *) ++ lt_use_gnu_ld_interface=yes ++ ;; ++ esac ++ fi ++ ++ if test yes = "$lt_use_gnu_ld_interface"; then ++ # If archive_cmds runs LD, not CC, wlarc should be empty ++ wlarc='$wl' ++ ++ # Set some defaults for GNU ld with shared library support. These ++ # are reset later if shared libraries are not supported. Putting them ++ # here allows them to be overridden if necessary. ++ runpath_var=LD_RUN_PATH ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' ++ _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic' ++ # ancient GNU ld didn't support --whole-archive et. al. 
++ if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then ++ _LT_TAGVAR(whole_archive_flag_spec, $1)=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive' ++ else ++ _LT_TAGVAR(whole_archive_flag_spec, $1)= ++ fi ++ supports_anon_versioning=no ++ case `$LD -v | $SED -e 's/([[^)]]\+)\s\+//' 2>&1` in ++ *GNU\ gold*) supports_anon_versioning=yes ;; ++ *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.10.*) ;; # catch versions < 2.11 ++ *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... ++ *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... ++ *\ 2.11.*) ;; # other 2.11 versions ++ *) supports_anon_versioning=yes ;; ++ esac ++ ++ # See if GNU ld supports shared libraries. ++ case $host_os in ++ aix[[3-9]]*) ++ # On AIX/PPC, the GNU linker is very broken ++ if test ia64 != "$host_cpu"; then ++ _LT_TAGVAR(ld_shlibs, $1)=no ++ cat <<_LT_EOF 1>&2 ++ ++*** Warning: the GNU linker, at least up to release 2.19, is reported ++*** to be unable to reliably create shared libraries on AIX. ++*** Therefore, libtool is disabling shared libraries support. If you ++*** really care for shared libraries, you may want to install binutils ++*** 2.20 or above, or modify your PATH so that a non-GNU linker is found. ++*** You will then need to restart the configuration process. 
++ ++_LT_EOF ++ fi ++ ;; ++ ++ amigaos*) ++ case $host_cpu in ++ powerpc) ++ # see comment about AmigaOS4 .so support ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' ++ _LT_TAGVAR(archive_expsym_cmds, $1)='' ++ ;; ++ m68k) ++ _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' ++ _LT_TAGVAR(hardcode_minus_L, $1)=yes ++ ;; ++ esac ++ ;; ++ ++ beos*) ++ if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then ++ _LT_TAGVAR(allow_undefined_flag, $1)=unsupported ++ # Joseph Beckenbach says some releases of gcc ++ # support --undefined. This deserves some investigation. FIXME ++ _LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' ++ else ++ _LT_TAGVAR(ld_shlibs, $1)=no ++ fi ++ ;; ++ ++ cygwin* | mingw* | pw32* | cegcc*) ++ # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, ++ # as there is no search path for DLLs. 
++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' ++ _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-all-symbols' ++ _LT_TAGVAR(allow_undefined_flag, $1)=unsupported ++ _LT_TAGVAR(always_export_symbols, $1)=no ++ _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes ++ _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols' ++ _LT_TAGVAR(exclude_expsyms, $1)=['[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname'] ++ ++ if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' ++ # If the export-symbols file already is a .def file, use it as ++ # is; otherwise, prepend EXPORTS... 
++ _LT_TAGVAR(archive_expsym_cmds, $1)='if _LT_DLL_DEF_P([$export_symbols]); then ++ cp $export_symbols $output_objdir/$soname.def; ++ else ++ echo EXPORTS > $output_objdir/$soname.def; ++ cat $export_symbols >> $output_objdir/$soname.def; ++ fi~ ++ $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' ++ else ++ _LT_TAGVAR(ld_shlibs, $1)=no ++ fi ++ ;; ++ ++ haiku*) ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' ++ _LT_TAGVAR(link_all_deplibs, $1)=yes ++ ;; ++ ++ os2*) ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' ++ _LT_TAGVAR(hardcode_minus_L, $1)=yes ++ _LT_TAGVAR(allow_undefined_flag, $1)=unsupported ++ shrext_cmds=.dll ++ _LT_TAGVAR(archive_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ ++ $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ ++ $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ ++ $ECHO EXPORTS >> $output_objdir/$libname.def~ ++ emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ ++ $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ ++ emximp -o $lib $output_objdir/$libname.def' ++ _LT_TAGVAR(archive_expsym_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ ++ $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ ++ $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ ++ $ECHO EXPORTS >> $output_objdir/$libname.def~ ++ prefix_cmds="$SED"~ ++ if test EXPORTS = "`$SED 1q $export_symbols`"; then ++ prefix_cmds="$prefix_cmds -e 1d"; ++ fi~ ++ prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ ++ cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ ++ $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs 
$compiler_flags $output_objdir/$libname.def~ ++ emximp -o $lib $output_objdir/$libname.def' ++ _LT_TAGVAR(old_archive_From_new_cmds, $1)='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' ++ _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes ++ _LT_TAGVAR(file_list_spec, $1)='@' ++ ;; ++ ++ interix[[3-9]]*) ++ _LT_TAGVAR(hardcode_direct, $1)=no ++ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' ++ _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' ++ # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. ++ # Instead, shared libraries are loaded at an image base (0x10000000 by ++ # default) and relocated if they conflict, which is a slow very memory ++ # consuming and fragmenting process. To avoid this, we pick a random, ++ # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link ++ # time. Moving up from 0x10000000 also allows more sbrk(2) space. ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' ++ _LT_TAGVAR(archive_expsym_cmds, $1)='$SED "s|^|_|" $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--retain-symbols-file,$output_objdir/$soname.expsym $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' ++ ;; ++ ++ gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu) ++ tmp_diet=no ++ if test linux-dietlibc = "$host_os"; then ++ case $cc_basename in ++ diet\ *) tmp_diet=yes;; # linux-dietlibc with static linking (!diet-dyn) ++ esac ++ fi ++ if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \ ++ && test no = "$tmp_diet" ++ then ++ tmp_addflag=' $pic_flag' ++ tmp_sharedflag='-shared' ++ case $cc_basename,$host_cpu in ++ pgcc*) # Portland Group C compiler ++ _LT_TAGVAR(whole_archive_flag_spec, 
$1)='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' ++ tmp_addflag=' $pic_flag' ++ ;; ++ pgf77* | pgf90* | pgf95* | pgfortran*) ++ # Portland Group f77 and f90 compilers ++ _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' ++ tmp_addflag=' $pic_flag -Mnomain' ;; ++ ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 ++ tmp_addflag=' -i_dynamic' ;; ++ efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 ++ tmp_addflag=' -i_dynamic -nofor_main' ;; ++ ifc* | ifort*) # Intel Fortran compiler ++ tmp_addflag=' -nofor_main' ;; ++ lf95*) # Lahey Fortran 8.1 ++ _LT_TAGVAR(whole_archive_flag_spec, $1)= ++ tmp_sharedflag='--shared' ;; ++ nagfor*) # NAGFOR 5.3 ++ tmp_sharedflag='-Wl,-shared' ;; ++ xl[[cC]]* | bgxl[[cC]]* | mpixl[[cC]]*) # IBM XL C 8.0 on PPC (deal with xlf below) ++ tmp_sharedflag='-qmkshrobj' ++ tmp_addflag= ;; ++ nvcc*) # Cuda Compiler Driver 2.2 ++ _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' ++ _LT_TAGVAR(compiler_needs_object, $1)=yes ++ ;; ++ esac ++ case `$CC -V 2>&1 | $SED 5q` in ++ *Sun\ C*) # Sun C 5.9 ++ _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' ++ _LT_TAGVAR(compiler_needs_object, $1)=yes ++ tmp_sharedflag='-G' ;; ++ *Sun\ F*) # Sun Fortran 8.3 ++ tmp_sharedflag='-G' ;; ++ esac ++ _LT_TAGVAR(archive_cmds, $1)='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs 
$compiler_flags $wl-soname $wl$soname -o $lib' ++ ++ if test yes = "$supports_anon_versioning"; then ++ _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ ++ cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ ++ echo "local: *; };" >> $output_objdir/$libname.ver~ ++ $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-version-script $wl$output_objdir/$libname.ver -o $lib' ++ fi ++ ++ case $cc_basename in ++ tcc*) ++ _LT_TAGVAR(export_dynamic_flag_spec, $1)='-rdynamic' ++ ;; ++ xlf* | bgf* | bgxlf* | mpixlf*) ++ # IBM XL Fortran 10.1 on PPC cannot create shared libs itself ++ _LT_TAGVAR(whole_archive_flag_spec, $1)='--whole-archive$convenience --no-whole-archive' ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' ++ _LT_TAGVAR(archive_cmds, $1)='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib' ++ if test yes = "$supports_anon_versioning"; then ++ _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ ++ cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ ++ echo "local: *; };" >> $output_objdir/$libname.ver~ ++ $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' ++ fi ++ ;; ++ esac ++ else ++ _LT_TAGVAR(ld_shlibs, $1)=no ++ fi ++ ;; ++ ++ netbsd*) ++ if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then ++ _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' ++ wlarc= ++ else ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' ++ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ++ fi ++ ;; ++ ++ solaris*) ++ if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then ++ _LT_TAGVAR(ld_shlibs, 
$1)=no ++ cat <<_LT_EOF 1>&2 ++ ++*** Warning: The releases 2.8.* of the GNU linker cannot reliably ++*** create shared libraries on Solaris systems. Therefore, libtool ++*** is disabling shared libraries support. We urge you to upgrade GNU ++*** binutils to release 2.9.1 or newer. Another option is to modify ++*** your PATH or compiler configuration so that the native linker is ++*** used, and then restart. ++ ++_LT_EOF ++ elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' ++ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ++ else ++ _LT_TAGVAR(ld_shlibs, $1)=no ++ fi ++ ;; ++ ++ sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) ++ case `$LD -v 2>&1` in ++ *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.1[[0-5]].*) ++ _LT_TAGVAR(ld_shlibs, $1)=no ++ cat <<_LT_EOF 1>&2 ++ ++*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 cannot ++*** reliably create shared libraries on SCO systems. Therefore, libtool ++*** is disabling shared libraries support. We urge you to upgrade GNU ++*** binutils to release 2.16.91.0.3 or newer. Another option is to modify ++*** your PATH or compiler configuration so that the native linker is ++*** used, and then restart. ++ ++_LT_EOF ++ ;; ++ *) ++ # For security reasons, it is highly recommended that you always ++ # use absolute paths for naming shared libraries, and exclude the ++ # DT_RUNPATH tag from executables and libraries. But doing so ++ # requires that you compile everything twice, which is a pain. 
++ if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' ++ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ++ else ++ _LT_TAGVAR(ld_shlibs, $1)=no ++ fi ++ ;; ++ esac ++ ;; ++ ++ sunos4*) ++ _LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' ++ wlarc= ++ _LT_TAGVAR(hardcode_direct, $1)=yes ++ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ++ ;; ++ ++ *) ++ if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' ++ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ++ else ++ _LT_TAGVAR(ld_shlibs, $1)=no ++ fi ++ ;; ++ esac ++ ++ if test no = "$_LT_TAGVAR(ld_shlibs, $1)"; then ++ runpath_var= ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= ++ _LT_TAGVAR(export_dynamic_flag_spec, $1)= ++ _LT_TAGVAR(whole_archive_flag_spec, $1)= ++ fi ++ else ++ # PORTME fill in a description of your system's linker (not GNU ld) ++ case $host_os in ++ aix3*) ++ _LT_TAGVAR(allow_undefined_flag, $1)=unsupported ++ _LT_TAGVAR(always_export_symbols, $1)=yes ++ _LT_TAGVAR(archive_expsym_cmds, $1)='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' ++ # Note: this linker hardcodes the directories in LIBPATH if there ++ # are no directories specified by -L. 
++ _LT_TAGVAR(hardcode_minus_L, $1)=yes ++ if test yes = "$GCC" && test -z "$lt_prog_compiler_static"; then ++ # Neither direct hardcoding nor static linking is supported with a ++ # broken collect2. ++ _LT_TAGVAR(hardcode_direct, $1)=unsupported ++ fi ++ ;; ++ ++ aix[[4-9]]*) ++ if test ia64 = "$host_cpu"; then ++ # On IA64, the linker does run time linking by default, so we don't ++ # have to do anything special. ++ aix_use_runtimelinking=no ++ exp_sym_flag='-Bexport' ++ no_entry_flag= ++ else ++ # If we're using GNU nm, then we don't want the "-C" option. ++ # -C means demangle to GNU nm, but means don't demangle to AIX nm. ++ # Without the "-l" option, or with the "-B" option, AIX nm treats ++ # weak defined symbols like other global defined symbols, whereas ++ # GNU nm marks them as "W". ++ # While the 'weak' keyword is ignored in the Export File, we need ++ # it in the Import File for the 'aix-soname' feature, so we have ++ # to replace the "-B" option with "-P" for AIX nm. ++ if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then ++ _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && ([substr](\$ 3,1,1) != ".")) { if (\$ 2 == "W") { print \$ 3 " weak" } else { print \$ 3 } } }'\'' | sort -u > $export_symbols' ++ else ++ _LT_TAGVAR(export_symbols_cmds, $1)='`func_echo_all $NM | $SED -e '\''s/B\([[^B]]*\)$/P\1/'\''` -PCpgl $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "L") || (\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) && ([substr](\$ 1,1,1) != ".")) { if ((\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) { print \$ 1 " weak" } else { print \$ 1 } } }'\'' | sort -u > $export_symbols' ++ fi ++ aix_use_runtimelinking=no ++ ++ # Test if we are trying to use run time linking or normal ++ # AIX style linking. If -brtl is somewhere in LDFLAGS, we ++ # have runtime linking enabled, and use it for executables. 
++ # For shared libraries, we enable/disable runtime linking ++ # depending on the kind of the shared library created - ++ # when "with_aix_soname,aix_use_runtimelinking" is: ++ # "aix,no" lib.a(lib.so.V) shared, rtl:no, for executables ++ # "aix,yes" lib.so shared, rtl:yes, for executables ++ # lib.a static archive ++ # "both,no" lib.so.V(shr.o) shared, rtl:yes ++ # lib.a(lib.so.V) shared, rtl:no, for executables ++ # "both,yes" lib.so.V(shr.o) shared, rtl:yes, for executables ++ # lib.a(lib.so.V) shared, rtl:no ++ # "svr4,*" lib.so.V(shr.o) shared, rtl:yes, for executables ++ # lib.a static archive ++ case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*) ++ for ld_flag in $LDFLAGS; do ++ if (test x-brtl = "x$ld_flag" || test x-Wl,-brtl = "x$ld_flag"); then ++ aix_use_runtimelinking=yes ++ break ++ fi ++ done ++ if test svr4,no = "$with_aix_soname,$aix_use_runtimelinking"; then ++ # With aix-soname=svr4, we create the lib.so.V shared archives only, ++ # so we don't have lib.a shared libs to link our executables. ++ # We have to force runtime linking in this case. ++ aix_use_runtimelinking=yes ++ LDFLAGS="$LDFLAGS -Wl,-brtl" ++ fi ++ ;; ++ esac ++ ++ exp_sym_flag='-bexport' ++ no_entry_flag='-bnoentry' ++ fi ++ ++ # When large executables or shared objects are built, AIX ld can ++ # have problems creating the table of contents. If linking a library ++ # or program results in "error TOC overflow" add -mminimal-toc to ++ # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not ++ # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. 
++ ++ _LT_TAGVAR(archive_cmds, $1)='' ++ _LT_TAGVAR(hardcode_direct, $1)=yes ++ _LT_TAGVAR(hardcode_direct_absolute, $1)=yes ++ _LT_TAGVAR(hardcode_libdir_separator, $1)=':' ++ _LT_TAGVAR(link_all_deplibs, $1)=yes ++ _LT_TAGVAR(file_list_spec, $1)='$wl-f,' ++ case $with_aix_soname,$aix_use_runtimelinking in ++ aix,*) ;; # traditional, no import file ++ svr4,* | *,yes) # use import file ++ # The Import File defines what to hardcode. ++ _LT_TAGVAR(hardcode_direct, $1)=no ++ _LT_TAGVAR(hardcode_direct_absolute, $1)=no ++ ;; ++ esac ++ ++ if test yes = "$GCC"; then ++ case $host_os in aix4.[[012]]|aix4.[[012]].*) ++ # We only want to do this on AIX 4.2 and lower, the check ++ # below for broken collect2 doesn't work under 4.3+ ++ collect2name=`$CC -print-prog-name=collect2` ++ if test -f "$collect2name" && ++ strings "$collect2name" | $GREP resolve_lib_name >/dev/null ++ then ++ # We have reworked collect2 ++ : ++ else ++ # We have old collect2 ++ _LT_TAGVAR(hardcode_direct, $1)=unsupported ++ # It fails to find uninstalled libraries when the uninstalled ++ # path is not listed in the libpath. Setting hardcode_minus_L ++ # to unsupported forces relinking ++ _LT_TAGVAR(hardcode_minus_L, $1)=yes ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' ++ _LT_TAGVAR(hardcode_libdir_separator, $1)= ++ fi ++ ;; ++ esac ++ shared_flag='-shared' ++ if test yes = "$aix_use_runtimelinking"; then ++ shared_flag="$shared_flag "'$wl-G' ++ fi ++ # Need to ensure runtime linking is disabled for the traditional ++ # shared library, or the linker may eventually find shared libraries ++ # /with/ Import File - we do not want to mix them. ++ shared_flag_aix='-shared' ++ shared_flag_svr4='-shared $wl-G' ++ else ++ # not using gcc ++ if test ia64 = "$host_cpu"; then ++ # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release ++ # chokes on -Wl,-G. 
The following line is correct: ++ shared_flag='-G' ++ else ++ if test yes = "$aix_use_runtimelinking"; then ++ shared_flag='$wl-G' ++ else ++ shared_flag='$wl-bM:SRE' ++ fi ++ shared_flag_aix='$wl-bM:SRE' ++ shared_flag_svr4='$wl-G' ++ fi ++ fi ++ ++ _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-bexpall' ++ # It seems that -bexpall does not export symbols beginning with ++ # underscore (_), so it is better to generate a list of symbols to export. ++ _LT_TAGVAR(always_export_symbols, $1)=yes ++ if test aix,yes = "$with_aix_soname,$aix_use_runtimelinking"; then ++ # Warning - without using the other runtime loading flags (-brtl), ++ # -berok will link without error, but may produce a broken library. ++ _LT_TAGVAR(allow_undefined_flag, $1)='-berok' ++ # Determine the default libpath from the value encoded in an ++ # empty executable. ++ _LT_SYS_MODULE_PATH_AIX([$1]) ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-blibpath:$libdir:'"$aix_libpath" ++ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs $wl'$no_entry_flag' $compiler_flags `if test -n "$allow_undefined_flag"; then func_echo_all "$wl$allow_undefined_flag"; else :; fi` $wl'$exp_sym_flag:\$export_symbols' '$shared_flag ++ else ++ if test ia64 = "$host_cpu"; then ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R $libdir:/usr/lib:/lib' ++ _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs" ++ _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\$wl$no_entry_flag"' $compiler_flags $wl$allow_undefined_flag '"\$wl$exp_sym_flag:\$export_symbols" ++ else ++ # Determine the default libpath from the value encoded in an ++ # empty executable. ++ _LT_SYS_MODULE_PATH_AIX([$1]) ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-blibpath:$libdir:'"$aix_libpath" ++ # Warning - without using the other run time loading flags, ++ # -berok will link without error, but may produce a broken library. 
++ _LT_TAGVAR(no_undefined_flag, $1)=' $wl-bernotok' ++ _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-berok' ++ if test yes = "$with_gnu_ld"; then ++ # We only use this code for GNU lds that support --whole-archive. ++ _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive$convenience $wl--no-whole-archive' ++ else ++ # Exported symbols can be pulled into shared objects from archives ++ _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience' ++ fi ++ _LT_TAGVAR(archive_cmds_need_lc, $1)=yes ++ _LT_TAGVAR(archive_expsym_cmds, $1)='$RM -r $output_objdir/$realname.d~$MKDIR $output_objdir/$realname.d' ++ # -brtl affects multiple linker settings, -berok does not and is overridden later ++ compiler_flags_filtered='`func_echo_all "$compiler_flags " | $SED -e "s%-brtl\\([[, ]]\\)%-berok\\1%g"`' ++ if test svr4 != "$with_aix_soname"; then ++ # This is similar to how AIX traditionally builds its shared libraries. ++ _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$CC '$shared_flag_aix' -o $output_objdir/$realname.d/$soname $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$realname.d/$soname' ++ fi ++ if test aix != "$with_aix_soname"; then ++ _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$CC '$shared_flag_svr4' -o $output_objdir/$realname.d/$shared_archive_member_spec.o $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$STRIP -e $output_objdir/$realname.d/$shared_archive_member_spec.o~( func_echo_all "#! 
$soname($shared_archive_member_spec.o)"; if test shr_64 = "$shared_archive_member_spec"; then func_echo_all "# 64"; else func_echo_all "# 32"; fi; cat $export_symbols ) > $output_objdir/$realname.d/$shared_archive_member_spec.imp~$AR $AR_FLAGS $output_objdir/$soname $output_objdir/$realname.d/$shared_archive_member_spec.o $output_objdir/$realname.d/$shared_archive_member_spec.imp' ++ else ++ # used by -dlpreopen to get the symbols ++ _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$MV $output_objdir/$realname.d/$soname $output_objdir' ++ fi ++ _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$RM -r $output_objdir/$realname.d' ++ fi ++ fi ++ ;; ++ ++ amigaos*) ++ case $host_cpu in ++ powerpc) ++ # see comment about AmigaOS4 .so support ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' ++ _LT_TAGVAR(archive_expsym_cmds, $1)='' ++ ;; ++ m68k) ++ _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' ++ _LT_TAGVAR(hardcode_minus_L, $1)=yes ++ ;; ++ esac ++ ;; ++ ++ bsdi[[45]]*) ++ _LT_TAGVAR(export_dynamic_flag_spec, $1)=-rdynamic ++ ;; ++ ++ cygwin* | mingw* | pw32* | cegcc*) ++ # When not using gcc, we currently assume that we are using ++ # Microsoft Visual C++ or Intel C++ Compiler. ++ # hardcode_libdir_flag_spec is actually meaningless, as there is ++ # no search path for DLLs. 
++ case $cc_basename in ++ cl* | icl*) ++ # Native MSVC or ICC ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' ++ _LT_TAGVAR(allow_undefined_flag, $1)=unsupported ++ _LT_TAGVAR(always_export_symbols, $1)=yes ++ _LT_TAGVAR(file_list_spec, $1)='@' ++ # Tell ltmain to make .lib files, not .a files. ++ libext=lib ++ # Tell ltmain to make .dll files, not .so files. ++ shrext_cmds=.dll ++ # FIXME: Setting linknames here is a bad hack. ++ _LT_TAGVAR(archive_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~linknames=' ++ _LT_TAGVAR(archive_expsym_cmds, $1)='if _LT_DLL_DEF_P([$export_symbols]); then ++ cp "$export_symbols" "$output_objdir/$soname.def"; ++ echo "$tool_output_objdir$soname.def" > "$output_objdir/$soname.exp"; ++ else ++ $SED -e '\''s/^/-link -EXPORT:/'\'' < $export_symbols > $output_objdir/$soname.exp; ++ fi~ ++ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ ++ linknames=' ++ # The linker will not automatically build a static lib if we build a DLL. 
++ # _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' ++ _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes ++ _LT_TAGVAR(exclude_expsyms, $1)='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' ++ _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1,DATA/'\'' | $SED -e '\''/^[[AITW]][[ ]]/s/.*[[ ]]//'\'' | sort | uniq > $export_symbols' ++ # Don't use ranlib ++ _LT_TAGVAR(old_postinstall_cmds, $1)='chmod 644 $oldlib' ++ _LT_TAGVAR(postlink_cmds, $1)='lt_outputfile="@OUTPUT@"~ ++ lt_tool_outputfile="@TOOL_OUTPUT@"~ ++ case $lt_outputfile in ++ *.exe|*.EXE) ;; ++ *) ++ lt_outputfile=$lt_outputfile.exe ++ lt_tool_outputfile=$lt_tool_outputfile.exe ++ ;; ++ esac~ ++ if test : != "$MANIFEST_TOOL" && test -f "$lt_outputfile.manifest"; then ++ $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; ++ $RM "$lt_outputfile.manifest"; ++ fi' ++ ;; ++ *) ++ # Assume MSVC and ICC wrapper ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' ++ _LT_TAGVAR(allow_undefined_flag, $1)=unsupported ++ # Tell ltmain to make .lib files, not .a files. ++ libext=lib ++ # Tell ltmain to make .dll files, not .so files. ++ shrext_cmds=.dll ++ # FIXME: Setting linknames here is a bad hack. ++ _LT_TAGVAR(archive_cmds, $1)='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' ++ # The linker will automatically build a .lib file if we build a DLL. ++ _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' ++ # FIXME: Should let the user specify the lib program. 
++ _LT_TAGVAR(old_archive_cmds, $1)='lib -OUT:$oldlib$oldobjs$old_deplibs' ++ _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes ++ ;; ++ esac ++ ;; ++ ++ darwin* | rhapsody*) ++ _LT_DARWIN_LINKER_FEATURES($1) ++ ;; ++ ++ dgux*) ++ _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' ++ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ++ ;; ++ ++ # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor ++ # support. Future versions do this automatically, but an explicit c++rt0.o ++ # does not break anything, and helps significantly (at the cost of a little ++ # extra space). ++ freebsd2.2*) ++ _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' ++ _LT_TAGVAR(hardcode_direct, $1)=yes ++ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ++ ;; ++ ++ # Unfortunately, older versions of FreeBSD 2 do not have this feature. ++ freebsd2.*) ++ _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' ++ _LT_TAGVAR(hardcode_direct, $1)=yes ++ _LT_TAGVAR(hardcode_minus_L, $1)=yes ++ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ++ ;; ++ ++ # FreeBSD 3 and greater uses gcc -shared to do shared libraries. 
++ freebsd* | dragonfly* | midnightbsd*) ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' ++ _LT_TAGVAR(hardcode_direct, $1)=yes ++ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ++ ;; ++ ++ hpux9*) ++ if test yes = "$GCC"; then ++ _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared $pic_flag $wl+b $wl$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' ++ else ++ _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' ++ fi ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir' ++ _LT_TAGVAR(hardcode_libdir_separator, $1)=: ++ _LT_TAGVAR(hardcode_direct, $1)=yes ++ ++ # hardcode_minus_L: Not really in the search PATH, ++ # but as the default location of the library. ++ _LT_TAGVAR(hardcode_minus_L, $1)=yes ++ _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' ++ ;; ++ ++ hpux10*) ++ if test yes,no = "$GCC,$with_gnu_ld"; then ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ++ else ++ _LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' ++ fi ++ if test no = "$with_gnu_ld"; then ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir' ++ _LT_TAGVAR(hardcode_libdir_separator, $1)=: ++ _LT_TAGVAR(hardcode_direct, $1)=yes ++ _LT_TAGVAR(hardcode_direct_absolute, $1)=yes ++ _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' ++ # hardcode_minus_L: Not really in the search PATH, ++ # but as the default location of the library. 
++ _LT_TAGVAR(hardcode_minus_L, $1)=yes ++ fi ++ ;; ++ ++ hpux11*) ++ if test yes,no = "$GCC,$with_gnu_ld"; then ++ case $host_cpu in ++ hppa*64*) ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' ++ ;; ++ ia64*) ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' ++ ;; ++ *) ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ++ ;; ++ esac ++ else ++ case $host_cpu in ++ hppa*64*) ++ _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' ++ ;; ++ ia64*) ++ _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' ++ ;; ++ *) ++ m4_if($1, [], [ ++ # Older versions of the 11.00 compiler do not understand -b yet ++ # (HP92453-01 A.11.01.20 doesn't, HP92453-01 B.11.X.35175-35176.GP does) ++ _LT_LINKER_OPTION([if $CC understands -b], ++ _LT_TAGVAR(lt_cv_prog_compiler__b, $1), [-b], ++ [_LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags'], ++ [_LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags'])], ++ [_LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags']) ++ ;; ++ esac ++ fi ++ if test no = "$with_gnu_ld"; then ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir' ++ _LT_TAGVAR(hardcode_libdir_separator, $1)=: ++ ++ case $host_cpu in ++ hppa*64*|ia64*) ++ _LT_TAGVAR(hardcode_direct, $1)=no ++ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ++ ;; ++ *) ++ _LT_TAGVAR(hardcode_direct, $1)=yes ++ _LT_TAGVAR(hardcode_direct_absolute, $1)=yes ++ _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' ++ ++ # hardcode_minus_L: Not really in the search PATH, ++ # but as 
the default location of the library. ++ _LT_TAGVAR(hardcode_minus_L, $1)=yes ++ ;; ++ esac ++ fi ++ ;; ++ ++ irix5* | irix6* | nonstopux*) ++ if test yes = "$GCC"; then ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' ++ # Try to use the -exported_symbol ld option, if it does not ++ # work, assume that -exports_file does not work either and ++ # implicitly export all symbols. ++ # This should be the same for all languages, so no per-tag cache variable. ++ AC_CACHE_CHECK([whether the $host_os linker accepts -exported_symbol], ++ [lt_cv_irix_exported_symbol], ++ [save_LDFLAGS=$LDFLAGS ++ LDFLAGS="$LDFLAGS -shared $wl-exported_symbol ${wl}foo $wl-update_registry $wl/dev/null" ++ AC_LINK_IFELSE( ++ [AC_LANG_SOURCE( ++ [AC_LANG_CASE([C], [[int foo (void) { return 0; }]], ++ [C++], [[int foo (void) { return 0; }]], ++ [Fortran 77], [[ ++ subroutine foo ++ end]], ++ [Fortran], [[ ++ subroutine foo ++ end]])])], ++ [lt_cv_irix_exported_symbol=yes], ++ [lt_cv_irix_exported_symbol=no]) ++ LDFLAGS=$save_LDFLAGS]) ++ if test yes = "$lt_cv_irix_exported_symbol"; then ++ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations $wl-exports_file $wl$export_symbols -o $lib' ++ fi ++ else ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' ++ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations 
-exports_file $export_symbols -o $lib' ++ fi ++ _LT_TAGVAR(archive_cmds_need_lc, $1)='no' ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' ++ _LT_TAGVAR(hardcode_libdir_separator, $1)=: ++ _LT_TAGVAR(inherit_rpath, $1)=yes ++ _LT_TAGVAR(link_all_deplibs, $1)=yes ++ ;; ++ ++ linux*) ++ case $cc_basename in ++ tcc*) ++ # Fabrice Bellard et al's Tiny C Compiler ++ _LT_TAGVAR(ld_shlibs, $1)=yes ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' ++ ;; ++ esac ++ ;; ++ ++ netbsd*) ++ if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then ++ _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out ++ else ++ _LT_TAGVAR(archive_cmds, $1)='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF ++ fi ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' ++ _LT_TAGVAR(hardcode_direct, $1)=yes ++ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ++ ;; ++ ++ newsos6) ++ _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' ++ _LT_TAGVAR(hardcode_direct, $1)=yes ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' ++ _LT_TAGVAR(hardcode_libdir_separator, $1)=: ++ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ++ ;; ++ ++ *nto* | *qnx*) ++ ;; ++ ++ openbsd* | bitrig*) ++ if test -f /usr/libexec/ld.so; then ++ _LT_TAGVAR(hardcode_direct, $1)=yes ++ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ++ _LT_TAGVAR(hardcode_direct_absolute, $1)=yes ++ if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' ++ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags $wl-retain-symbols-file,$export_symbols' ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' ++ _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' ++ else ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs 
$deplibs $compiler_flags' ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' ++ fi ++ else ++ _LT_TAGVAR(ld_shlibs, $1)=no ++ fi ++ ;; ++ ++ os2*) ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' ++ _LT_TAGVAR(hardcode_minus_L, $1)=yes ++ _LT_TAGVAR(allow_undefined_flag, $1)=unsupported ++ shrext_cmds=.dll ++ _LT_TAGVAR(archive_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ ++ $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ ++ $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ ++ $ECHO EXPORTS >> $output_objdir/$libname.def~ ++ emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ ++ $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ ++ emximp -o $lib $output_objdir/$libname.def' ++ _LT_TAGVAR(archive_expsym_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ ++ $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ ++ $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ ++ $ECHO EXPORTS >> $output_objdir/$libname.def~ ++ prefix_cmds="$SED"~ ++ if test EXPORTS = "`$SED 1q $export_symbols`"; then ++ prefix_cmds="$prefix_cmds -e 1d"; ++ fi~ ++ prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ ++ cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ ++ $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ ++ emximp -o $lib $output_objdir/$libname.def' ++ _LT_TAGVAR(old_archive_From_new_cmds, $1)='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' ++ _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes ++ _LT_TAGVAR(file_list_spec, $1)='@' ++ ;; ++ ++ osf3*) ++ if test yes = "$GCC"; then ++ _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-expect_unresolved $wl\*' ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag 
$libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' ++ else ++ _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' ++ fi ++ _LT_TAGVAR(archive_cmds_need_lc, $1)='no' ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' ++ _LT_TAGVAR(hardcode_libdir_separator, $1)=: ++ ;; ++ ++ osf4* | osf5*) # as osf3* with the addition of -msym flag ++ if test yes = "$GCC"; then ++ _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-expect_unresolved $wl\*' ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $pic_flag $libobjs $deplibs $compiler_flags $wl-msym $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' ++ else ++ _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' ++ _LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~ ++ $CC -shared$allow_undefined_flag $wl-input $wl$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib~$RM $lib.exp' ++ ++ # Both c and cxx compiler support -rpath directly ++ _LT_TAGVAR(hardcode_libdir_flag_spec, 
$1)='-rpath $libdir' ++ fi ++ _LT_TAGVAR(archive_cmds_need_lc, $1)='no' ++ _LT_TAGVAR(hardcode_libdir_separator, $1)=: ++ ;; ++ ++ solaris*) ++ _LT_TAGVAR(no_undefined_flag, $1)=' -z defs' ++ if test yes = "$GCC"; then ++ wlarc='$wl' ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $wl-z ${wl}text $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' ++ _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ ++ $CC -shared $pic_flag $wl-z ${wl}text $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' ++ else ++ case `$CC -V 2>&1` in ++ *"Compilers 5.0"*) ++ wlarc='' ++ _LT_TAGVAR(archive_cmds, $1)='$LD -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $linker_flags' ++ _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ ++ $LD -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp' ++ ;; ++ *) ++ wlarc='$wl' ++ _LT_TAGVAR(archive_cmds, $1)='$CC -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $compiler_flags' ++ _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ ++ $CC -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' ++ ;; ++ esac ++ fi ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' ++ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ++ case $host_os in ++ solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; ++ *) ++ # The compiler driver will combine and reorder linker options, ++ # but understands '-z linker_flag'. GCC discards it without '$wl', ++ # but is careful enough not to reorder. ++ # Supported since Solaris 2.6 (maybe 2.5.1?) 
++ if test yes = "$GCC"; then ++ _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl-z ${wl}allextract$convenience $wl-z ${wl}defaultextract' ++ else ++ _LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract' ++ fi ++ ;; ++ esac ++ _LT_TAGVAR(link_all_deplibs, $1)=yes ++ ;; ++ ++ sunos4*) ++ if test sequent = "$host_vendor"; then ++ # Use $CC to link under sequent, because it throws in some extra .o ++ # files that make .init and .fini sections work. ++ _LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h $soname -o $lib $libobjs $deplibs $compiler_flags' ++ else ++ _LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' ++ fi ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' ++ _LT_TAGVAR(hardcode_direct, $1)=yes ++ _LT_TAGVAR(hardcode_minus_L, $1)=yes ++ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ++ ;; ++ ++ sysv4) ++ case $host_vendor in ++ sni) ++ _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' ++ _LT_TAGVAR(hardcode_direct, $1)=yes # is this really true??? ++ ;; ++ siemens) ++ ## LD is ld it makes a PLAMLIB ++ ## CC just makes a GrossModule. 
++ _LT_TAGVAR(archive_cmds, $1)='$LD -G -o $lib $libobjs $deplibs $linker_flags' ++ _LT_TAGVAR(reload_cmds, $1)='$CC -r -o $output$reload_objs' ++ _LT_TAGVAR(hardcode_direct, $1)=no ++ ;; ++ motorola) ++ _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' ++ _LT_TAGVAR(hardcode_direct, $1)=no #Motorola manual says yes, but my tests say they lie ++ ;; ++ esac ++ runpath_var='LD_RUN_PATH' ++ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ++ ;; ++ ++ sysv4.3*) ++ _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' ++ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ++ _LT_TAGVAR(export_dynamic_flag_spec, $1)='-Bexport' ++ ;; ++ ++ sysv4*MP*) ++ if test -d /usr/nec; then ++ _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' ++ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ++ runpath_var=LD_RUN_PATH ++ hardcode_runpath_var=yes ++ _LT_TAGVAR(ld_shlibs, $1)=yes ++ fi ++ ;; ++ ++ sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*) ++ _LT_TAGVAR(no_undefined_flag, $1)='$wl-z,text' ++ _LT_TAGVAR(archive_cmds_need_lc, $1)=no ++ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ++ runpath_var='LD_RUN_PATH' ++ ++ if test yes = "$GCC"; then ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ else ++ _LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ fi ++ ;; ++ ++ sysv5* | sco3.2v5* | sco5v6*) ++ # Note: We CANNOT use -z defs as we might desire, because we do not ++ # link with -lc, and that would cause any symbols used from libc to ++ # always be unresolved, which means just about no 
library would ++ # ever link correctly. If we're not using GNU ld we use -z text ++ # though, which does catch some bad symbols but isn't as heavy-handed ++ # as -z defs. ++ _LT_TAGVAR(no_undefined_flag, $1)='$wl-z,text' ++ _LT_TAGVAR(allow_undefined_flag, $1)='$wl-z,nodefs' ++ _LT_TAGVAR(archive_cmds_need_lc, $1)=no ++ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R,$libdir' ++ _LT_TAGVAR(hardcode_libdir_separator, $1)=':' ++ _LT_TAGVAR(link_all_deplibs, $1)=yes ++ _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-Bexport' ++ runpath_var='LD_RUN_PATH' ++ ++ if test yes = "$GCC"; then ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ else ++ _LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ fi ++ ;; ++ ++ uts4*) ++ _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' ++ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ++ ;; ++ ++ *) ++ _LT_TAGVAR(ld_shlibs, $1)=no ++ ;; ++ esac ++ ++ if test sni = "$host_vendor"; then ++ case $host in ++ sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) ++ _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-Blargedynsym' ++ ;; ++ esac ++ fi ++ fi ++]) ++AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)]) ++test no = "$_LT_TAGVAR(ld_shlibs, $1)" && can_build_shared=no ++ ++_LT_TAGVAR(with_gnu_ld, $1)=$with_gnu_ld ++ ++_LT_DECL([], [libext], [0], [Old archive suffix (normally "a")])dnl ++_LT_DECL([], [shrext_cmds], [1], [Shared library suffix (normally ".so")])dnl ++_LT_DECL([], [extract_expsyms_cmds], [2], ++ [The commands to extract the exported symbol list from a 
shared archive]) ++ ++# ++# Do we need to explicitly link libc? ++# ++case "x$_LT_TAGVAR(archive_cmds_need_lc, $1)" in ++x|xyes) ++ # Assume -lc should be added ++ _LT_TAGVAR(archive_cmds_need_lc, $1)=yes ++ ++ if test yes,yes = "$GCC,$enable_shared"; then ++ case $_LT_TAGVAR(archive_cmds, $1) in ++ *'~'*) ++ # FIXME: we may have to deal with multi-command sequences. ++ ;; ++ '$CC '*) ++ # Test whether the compiler implicitly links with -lc since on some ++ # systems, -lgcc has to come before -lc. If gcc already passes -lc ++ # to ld, don't add -lc before -lgcc. ++ AC_CACHE_CHECK([whether -lc should be explicitly linked in], ++ [lt_cv_]_LT_TAGVAR(archive_cmds_need_lc, $1), ++ [$RM conftest* ++ echo "$lt_simple_compile_test_code" > conftest.$ac_ext ++ ++ if AC_TRY_EVAL(ac_compile) 2>conftest.err; then ++ soname=conftest ++ lib=conftest ++ libobjs=conftest.$ac_objext ++ deplibs= ++ wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1) ++ pic_flag=$_LT_TAGVAR(lt_prog_compiler_pic, $1) ++ compiler_flags=-v ++ linker_flags=-v ++ verstring= ++ output_objdir=. 
++ libname=conftest ++ lt_save_allow_undefined_flag=$_LT_TAGVAR(allow_undefined_flag, $1) ++ _LT_TAGVAR(allow_undefined_flag, $1)= ++ if AC_TRY_EVAL(_LT_TAGVAR(archive_cmds, $1) 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) ++ then ++ lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1)=no ++ else ++ lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1)=yes ++ fi ++ _LT_TAGVAR(allow_undefined_flag, $1)=$lt_save_allow_undefined_flag ++ else ++ cat conftest.err 1>&5 ++ fi ++ $RM conftest* ++ ]) ++ _LT_TAGVAR(archive_cmds_need_lc, $1)=$lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1) ++ ;; ++ esac ++ fi ++ ;; ++esac ++ ++_LT_TAGDECL([build_libtool_need_lc], [archive_cmds_need_lc], [0], ++ [Whether or not to add -lc for building shared libraries]) ++_LT_TAGDECL([allow_libtool_libs_with_static_runtimes], ++ [enable_shared_with_static_runtimes], [0], ++ [Whether or not to disallow shared libs when runtime libs are static]) ++_LT_TAGDECL([], [export_dynamic_flag_spec], [1], ++ [Compiler flag to allow reflexive dlopens]) ++_LT_TAGDECL([], [whole_archive_flag_spec], [1], ++ [Compiler flag to generate shared objects directly from archives]) ++_LT_TAGDECL([], [compiler_needs_object], [1], ++ [Whether the compiler copes with passing no objects directly]) ++_LT_TAGDECL([], [old_archive_from_new_cmds], [2], ++ [Create an old-style archive from a shared archive]) ++_LT_TAGDECL([], [old_archive_from_expsyms_cmds], [2], ++ [Create a temporary old-style archive to link instead of a shared archive]) ++_LT_TAGDECL([], [archive_cmds], [2], [Commands used to build a shared archive]) ++_LT_TAGDECL([], [archive_expsym_cmds], [2]) ++_LT_TAGDECL([], [module_cmds], [2], ++ [Commands used to build a loadable module if different from building ++ a shared archive.]) ++_LT_TAGDECL([], [module_expsym_cmds], [2]) ++_LT_TAGDECL([], [with_gnu_ld], [1], ++ [Whether we are building with GNU ld or not]) ++_LT_TAGDECL([], [allow_undefined_flag], [1], ++ [Flag that allows shared libraries with undefined symbols to be built]) 
++_LT_TAGDECL([], [no_undefined_flag], [1], ++ [Flag that enforces no undefined symbols]) ++_LT_TAGDECL([], [hardcode_libdir_flag_spec], [1], ++ [Flag to hardcode $libdir into a binary during linking. ++ This must work even if $libdir does not exist]) ++_LT_TAGDECL([], [hardcode_libdir_separator], [1], ++ [Whether we need a single "-rpath" flag with a separated argument]) ++_LT_TAGDECL([], [hardcode_direct], [0], ++ [Set to "yes" if using DIR/libNAME$shared_ext during linking hardcodes ++ DIR into the resulting binary]) ++_LT_TAGDECL([], [hardcode_direct_absolute], [0], ++ [Set to "yes" if using DIR/libNAME$shared_ext during linking hardcodes ++ DIR into the resulting binary and the resulting library dependency is ++ "absolute", i.e impossible to change by setting $shlibpath_var if the ++ library is relocated]) ++_LT_TAGDECL([], [hardcode_minus_L], [0], ++ [Set to "yes" if using the -LDIR flag during linking hardcodes DIR ++ into the resulting binary]) ++_LT_TAGDECL([], [hardcode_shlibpath_var], [0], ++ [Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR ++ into the resulting binary]) ++_LT_TAGDECL([], [hardcode_automatic], [0], ++ [Set to "yes" if building a shared library automatically hardcodes DIR ++ into the library and all subsequent libraries and executables linked ++ against it]) ++_LT_TAGDECL([], [inherit_rpath], [0], ++ [Set to yes if linker adds runtime paths of dependent libraries ++ to runtime path list]) ++_LT_TAGDECL([], [link_all_deplibs], [0], ++ [Whether libtool must link a program against all its dependency libraries]) ++_LT_TAGDECL([], [always_export_symbols], [0], ++ [Set to "yes" if exported symbols are required]) ++_LT_TAGDECL([], [export_symbols_cmds], [2], ++ [The commands to list exported symbols]) ++_LT_TAGDECL([], [exclude_expsyms], [1], ++ [Symbols that should not be listed in the preloaded symbols]) ++_LT_TAGDECL([], [include_expsyms], [1], ++ [Symbols that must always be exported]) ++_LT_TAGDECL([], [prelink_cmds], 
[2], ++ [Commands necessary for linking programs (against libraries) with templates]) ++_LT_TAGDECL([], [postlink_cmds], [2], ++ [Commands necessary for finishing linking programs]) ++_LT_TAGDECL([], [file_list_spec], [1], ++ [Specify filename containing input files]) ++dnl FIXME: Not yet implemented ++dnl _LT_TAGDECL([], [thread_safe_flag_spec], [1], ++dnl [Compiler flag to generate thread safe objects]) ++])# _LT_LINKER_SHLIBS ++ ++ ++# _LT_LANG_C_CONFIG([TAG]) ++# ------------------------ ++# Ensure that the configuration variables for a C compiler are suitably ++# defined. These variables are subsequently used by _LT_CONFIG to write ++# the compiler configuration to 'libtool'. ++m4_defun([_LT_LANG_C_CONFIG], ++[m4_require([_LT_DECL_EGREP])dnl ++lt_save_CC=$CC ++AC_LANG_PUSH(C) ++ ++# Source file extension for C test sources. ++ac_ext=c ++ ++# Object file extension for compiled C test sources. ++objext=o ++_LT_TAGVAR(objext, $1)=$objext ++ ++# Code to be used in simple compile tests ++lt_simple_compile_test_code="int some_variable = 0;" ++ ++# Code to be used in simple link tests ++lt_simple_link_test_code='int main(){return(0);}' ++ ++_LT_TAG_COMPILER ++# Save the default compiler, since it gets overwritten when the other ++# tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP. 
++compiler_DEFAULT=$CC ++ ++# save warnings/boilerplate of simple test code ++_LT_COMPILER_BOILERPLATE ++_LT_LINKER_BOILERPLATE ++ ++if test -n "$compiler"; then ++ _LT_COMPILER_NO_RTTI($1) ++ _LT_COMPILER_PIC($1) ++ _LT_COMPILER_C_O($1) ++ _LT_COMPILER_FILE_LOCKS($1) ++ _LT_LINKER_SHLIBS($1) ++ _LT_SYS_DYNAMIC_LINKER($1) ++ _LT_LINKER_HARDCODE_LIBPATH($1) ++ LT_SYS_DLOPEN_SELF ++ _LT_CMD_STRIPLIB ++ ++ # Report what library types will actually be built ++ AC_MSG_CHECKING([if libtool supports shared libraries]) ++ AC_MSG_RESULT([$can_build_shared]) ++ ++ AC_MSG_CHECKING([whether to build shared libraries]) ++ test no = "$can_build_shared" && enable_shared=no ++ ++ # On AIX, shared libraries and static libraries use the same namespace, and ++ # are all built from PIC. ++ case $host_os in ++ aix3*) ++ test yes = "$enable_shared" && enable_static=no ++ if test -n "$RANLIB"; then ++ archive_cmds="$archive_cmds~\$RANLIB \$lib" ++ postinstall_cmds='$RANLIB $lib' ++ fi ++ ;; ++ ++ aix[[4-9]]*) ++ if test ia64 != "$host_cpu"; then ++ case $enable_shared,$with_aix_soname,$aix_use_runtimelinking in ++ yes,aix,yes) ;; # shared object as lib.so file only ++ yes,svr4,*) ;; # shared object as lib.so archive member only ++ yes,*) enable_static=no ;; # shared object in lib.a archive as well ++ esac ++ fi ++ ;; ++ esac ++ AC_MSG_RESULT([$enable_shared]) ++ ++ AC_MSG_CHECKING([whether to build static libraries]) ++ # Make sure either enable_shared or enable_static is yes. ++ test yes = "$enable_shared" || enable_static=yes ++ AC_MSG_RESULT([$enable_static]) ++ ++ _LT_CONFIG($1) ++fi ++AC_LANG_POP ++CC=$lt_save_CC ++])# _LT_LANG_C_CONFIG ++ ++ ++# _LT_LANG_CXX_CONFIG([TAG]) ++# -------------------------- ++# Ensure that the configuration variables for a C++ compiler are suitably ++# defined. These variables are subsequently used by _LT_CONFIG to write ++# the compiler configuration to 'libtool'. 
++m4_defun([_LT_LANG_CXX_CONFIG], ++[m4_require([_LT_FILEUTILS_DEFAULTS])dnl ++m4_require([_LT_DECL_EGREP])dnl ++m4_require([_LT_PATH_MANIFEST_TOOL])dnl ++if test -n "$CXX" && ( test no != "$CXX" && ++ ( (test g++ = "$CXX" && `g++ -v >/dev/null 2>&1` ) || ++ (test g++ != "$CXX"))); then ++ AC_PROG_CXXCPP ++else ++ _lt_caught_CXX_error=yes ++fi ++ ++AC_LANG_PUSH(C++) ++_LT_TAGVAR(archive_cmds_need_lc, $1)=no ++_LT_TAGVAR(allow_undefined_flag, $1)= ++_LT_TAGVAR(always_export_symbols, $1)=no ++_LT_TAGVAR(archive_expsym_cmds, $1)= ++_LT_TAGVAR(compiler_needs_object, $1)=no ++_LT_TAGVAR(export_dynamic_flag_spec, $1)= ++_LT_TAGVAR(hardcode_direct, $1)=no ++_LT_TAGVAR(hardcode_direct_absolute, $1)=no ++_LT_TAGVAR(hardcode_libdir_flag_spec, $1)= ++_LT_TAGVAR(hardcode_libdir_separator, $1)= ++_LT_TAGVAR(hardcode_minus_L, $1)=no ++_LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported ++_LT_TAGVAR(hardcode_automatic, $1)=no ++_LT_TAGVAR(inherit_rpath, $1)=no ++_LT_TAGVAR(module_cmds, $1)= ++_LT_TAGVAR(module_expsym_cmds, $1)= ++_LT_TAGVAR(link_all_deplibs, $1)=unknown ++_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds ++_LT_TAGVAR(reload_flag, $1)=$reload_flag ++_LT_TAGVAR(reload_cmds, $1)=$reload_cmds ++_LT_TAGVAR(no_undefined_flag, $1)= ++_LT_TAGVAR(whole_archive_flag_spec, $1)= ++_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no ++ ++# Source file extension for C++ test sources. ++ac_ext=cpp ++ ++# Object file extension for compiled C++ test sources. ++objext=o ++_LT_TAGVAR(objext, $1)=$objext ++ ++# No sense in running all these tests if we already determined that ++# the CXX compiler isn't working. Some variables (like enable_shared) ++# are currently assumed to apply to all compilers on this platform, ++# and will be corrupted by setting them based on a non-working compiler. 
++if test yes != "$_lt_caught_CXX_error"; then ++ # Code to be used in simple compile tests ++ lt_simple_compile_test_code="int some_variable = 0;" ++ ++ # Code to be used in simple link tests ++ lt_simple_link_test_code='int main(int, char *[[]]) { return(0); }' ++ ++ # ltmain only uses $CC for tagged configurations so make sure $CC is set. ++ _LT_TAG_COMPILER ++ ++ # save warnings/boilerplate of simple test code ++ _LT_COMPILER_BOILERPLATE ++ _LT_LINKER_BOILERPLATE ++ ++ # Allow CC to be a program name with arguments. ++ lt_save_CC=$CC ++ lt_save_CFLAGS=$CFLAGS ++ lt_save_LD=$LD ++ lt_save_GCC=$GCC ++ GCC=$GXX ++ lt_save_with_gnu_ld=$with_gnu_ld ++ lt_save_path_LD=$lt_cv_path_LD ++ if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then ++ lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx ++ else ++ $as_unset lt_cv_prog_gnu_ld ++ fi ++ if test -n "${lt_cv_path_LDCXX+set}"; then ++ lt_cv_path_LD=$lt_cv_path_LDCXX ++ else ++ $as_unset lt_cv_path_LD ++ fi ++ test -z "${LDCXX+set}" || LD=$LDCXX ++ CC=${CXX-"c++"} ++ CFLAGS=$CXXFLAGS ++ compiler=$CC ++ _LT_TAGVAR(compiler, $1)=$CC ++ _LT_CC_BASENAME([$compiler]) ++ ++ if test -n "$compiler"; then ++ # We don't want -fno-exception when compiling C++ code, so set the ++ # no_builtin_flag separately ++ if test yes = "$GXX"; then ++ _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' ++ else ++ _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)= ++ fi ++ ++ if test yes = "$GXX"; then ++ # Set up default GNU C++ configuration ++ ++ LT_PATH_LD ++ ++ # Check if GNU C++ uses GNU ld as the underlying linker, since the ++ # archiving commands below assume that GNU ld is being used. 
++ if test yes = "$with_gnu_ld"; then ++ _LT_TAGVAR(archive_cmds, $1)='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' ++ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ++ ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' ++ _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic' ++ ++ # If archive_cmds runs LD, not CC, wlarc should be empty ++ # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to ++ # investigate it a little bit more. (MM) ++ wlarc='$wl' ++ ++ # ancient GNU ld didn't support --whole-archive et. al. ++ if eval "`$CC -print-prog-name=ld` --help 2>&1" | ++ $GREP 'no-whole-archive' > /dev/null; then ++ _LT_TAGVAR(whole_archive_flag_spec, $1)=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive' ++ else ++ _LT_TAGVAR(whole_archive_flag_spec, $1)= ++ fi ++ else ++ with_gnu_ld=no ++ wlarc= ++ ++ # A generic and very simple default shared library creation ++ # command for GNU C++ for the case where it uses the native ++ # linker, instead of GNU ld. If possible, this setting should ++ # overridden to take advantage of the native linker features on ++ # the platform it is being used on. ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' ++ fi ++ ++ # Commands to make compiler produce verbose output that lists ++ # what "hidden" libraries, object files and flags are used when ++ # linking a shared library. 
++ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' ++ ++ else ++ GXX=no ++ with_gnu_ld=no ++ wlarc= ++ fi ++ ++ # PORTME: fill in a description of your system's C++ link characteristics ++ AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) ++ _LT_TAGVAR(ld_shlibs, $1)=yes ++ case $host_os in ++ aix3*) ++ # FIXME: insert proper C++ library support ++ _LT_TAGVAR(ld_shlibs, $1)=no ++ ;; ++ aix[[4-9]]*) ++ if test ia64 = "$host_cpu"; then ++ # On IA64, the linker does run time linking by default, so we don't ++ # have to do anything special. ++ aix_use_runtimelinking=no ++ exp_sym_flag='-Bexport' ++ no_entry_flag= ++ else ++ aix_use_runtimelinking=no ++ ++ # Test if we are trying to use run time linking or normal ++ # AIX style linking. If -brtl is somewhere in LDFLAGS, we ++ # have runtime linking enabled, and use it for executables. ++ # For shared libraries, we enable/disable runtime linking ++ # depending on the kind of the shared library created - ++ # when "with_aix_soname,aix_use_runtimelinking" is: ++ # "aix,no" lib.a(lib.so.V) shared, rtl:no, for executables ++ # "aix,yes" lib.so shared, rtl:yes, for executables ++ # lib.a static archive ++ # "both,no" lib.so.V(shr.o) shared, rtl:yes ++ # lib.a(lib.so.V) shared, rtl:no, for executables ++ # "both,yes" lib.so.V(shr.o) shared, rtl:yes, for executables ++ # lib.a(lib.so.V) shared, rtl:no ++ # "svr4,*" lib.so.V(shr.o) shared, rtl:yes, for executables ++ # lib.a static archive ++ case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*) ++ for ld_flag in $LDFLAGS; do ++ case $ld_flag in ++ *-brtl*) ++ aix_use_runtimelinking=yes ++ break ++ ;; ++ esac ++ done ++ if test svr4,no = "$with_aix_soname,$aix_use_runtimelinking"; then ++ # With aix-soname=svr4, we create the lib.so.V shared archives only, ++ # so we don't have lib.a shared libs to link our executables. ++ # We have to force runtime linking in this case. 
++ aix_use_runtimelinking=yes ++ LDFLAGS="$LDFLAGS -Wl,-brtl" ++ fi ++ ;; ++ esac ++ ++ exp_sym_flag='-bexport' ++ no_entry_flag='-bnoentry' ++ fi ++ ++ # When large executables or shared objects are built, AIX ld can ++ # have problems creating the table of contents. If linking a library ++ # or program results in "error TOC overflow" add -mminimal-toc to ++ # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not ++ # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. ++ ++ _LT_TAGVAR(archive_cmds, $1)='' ++ _LT_TAGVAR(hardcode_direct, $1)=yes ++ _LT_TAGVAR(hardcode_direct_absolute, $1)=yes ++ _LT_TAGVAR(hardcode_libdir_separator, $1)=':' ++ _LT_TAGVAR(link_all_deplibs, $1)=yes ++ _LT_TAGVAR(file_list_spec, $1)='$wl-f,' ++ case $with_aix_soname,$aix_use_runtimelinking in ++ aix,*) ;; # no import file ++ svr4,* | *,yes) # use import file ++ # The Import File defines what to hardcode. ++ _LT_TAGVAR(hardcode_direct, $1)=no ++ _LT_TAGVAR(hardcode_direct_absolute, $1)=no ++ ;; ++ esac ++ ++ if test yes = "$GXX"; then ++ case $host_os in aix4.[[012]]|aix4.[[012]].*) ++ # We only want to do this on AIX 4.2 and lower, the check ++ # below for broken collect2 doesn't work under 4.3+ ++ collect2name=`$CC -print-prog-name=collect2` ++ if test -f "$collect2name" && ++ strings "$collect2name" | $GREP resolve_lib_name >/dev/null ++ then ++ # We have reworked collect2 ++ : ++ else ++ # We have old collect2 ++ _LT_TAGVAR(hardcode_direct, $1)=unsupported ++ # It fails to find uninstalled libraries when the uninstalled ++ # path is not listed in the libpath. 
Setting hardcode_minus_L ++ # to unsupported forces relinking ++ _LT_TAGVAR(hardcode_minus_L, $1)=yes ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' ++ _LT_TAGVAR(hardcode_libdir_separator, $1)= ++ fi ++ esac ++ shared_flag='-shared' ++ if test yes = "$aix_use_runtimelinking"; then ++ shared_flag=$shared_flag' $wl-G' ++ fi ++ # Need to ensure runtime linking is disabled for the traditional ++ # shared library, or the linker may eventually find shared libraries ++ # /with/ Import File - we do not want to mix them. ++ shared_flag_aix='-shared' ++ shared_flag_svr4='-shared $wl-G' ++ else ++ # not using gcc ++ if test ia64 = "$host_cpu"; then ++ # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release ++ # chokes on -Wl,-G. The following line is correct: ++ shared_flag='-G' ++ else ++ if test yes = "$aix_use_runtimelinking"; then ++ shared_flag='$wl-G' ++ else ++ shared_flag='$wl-bM:SRE' ++ fi ++ shared_flag_aix='$wl-bM:SRE' ++ shared_flag_svr4='$wl-G' ++ fi ++ fi ++ ++ _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-bexpall' ++ # It seems that -bexpall does not export symbols beginning with ++ # underscore (_), so it is better to generate a list of symbols to ++ # export. ++ _LT_TAGVAR(always_export_symbols, $1)=yes ++ if test aix,yes = "$with_aix_soname,$aix_use_runtimelinking"; then ++ # Warning - without using the other runtime loading flags (-brtl), ++ # -berok will link without error, but may produce a broken library. ++ # The "-G" linker flag allows undefined symbols. ++ _LT_TAGVAR(no_undefined_flag, $1)='-bernotok' ++ # Determine the default libpath from the value encoded in an empty ++ # executable. 
++ _LT_SYS_MODULE_PATH_AIX([$1]) ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-blibpath:$libdir:'"$aix_libpath" ++ ++ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs $wl'$no_entry_flag' $compiler_flags `if test -n "$allow_undefined_flag"; then func_echo_all "$wl$allow_undefined_flag"; else :; fi` $wl'$exp_sym_flag:\$export_symbols' '$shared_flag ++ else ++ if test ia64 = "$host_cpu"; then ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R $libdir:/usr/lib:/lib' ++ _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs" ++ _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\$wl$no_entry_flag"' $compiler_flags $wl$allow_undefined_flag '"\$wl$exp_sym_flag:\$export_symbols" ++ else ++ # Determine the default libpath from the value encoded in an ++ # empty executable. ++ _LT_SYS_MODULE_PATH_AIX([$1]) ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-blibpath:$libdir:'"$aix_libpath" ++ # Warning - without using the other run time loading flags, ++ # -berok will link without error, but may produce a broken library. ++ _LT_TAGVAR(no_undefined_flag, $1)=' $wl-bernotok' ++ _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-berok' ++ if test yes = "$with_gnu_ld"; then ++ # We only use this code for GNU lds that support --whole-archive. 
++ _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive$convenience $wl--no-whole-archive' ++ else ++ # Exported symbols can be pulled into shared objects from archives ++ _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience' ++ fi ++ _LT_TAGVAR(archive_cmds_need_lc, $1)=yes ++ _LT_TAGVAR(archive_expsym_cmds, $1)='$RM -r $output_objdir/$realname.d~$MKDIR $output_objdir/$realname.d' ++ # -brtl affects multiple linker settings, -berok does not and is overridden later ++ compiler_flags_filtered='`func_echo_all "$compiler_flags " | $SED -e "s%-brtl\\([[, ]]\\)%-berok\\1%g"`' ++ if test svr4 != "$with_aix_soname"; then ++ # This is similar to how AIX traditionally builds its shared ++ # libraries. Need -bnortl late, we may have -brtl in LDFLAGS. ++ _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$CC '$shared_flag_aix' -o $output_objdir/$realname.d/$soname $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$realname.d/$soname' ++ fi ++ if test aix != "$with_aix_soname"; then ++ _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$CC '$shared_flag_svr4' -o $output_objdir/$realname.d/$shared_archive_member_spec.o $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$STRIP -e $output_objdir/$realname.d/$shared_archive_member_spec.o~( func_echo_all "#! 
$soname($shared_archive_member_spec.o)"; if test shr_64 = "$shared_archive_member_spec"; then func_echo_all "# 64"; else func_echo_all "# 32"; fi; cat $export_symbols ) > $output_objdir/$realname.d/$shared_archive_member_spec.imp~$AR $AR_FLAGS $output_objdir/$soname $output_objdir/$realname.d/$shared_archive_member_spec.o $output_objdir/$realname.d/$shared_archive_member_spec.imp' ++ else ++ # used by -dlpreopen to get the symbols ++ _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$MV $output_objdir/$realname.d/$soname $output_objdir' ++ fi ++ _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$RM -r $output_objdir/$realname.d' ++ fi ++ fi ++ ;; ++ ++ beos*) ++ if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then ++ _LT_TAGVAR(allow_undefined_flag, $1)=unsupported ++ # Joseph Beckenbach says some releases of gcc ++ # support --undefined. This deserves some investigation. FIXME ++ _LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' ++ else ++ _LT_TAGVAR(ld_shlibs, $1)=no ++ fi ++ ;; ++ ++ chorus*) ++ case $cc_basename in ++ *) ++ # FIXME: insert proper C++ library support ++ _LT_TAGVAR(ld_shlibs, $1)=no ++ ;; ++ esac ++ ;; ++ ++ cygwin* | mingw* | pw32* | cegcc*) ++ case $GXX,$cc_basename in ++ ,cl* | no,cl* | ,icl* | no,icl*) ++ # Native MSVC or ICC ++ # hardcode_libdir_flag_spec is actually meaningless, as there is ++ # no search path for DLLs. ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' ++ _LT_TAGVAR(allow_undefined_flag, $1)=unsupported ++ _LT_TAGVAR(always_export_symbols, $1)=yes ++ _LT_TAGVAR(file_list_spec, $1)='@' ++ # Tell ltmain to make .lib files, not .a files. ++ libext=lib ++ # Tell ltmain to make .dll files, not .so files. ++ shrext_cmds=.dll ++ # FIXME: Setting linknames here is a bad hack. 
++ _LT_TAGVAR(archive_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~linknames=' ++ _LT_TAGVAR(archive_expsym_cmds, $1)='if _LT_DLL_DEF_P([$export_symbols]); then ++ cp "$export_symbols" "$output_objdir/$soname.def"; ++ echo "$tool_output_objdir$soname.def" > "$output_objdir/$soname.exp"; ++ else ++ $SED -e '\''s/^/-link -EXPORT:/'\'' < $export_symbols > $output_objdir/$soname.exp; ++ fi~ ++ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ ++ linknames=' ++ # The linker will not automatically build a static lib if we build a DLL. ++ # _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' ++ _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes ++ # Don't use ranlib ++ _LT_TAGVAR(old_postinstall_cmds, $1)='chmod 644 $oldlib' ++ _LT_TAGVAR(postlink_cmds, $1)='lt_outputfile="@OUTPUT@"~ ++ lt_tool_outputfile="@TOOL_OUTPUT@"~ ++ case $lt_outputfile in ++ *.exe|*.EXE) ;; ++ *) ++ lt_outputfile=$lt_outputfile.exe ++ lt_tool_outputfile=$lt_tool_outputfile.exe ++ ;; ++ esac~ ++ func_to_tool_file "$lt_outputfile"~ ++ if test : != "$MANIFEST_TOOL" && test -f "$lt_outputfile.manifest"; then ++ $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; ++ $RM "$lt_outputfile.manifest"; ++ fi' ++ ;; ++ *) ++ # g++ ++ # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, ++ # as there is no search path for DLLs. 
++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' ++ _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-all-symbols' ++ _LT_TAGVAR(allow_undefined_flag, $1)=unsupported ++ _LT_TAGVAR(always_export_symbols, $1)=no ++ _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes ++ ++ if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' ++ # If the export-symbols file already is a .def file, use it as ++ # is; otherwise, prepend EXPORTS... ++ _LT_TAGVAR(archive_expsym_cmds, $1)='if _LT_DLL_DEF_P([$export_symbols]); then ++ cp $export_symbols $output_objdir/$soname.def; ++ else ++ echo EXPORTS > $output_objdir/$soname.def; ++ cat $export_symbols >> $output_objdir/$soname.def; ++ fi~ ++ $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' ++ else ++ _LT_TAGVAR(ld_shlibs, $1)=no ++ fi ++ ;; ++ esac ++ ;; ++ darwin* | rhapsody*) ++ _LT_DARWIN_LINKER_FEATURES($1) ++ ;; ++ ++ os2*) ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' ++ _LT_TAGVAR(hardcode_minus_L, $1)=yes ++ _LT_TAGVAR(allow_undefined_flag, $1)=unsupported ++ shrext_cmds=.dll ++ _LT_TAGVAR(archive_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ ++ $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ ++ $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ ++ $ECHO EXPORTS >> $output_objdir/$libname.def~ ++ emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ ++ $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ ++ emximp -o $lib $output_objdir/$libname.def' ++ 
_LT_TAGVAR(archive_expsym_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ ++ $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ ++ $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ ++ $ECHO EXPORTS >> $output_objdir/$libname.def~ ++ prefix_cmds="$SED"~ ++ if test EXPORTS = "`$SED 1q $export_symbols`"; then ++ prefix_cmds="$prefix_cmds -e 1d"; ++ fi~ ++ prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ ++ cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ ++ $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ ++ emximp -o $lib $output_objdir/$libname.def' ++ _LT_TAGVAR(old_archive_From_new_cmds, $1)='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' ++ _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes ++ _LT_TAGVAR(file_list_spec, $1)='@' ++ ;; ++ ++ dgux*) ++ case $cc_basename in ++ ec++*) ++ # FIXME: insert proper C++ library support ++ _LT_TAGVAR(ld_shlibs, $1)=no ++ ;; ++ ghcx*) ++ # Green Hills C++ Compiler ++ # FIXME: insert proper C++ library support ++ _LT_TAGVAR(ld_shlibs, $1)=no ++ ;; ++ *) ++ # FIXME: insert proper C++ library support ++ _LT_TAGVAR(ld_shlibs, $1)=no ++ ;; ++ esac ++ ;; ++ ++ freebsd2.*) ++ # C++ shared libraries reported to be fairly broken before ++ # switch to ELF ++ _LT_TAGVAR(ld_shlibs, $1)=no ++ ;; ++ ++ freebsd-elf*) ++ _LT_TAGVAR(archive_cmds_need_lc, $1)=no ++ ;; ++ ++ freebsd* | dragonfly* | midnightbsd*) ++ # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF ++ # conventions ++ _LT_TAGVAR(ld_shlibs, $1)=yes ++ ;; ++ ++ haiku*) ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' ++ _LT_TAGVAR(link_all_deplibs, $1)=yes ++ ;; ++ ++ hpux9*) ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir' ++ _LT_TAGVAR(hardcode_libdir_separator, $1)=: ++ 
_LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' ++ _LT_TAGVAR(hardcode_direct, $1)=yes ++ _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, ++ # but as the default ++ # location of the library. ++ ++ case $cc_basename in ++ CC*) ++ # FIXME: insert proper C++ library support ++ _LT_TAGVAR(ld_shlibs, $1)=no ++ ;; ++ aCC*) ++ _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -b $wl+b $wl$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' ++ # Commands to make compiler produce verbose output that lists ++ # what "hidden" libraries, object files and flags are used when ++ # linking a shared library. ++ # ++ # There doesn't appear to be a way to prevent this compiler from ++ # explicitly linking system object files so we need to strip them ++ # from the output so that they don't get included in the library ++ # dependencies. ++ output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP "\-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' ++ ;; ++ *) ++ if test yes = "$GXX"; then ++ _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared -nostdlib $pic_flag $wl+b $wl$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' ++ else ++ # FIXME: insert proper C++ library support ++ _LT_TAGVAR(ld_shlibs, $1)=no ++ fi ++ ;; ++ esac ++ ;; ++ ++ hpux10*|hpux11*) ++ if test no = "$with_gnu_ld"; then ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir' ++ _LT_TAGVAR(hardcode_libdir_separator, $1)=: ++ ++ case $host_cpu in ++ hppa*64*|ia64*) ++ ;; ++ *) ++ _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' ++ ;; ++ esac ++ fi ++ case $host_cpu in ++ 
hppa*64*|ia64*) ++ _LT_TAGVAR(hardcode_direct, $1)=no ++ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ++ ;; ++ *) ++ _LT_TAGVAR(hardcode_direct, $1)=yes ++ _LT_TAGVAR(hardcode_direct_absolute, $1)=yes ++ _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, ++ # but as the default ++ # location of the library. ++ ;; ++ esac ++ ++ case $cc_basename in ++ CC*) ++ # FIXME: insert proper C++ library support ++ _LT_TAGVAR(ld_shlibs, $1)=no ++ ;; ++ aCC*) ++ case $host_cpu in ++ hppa*64*) ++ _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ++ ;; ++ ia64*) ++ _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ++ ;; ++ *) ++ _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ++ ;; ++ esac ++ # Commands to make compiler produce verbose output that lists ++ # what "hidden" libraries, object files and flags are used when ++ # linking a shared library. ++ # ++ # There doesn't appear to be a way to prevent this compiler from ++ # explicitly linking system object files so we need to strip them ++ # from the output so that they don't get included in the library ++ # dependencies. 
++ output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP "\-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' ++ ;; ++ *) ++ if test yes = "$GXX"; then ++ if test no = "$with_gnu_ld"; then ++ case $host_cpu in ++ hppa*64*) ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC $wl+h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ++ ;; ++ ia64*) ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $pic_flag $wl+h $wl$soname $wl+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ++ ;; ++ *) ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ++ ;; ++ esac ++ fi ++ else ++ # FIXME: insert proper C++ library support ++ _LT_TAGVAR(ld_shlibs, $1)=no ++ fi ++ ;; ++ esac ++ ;; ++ ++ interix[[3-9]]*) ++ _LT_TAGVAR(hardcode_direct, $1)=no ++ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' ++ _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' ++ # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. ++ # Instead, shared libraries are loaded at an image base (0x10000000 by ++ # default) and relocated if they conflict, which is a slow very memory ++ # consuming and fragmenting process. To avoid this, we pick a random, ++ # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link ++ # time. Moving up from 0x10000000 also allows more sbrk(2) space. 
++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' ++ _LT_TAGVAR(archive_expsym_cmds, $1)='$SED "s|^|_|" $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--retain-symbols-file,$output_objdir/$soname.expsym $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' ++ ;; ++ irix5* | irix6*) ++ case $cc_basename in ++ CC*) ++ # SGI C++ ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' ++ ++ # Archives containing C++ object files must be created using ++ # "CC -ar", where "CC" is the IRIX C++ compiler. This is ++ # necessary to make sure instantiated templates are included ++ # in the archive. 
++ _LT_TAGVAR(old_archive_cmds, $1)='$CC -ar -WR,-u -o $oldlib $oldobjs' ++ ;; ++ *) ++ if test yes = "$GXX"; then ++ if test no = "$with_gnu_ld"; then ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' ++ else ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` -o $lib' ++ fi ++ fi ++ _LT_TAGVAR(link_all_deplibs, $1)=yes ++ ;; ++ esac ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' ++ _LT_TAGVAR(hardcode_libdir_separator, $1)=: ++ _LT_TAGVAR(inherit_rpath, $1)=yes ++ ;; ++ ++ linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) ++ case $cc_basename in ++ KCC*) ++ # Kuck and Associates, Inc. (KAI) C++ Compiler ++ ++ # KCC will only create a shared library if the output file ++ # ends with ".so" (or ".sl" for HP-UX), so rename the library ++ # to its proper name (with version) after linking. 
++ _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' ++ _LT_TAGVAR(archive_expsym_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib $wl-retain-symbols-file,$export_symbols; mv \$templib $lib' ++ # Commands to make compiler produce verbose output that lists ++ # what "hidden" libraries, object files and flags are used when ++ # linking a shared library. ++ # ++ # There doesn't appear to be a way to prevent this compiler from ++ # explicitly linking system object files so we need to strip them ++ # from the output so that they don't get included in the library ++ # dependencies. ++ output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' ++ ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' ++ _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic' ++ ++ # Archives containing C++ object files must be created using ++ # "CC -Bstatic", where "CC" is the KAI C++ compiler. ++ _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' ++ ;; ++ icpc* | ecpc* ) ++ # Intel C++ ++ with_gnu_ld=yes ++ # version 8.0 and above of icpc choke on multiply defined symbols ++ # if we add $predep_objects and $postdep_objects, however 7.1 and ++ # earlier do not add the objects themselves. 
++ case `$CC -V 2>&1` in ++ *"Version 7."*) ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' ++ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ++ ;; ++ *) # Version 8.0 or newer ++ tmp_idyn= ++ case $host_cpu in ++ ia64*) tmp_idyn=' -i_dynamic';; ++ esac ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' ++ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ++ ;; ++ esac ++ _LT_TAGVAR(archive_cmds_need_lc, $1)=no ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' ++ _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic' ++ _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive$convenience $wl--no-whole-archive' ++ ;; ++ pgCC* | pgcpp*) ++ # Portland Group C++ compiler ++ case `$CC -V` in ++ *pgCC\ [[1-5]].* | *pgcpp\ [[1-5]].*) ++ _LT_TAGVAR(prelink_cmds, $1)='tpldir=Template.dir~ ++ rm -rf $tpldir~ ++ $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~ ++ compile_command="$compile_command `find $tpldir -name \*.o | sort | $NL2SP`"' ++ _LT_TAGVAR(old_archive_cmds, $1)='tpldir=Template.dir~ ++ rm -rf $tpldir~ ++ $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~ ++ $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | sort | $NL2SP`~ ++ $RANLIB $oldlib' ++ _LT_TAGVAR(archive_cmds, $1)='tpldir=Template.dir~ ++ rm -rf $tpldir~ ++ $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ ++ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` 
$postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' ++ _LT_TAGVAR(archive_expsym_cmds, $1)='tpldir=Template.dir~ ++ rm -rf $tpldir~ ++ $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ ++ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ++ ;; ++ *) # Version 6 and above use weak symbols ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' ++ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ++ ;; ++ esac ++ ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl--rpath $wl$libdir' ++ _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic' ++ _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' ++ ;; ++ cxx*) ++ # Compaq C++ ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' ++ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib $wl-retain-symbols-file $wl$export_symbols' ++ ++ runpath_var=LD_RUN_PATH ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' ++ _LT_TAGVAR(hardcode_libdir_separator, $1)=: ++ ++ # Commands to make compiler produce verbose output that lists ++ # what "hidden" libraries, object files and flags are used when ++ # linking a shared library. 
++ # ++ # There doesn't appear to be a way to prevent this compiler from ++ # explicitly linking system object files so we need to strip them ++ # from the output so that they don't get included in the library ++ # dependencies. ++ output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "X$list" | $Xsed' ++ ;; ++ xl* | mpixl* | bgxl*) ++ # IBM XL 8.0 on PPC, with GNU ld ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' ++ _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic' ++ _LT_TAGVAR(archive_cmds, $1)='$CC -qmkshrobj $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' ++ if test yes = "$supports_anon_versioning"; then ++ _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ ++ cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ ++ echo "local: *; };" >> $output_objdir/$libname.ver~ ++ $CC -qmkshrobj $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-version-script $wl$output_objdir/$libname.ver -o $lib' ++ fi ++ ;; ++ *) ++ case `$CC -V 2>&1 | $SED 5q` in ++ *Sun\ C*) ++ # Sun C++ 5.9 ++ _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs' ++ _LT_TAGVAR(archive_cmds, $1)='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ++ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-retain-symbols-file $wl$export_symbols' ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' ++ _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; 
func_echo_all \"$new_convenience\"` $wl--no-whole-archive' ++ _LT_TAGVAR(compiler_needs_object, $1)=yes ++ ++ # Not sure whether something based on ++ # $CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 ++ # would be better. ++ output_verbose_link_cmd='func_echo_all' ++ ++ # Archives containing C++ object files must be created using ++ # "CC -xar", where "CC" is the Sun C++ compiler. This is ++ # necessary to make sure instantiated templates are included ++ # in the archive. ++ _LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs' ++ ;; ++ esac ++ ;; ++ esac ++ ;; ++ ++ lynxos*) ++ # FIXME: insert proper C++ library support ++ _LT_TAGVAR(ld_shlibs, $1)=no ++ ;; ++ ++ m88k*) ++ # FIXME: insert proper C++ library support ++ _LT_TAGVAR(ld_shlibs, $1)=no ++ ;; ++ ++ mvs*) ++ case $cc_basename in ++ cxx*) ++ # FIXME: insert proper C++ library support ++ _LT_TAGVAR(ld_shlibs, $1)=no ++ ;; ++ *) ++ # FIXME: insert proper C++ library support ++ _LT_TAGVAR(ld_shlibs, $1)=no ++ ;; ++ esac ++ ;; ++ ++ netbsd*) ++ if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then ++ _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags' ++ wlarc= ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' ++ _LT_TAGVAR(hardcode_direct, $1)=yes ++ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ++ fi ++ # Workaround some broken pre-1.5 toolchains ++ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"' ++ ;; ++ ++ *nto* | *qnx*) ++ _LT_TAGVAR(ld_shlibs, $1)=yes ++ ;; ++ ++ openbsd* | bitrig*) ++ if test -f /usr/libexec/ld.so; then ++ _LT_TAGVAR(hardcode_direct, $1)=yes ++ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ++ _LT_TAGVAR(hardcode_direct_absolute, $1)=yes ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' ++ _LT_TAGVAR(hardcode_libdir_flag_spec, 
$1)='$wl-rpath,$libdir' ++ if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`"; then ++ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-retain-symbols-file,$export_symbols -o $lib' ++ _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' ++ _LT_TAGVAR(whole_archive_flag_spec, $1)=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive' ++ fi ++ output_verbose_link_cmd=func_echo_all ++ else ++ _LT_TAGVAR(ld_shlibs, $1)=no ++ fi ++ ;; ++ ++ osf3* | osf4* | osf5*) ++ case $cc_basename in ++ KCC*) ++ # Kuck and Associates, Inc. (KAI) C++ Compiler ++ ++ # KCC will only create a shared library if the output file ++ # ends with ".so" (or ".sl" for HP-UX), so rename the library ++ # to its proper name (with version) after linking. ++ _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' ++ ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' ++ _LT_TAGVAR(hardcode_libdir_separator, $1)=: ++ ++ # Archives containing C++ object files must be created using ++ # the KAI C++ compiler. 
++ case $host in ++ osf3*) _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' ;; ++ *) _LT_TAGVAR(old_archive_cmds, $1)='$CC -o $oldlib $oldobjs' ;; ++ esac ++ ;; ++ RCC*) ++ # Rational C++ 2.4.1 ++ # FIXME: insert proper C++ library support ++ _LT_TAGVAR(ld_shlibs, $1)=no ++ ;; ++ cxx*) ++ case $host in ++ osf3*) ++ _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-expect_unresolved $wl\*' ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $soname `test -n "$verstring" && func_echo_all "$wl-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' ++ ;; ++ *) ++ _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' ++ _LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~ ++ echo "-hidden">> $lib.exp~ ++ $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname $wl-input $wl$lib.exp `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib~ ++ $RM $lib.exp' ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' ++ ;; ++ esac ++ ++ _LT_TAGVAR(hardcode_libdir_separator, $1)=: ++ ++ # Commands to make compiler produce verbose output that lists ++ # what "hidden" libraries, object files and flags are used when ++ # linking a shared library. 
++ # ++ # There doesn't appear to be a way to prevent this compiler from ++ # explicitly linking system object files so we need to strip them ++ # from the output so that they don't get included in the library ++ # dependencies. ++ output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' ++ ;; ++ *) ++ if test yes,no = "$GXX,$with_gnu_ld"; then ++ _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-expect_unresolved $wl\*' ++ case $host in ++ osf3*) ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' ++ ;; ++ *) ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-msym $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' ++ ;; ++ esac ++ ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' ++ _LT_TAGVAR(hardcode_libdir_separator, $1)=: ++ ++ # Commands to make compiler produce verbose output that lists ++ # what "hidden" libraries, object files and flags are used when ++ # linking a shared library. 
++ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' ++ ++ else ++ # FIXME: insert proper C++ library support ++ _LT_TAGVAR(ld_shlibs, $1)=no ++ fi ++ ;; ++ esac ++ ;; ++ ++ psos*) ++ # FIXME: insert proper C++ library support ++ _LT_TAGVAR(ld_shlibs, $1)=no ++ ;; ++ ++ sunos4*) ++ case $cc_basename in ++ CC*) ++ # Sun C++ 4.x ++ # FIXME: insert proper C++ library support ++ _LT_TAGVAR(ld_shlibs, $1)=no ++ ;; ++ lcc*) ++ # Lucid ++ # FIXME: insert proper C++ library support ++ _LT_TAGVAR(ld_shlibs, $1)=no ++ ;; ++ *) ++ # FIXME: insert proper C++ library support ++ _LT_TAGVAR(ld_shlibs, $1)=no ++ ;; ++ esac ++ ;; ++ ++ solaris*) ++ case $cc_basename in ++ CC* | sunCC*) ++ # Sun C++ 4.2, 5.x and Centerline C++ ++ _LT_TAGVAR(archive_cmds_need_lc,$1)=yes ++ _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs' ++ _LT_TAGVAR(archive_cmds, $1)='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ++ _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ ++ $CC -G$allow_undefined_flag $wl-M $wl$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' ++ ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' ++ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ++ case $host_os in ++ solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; ++ *) ++ # The compiler driver will combine and reorder linker options, ++ # but understands '-z linker_flag'. ++ # Supported since Solaris 2.6 (maybe 2.5.1?) ++ _LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract' ++ ;; ++ esac ++ _LT_TAGVAR(link_all_deplibs, $1)=yes ++ ++ output_verbose_link_cmd='func_echo_all' ++ ++ # Archives containing C++ object files must be created using ++ # "CC -xar", where "CC" is the Sun C++ compiler. 
This is ++ # necessary to make sure instantiated templates are included ++ # in the archive. ++ _LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs' ++ ;; ++ gcx*) ++ # Green Hills C++ Compiler ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib' ++ ++ # The C++ compiler must be used to create the archive. ++ _LT_TAGVAR(old_archive_cmds, $1)='$CC $LDFLAGS -archive -o $oldlib $oldobjs' ++ ;; ++ *) ++ # GNU C++ compiler with Solaris linker ++ if test yes,no = "$GXX,$with_gnu_ld"; then ++ _LT_TAGVAR(no_undefined_flag, $1)=' $wl-z ${wl}defs' ++ if $CC --version | $GREP -v '^2\.7' > /dev/null; then ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib' ++ _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ ++ $CC -shared $pic_flag -nostdlib $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' ++ ++ # Commands to make compiler produce verbose output that lists ++ # what "hidden" libraries, object files and flags are used when ++ # linking a shared library. ++ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' ++ else ++ # g++ 2.7 appears to require '-G' NOT '-shared' on this ++ # platform. 
++ _LT_TAGVAR(archive_cmds, $1)='$CC -G -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib' ++ _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ ++ $CC -G -nostdlib $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' ++ ++ # Commands to make compiler produce verbose output that lists ++ # what "hidden" libraries, object files and flags are used when ++ # linking a shared library. ++ output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' ++ fi ++ ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R $wl$libdir' ++ case $host_os in ++ solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; ++ *) ++ _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl-z ${wl}allextract$convenience $wl-z ${wl}defaultextract' ++ ;; ++ esac ++ fi ++ ;; ++ esac ++ ;; ++ ++ sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*) ++ _LT_TAGVAR(no_undefined_flag, $1)='$wl-z,text' ++ _LT_TAGVAR(archive_cmds_need_lc, $1)=no ++ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ++ runpath_var='LD_RUN_PATH' ++ ++ case $cc_basename in ++ CC*) ++ _LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ ;; ++ *) ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ ;; ++ esac ++ ;; ++ ++ sysv5* | sco3.2v5* | sco5v6*) ++ # Note: We CANNOT use -z defs as we might desire, because we do not ++ # link with -lc, and that would cause any symbols used from libc to ++ 
# always be unresolved, which means just about no library would ++ # ever link correctly. If we're not using GNU ld we use -z text ++ # though, which does catch some bad symbols but isn't as heavy-handed ++ # as -z defs. ++ _LT_TAGVAR(no_undefined_flag, $1)='$wl-z,text' ++ _LT_TAGVAR(allow_undefined_flag, $1)='$wl-z,nodefs' ++ _LT_TAGVAR(archive_cmds_need_lc, $1)=no ++ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R,$libdir' ++ _LT_TAGVAR(hardcode_libdir_separator, $1)=':' ++ _LT_TAGVAR(link_all_deplibs, $1)=yes ++ _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-Bexport' ++ runpath_var='LD_RUN_PATH' ++ ++ case $cc_basename in ++ CC*) ++ _LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ _LT_TAGVAR(old_archive_cmds, $1)='$CC -Tprelink_objects $oldobjs~ ++ '"$_LT_TAGVAR(old_archive_cmds, $1)" ++ _LT_TAGVAR(reload_cmds, $1)='$CC -Tprelink_objects $reload_objs~ ++ '"$_LT_TAGVAR(reload_cmds, $1)" ++ ;; ++ *) ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ ;; ++ esac ++ ;; ++ ++ tandem*) ++ case $cc_basename in ++ NCC*) ++ # NonStop-UX NCC 3.20 ++ # FIXME: insert proper C++ library support ++ _LT_TAGVAR(ld_shlibs, $1)=no ++ ;; ++ *) ++ # FIXME: insert proper C++ library support ++ _LT_TAGVAR(ld_shlibs, $1)=no ++ ;; ++ esac ++ ;; ++ ++ vxworks*) ++ # FIXME: insert proper C++ library support ++ _LT_TAGVAR(ld_shlibs, $1)=no ++ ;; ++ ++ *) ++ # FIXME: insert proper C++ library support ++ _LT_TAGVAR(ld_shlibs, $1)=no ++ ;; ++ esac ++ ++ AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)]) ++ test no = "$_LT_TAGVAR(ld_shlibs, $1)" && can_build_shared=no ++ ++ _LT_TAGVAR(GCC, 
$1)=$GXX ++ _LT_TAGVAR(LD, $1)=$LD ++ ++ ## CAVEAT EMPTOR: ++ ## There is no encapsulation within the following macros, do not change ++ ## the running order or otherwise move them around unless you know exactly ++ ## what you are doing... ++ _LT_SYS_HIDDEN_LIBDEPS($1) ++ _LT_COMPILER_PIC($1) ++ _LT_COMPILER_C_O($1) ++ _LT_COMPILER_FILE_LOCKS($1) ++ _LT_LINKER_SHLIBS($1) ++ _LT_SYS_DYNAMIC_LINKER($1) ++ _LT_LINKER_HARDCODE_LIBPATH($1) ++ ++ _LT_CONFIG($1) ++ fi # test -n "$compiler" ++ ++ CC=$lt_save_CC ++ CFLAGS=$lt_save_CFLAGS ++ LDCXX=$LD ++ LD=$lt_save_LD ++ GCC=$lt_save_GCC ++ with_gnu_ld=$lt_save_with_gnu_ld ++ lt_cv_path_LDCXX=$lt_cv_path_LD ++ lt_cv_path_LD=$lt_save_path_LD ++ lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld ++ lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld ++fi # test yes != "$_lt_caught_CXX_error" ++ ++AC_LANG_POP ++])# _LT_LANG_CXX_CONFIG ++ ++ ++# _LT_FUNC_STRIPNAME_CNF ++# ---------------------- ++# func_stripname_cnf prefix suffix name ++# strip PREFIX and SUFFIX off of NAME. ++# PREFIX and SUFFIX must not contain globbing or regex special ++# characters, hashes, percent signs, but SUFFIX may contain a leading ++# dot (in which case that matches only a dot). ++# ++# This function is identical to the (non-XSI) version of func_stripname, ++# except this one can be used by m4 code that may be executed by configure, ++# rather than the libtool script. ++m4_defun([_LT_FUNC_STRIPNAME_CNF],[dnl ++AC_REQUIRE([_LT_DECL_SED]) ++AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH]) ++func_stripname_cnf () ++{ ++ case @S|@2 in ++ .*) func_stripname_result=`$ECHO "@S|@3" | $SED "s%^@S|@1%%; s%\\\\@S|@2\$%%"`;; ++ *) func_stripname_result=`$ECHO "@S|@3" | $SED "s%^@S|@1%%; s%@S|@2\$%%"`;; ++ esac ++} # func_stripname_cnf ++])# _LT_FUNC_STRIPNAME_CNF ++ ++ ++# _LT_SYS_HIDDEN_LIBDEPS([TAGNAME]) ++# --------------------------------- ++# Figure out "hidden" library dependencies from verbose ++# compiler output when linking a shared library. 
++# Parse the compiler output and extract the necessary ++# objects, libraries and library flags. ++m4_defun([_LT_SYS_HIDDEN_LIBDEPS], ++[m4_require([_LT_FILEUTILS_DEFAULTS])dnl ++AC_REQUIRE([_LT_FUNC_STRIPNAME_CNF])dnl ++# Dependencies to place before and after the object being linked: ++_LT_TAGVAR(predep_objects, $1)= ++_LT_TAGVAR(postdep_objects, $1)= ++_LT_TAGVAR(predeps, $1)= ++_LT_TAGVAR(postdeps, $1)= ++_LT_TAGVAR(compiler_lib_search_path, $1)= ++ ++dnl we can't use the lt_simple_compile_test_code here, ++dnl because it contains code intended for an executable, ++dnl not a library. It's possible we should let each ++dnl tag define a new lt_????_link_test_code variable, ++dnl but it's only used here... ++m4_if([$1], [], [cat > conftest.$ac_ext <<_LT_EOF ++int a; ++void foo (void) { a = 0; } ++_LT_EOF ++], [$1], [CXX], [cat > conftest.$ac_ext <<_LT_EOF ++class Foo ++{ ++public: ++ Foo (void) { a = 0; } ++private: ++ int a; ++}; ++_LT_EOF ++], [$1], [F77], [cat > conftest.$ac_ext <<_LT_EOF ++ subroutine foo ++ implicit none ++ integer*4 a ++ a=0 ++ return ++ end ++_LT_EOF ++], [$1], [FC], [cat > conftest.$ac_ext <<_LT_EOF ++ subroutine foo ++ implicit none ++ integer a ++ a=0 ++ return ++ end ++_LT_EOF ++], [$1], [GCJ], [cat > conftest.$ac_ext <<_LT_EOF ++public class foo { ++ private int a; ++ public void bar (void) { ++ a = 0; ++ } ++}; ++_LT_EOF ++], [$1], [GO], [cat > conftest.$ac_ext <<_LT_EOF ++package foo ++func foo() { ++} ++_LT_EOF ++]) ++ ++_lt_libdeps_save_CFLAGS=$CFLAGS ++case "$CC $CFLAGS " in #( ++*\ -flto*\ *) CFLAGS="$CFLAGS -fno-lto" ;; ++*\ -fwhopr*\ *) CFLAGS="$CFLAGS -fno-whopr" ;; ++*\ -fuse-linker-plugin*\ *) CFLAGS="$CFLAGS -fno-use-linker-plugin" ;; ++esac ++ ++dnl Parse the compiler output and extract the necessary ++dnl objects, libraries and library flags. ++if AC_TRY_EVAL(ac_compile); then ++ # Parse the compiler output and extract the necessary ++ # objects, libraries and library flags. 
++ ++ # Sentinel used to keep track of whether or not we are before ++ # the conftest object file. ++ pre_test_object_deps_done=no ++ ++ for p in `eval "$output_verbose_link_cmd"`; do ++ case $prev$p in ++ ++ -L* | -R* | -l*) ++ # Some compilers place space between "-{L,R}" and the path. ++ # Remove the space. ++ if test x-L = "$p" || ++ test x-R = "$p"; then ++ prev=$p ++ continue ++ fi ++ ++ # Expand the sysroot to ease extracting the directories later. ++ if test -z "$prev"; then ++ case $p in ++ -L*) func_stripname_cnf '-L' '' "$p"; prev=-L; p=$func_stripname_result ;; ++ -R*) func_stripname_cnf '-R' '' "$p"; prev=-R; p=$func_stripname_result ;; ++ -l*) func_stripname_cnf '-l' '' "$p"; prev=-l; p=$func_stripname_result ;; ++ esac ++ fi ++ case $p in ++ =*) func_stripname_cnf '=' '' "$p"; p=$lt_sysroot$func_stripname_result ;; ++ esac ++ if test no = "$pre_test_object_deps_done"; then ++ case $prev in ++ -L | -R) ++ # Internal compiler library paths should come after those ++ # provided the user. The postdeps already come after the ++ # user supplied libs so there is no need to process them. ++ if test -z "$_LT_TAGVAR(compiler_lib_search_path, $1)"; then ++ _LT_TAGVAR(compiler_lib_search_path, $1)=$prev$p ++ else ++ _LT_TAGVAR(compiler_lib_search_path, $1)="${_LT_TAGVAR(compiler_lib_search_path, $1)} $prev$p" ++ fi ++ ;; ++ # The "-l" case would never come before the object being ++ # linked, so don't bother handling this case. ++ esac ++ else ++ if test -z "$_LT_TAGVAR(postdeps, $1)"; then ++ _LT_TAGVAR(postdeps, $1)=$prev$p ++ else ++ _LT_TAGVAR(postdeps, $1)="${_LT_TAGVAR(postdeps, $1)} $prev$p" ++ fi ++ fi ++ prev= ++ ;; ++ ++ *.lto.$objext) ;; # Ignore GCC LTO objects ++ *.$objext) ++ # This assumes that the test object file only shows up ++ # once in the compiler output. 
++ if test "$p" = "conftest.$objext"; then ++ pre_test_object_deps_done=yes ++ continue ++ fi ++ ++ if test no = "$pre_test_object_deps_done"; then ++ if test -z "$_LT_TAGVAR(predep_objects, $1)"; then ++ _LT_TAGVAR(predep_objects, $1)=$p ++ else ++ _LT_TAGVAR(predep_objects, $1)="$_LT_TAGVAR(predep_objects, $1) $p" ++ fi ++ else ++ if test -z "$_LT_TAGVAR(postdep_objects, $1)"; then ++ _LT_TAGVAR(postdep_objects, $1)=$p ++ else ++ _LT_TAGVAR(postdep_objects, $1)="$_LT_TAGVAR(postdep_objects, $1) $p" ++ fi ++ fi ++ ;; ++ ++ *) ;; # Ignore the rest. ++ ++ esac ++ done ++ ++ # Clean up. ++ rm -f a.out a.exe ++else ++ echo "libtool.m4: error: problem compiling $1 test program" ++fi ++ ++$RM -f confest.$objext ++CFLAGS=$_lt_libdeps_save_CFLAGS ++ ++# PORTME: override above test on systems where it is broken ++m4_if([$1], [CXX], ++[case $host_os in ++interix[[3-9]]*) ++ # Interix 3.5 installs completely hosed .la files for C++, so rather than ++ # hack all around it, let's just trust "g++" to DTRT. ++ _LT_TAGVAR(predep_objects,$1)= ++ _LT_TAGVAR(postdep_objects,$1)= ++ _LT_TAGVAR(postdeps,$1)= ++ ;; ++esac ++]) ++ ++case " $_LT_TAGVAR(postdeps, $1) " in ++*" -lc "*) _LT_TAGVAR(archive_cmds_need_lc, $1)=no ;; ++esac ++ _LT_TAGVAR(compiler_lib_search_dirs, $1)= ++if test -n "${_LT_TAGVAR(compiler_lib_search_path, $1)}"; then ++ _LT_TAGVAR(compiler_lib_search_dirs, $1)=`echo " ${_LT_TAGVAR(compiler_lib_search_path, $1)}" | $SED -e 's! -L! 
!g' -e 's!^ !!'` ++fi ++_LT_TAGDECL([], [compiler_lib_search_dirs], [1], ++ [The directories searched by this compiler when creating a shared library]) ++_LT_TAGDECL([], [predep_objects], [1], ++ [Dependencies to place before and after the objects being linked to ++ create a shared library]) ++_LT_TAGDECL([], [postdep_objects], [1]) ++_LT_TAGDECL([], [predeps], [1]) ++_LT_TAGDECL([], [postdeps], [1]) ++_LT_TAGDECL([], [compiler_lib_search_path], [1], ++ [The library search path used internally by the compiler when linking ++ a shared library]) ++])# _LT_SYS_HIDDEN_LIBDEPS ++ ++ ++# _LT_LANG_F77_CONFIG([TAG]) ++# -------------------------- ++# Ensure that the configuration variables for a Fortran 77 compiler are ++# suitably defined. These variables are subsequently used by _LT_CONFIG ++# to write the compiler configuration to 'libtool'. ++m4_defun([_LT_LANG_F77_CONFIG], ++[AC_LANG_PUSH(Fortran 77) ++if test -z "$F77" || test no = "$F77"; then ++ _lt_disable_F77=yes ++fi ++ ++_LT_TAGVAR(archive_cmds_need_lc, $1)=no ++_LT_TAGVAR(allow_undefined_flag, $1)= ++_LT_TAGVAR(always_export_symbols, $1)=no ++_LT_TAGVAR(archive_expsym_cmds, $1)= ++_LT_TAGVAR(export_dynamic_flag_spec, $1)= ++_LT_TAGVAR(hardcode_direct, $1)=no ++_LT_TAGVAR(hardcode_direct_absolute, $1)=no ++_LT_TAGVAR(hardcode_libdir_flag_spec, $1)= ++_LT_TAGVAR(hardcode_libdir_separator, $1)= ++_LT_TAGVAR(hardcode_minus_L, $1)=no ++_LT_TAGVAR(hardcode_automatic, $1)=no ++_LT_TAGVAR(inherit_rpath, $1)=no ++_LT_TAGVAR(module_cmds, $1)= ++_LT_TAGVAR(module_expsym_cmds, $1)= ++_LT_TAGVAR(link_all_deplibs, $1)=unknown ++_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds ++_LT_TAGVAR(reload_flag, $1)=$reload_flag ++_LT_TAGVAR(reload_cmds, $1)=$reload_cmds ++_LT_TAGVAR(no_undefined_flag, $1)= ++_LT_TAGVAR(whole_archive_flag_spec, $1)= ++_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no ++ ++# Source file extension for f77 test sources. ++ac_ext=f ++ ++# Object file extension for compiled f77 test sources. 
++objext=o ++_LT_TAGVAR(objext, $1)=$objext ++ ++# No sense in running all these tests if we already determined that ++# the F77 compiler isn't working. Some variables (like enable_shared) ++# are currently assumed to apply to all compilers on this platform, ++# and will be corrupted by setting them based on a non-working compiler. ++if test yes != "$_lt_disable_F77"; then ++ # Code to be used in simple compile tests ++ lt_simple_compile_test_code="\ ++ subroutine t ++ return ++ end ++" ++ ++ # Code to be used in simple link tests ++ lt_simple_link_test_code="\ ++ program t ++ end ++" ++ ++ # ltmain only uses $CC for tagged configurations so make sure $CC is set. ++ _LT_TAG_COMPILER ++ ++ # save warnings/boilerplate of simple test code ++ _LT_COMPILER_BOILERPLATE ++ _LT_LINKER_BOILERPLATE ++ ++ # Allow CC to be a program name with arguments. ++ lt_save_CC=$CC ++ lt_save_GCC=$GCC ++ lt_save_CFLAGS=$CFLAGS ++ CC=${F77-"f77"} ++ CFLAGS=$FFLAGS ++ compiler=$CC ++ _LT_TAGVAR(compiler, $1)=$CC ++ _LT_CC_BASENAME([$compiler]) ++ GCC=$G77 ++ if test -n "$compiler"; then ++ AC_MSG_CHECKING([if libtool supports shared libraries]) ++ AC_MSG_RESULT([$can_build_shared]) ++ ++ AC_MSG_CHECKING([whether to build shared libraries]) ++ test no = "$can_build_shared" && enable_shared=no ++ ++ # On AIX, shared libraries and static libraries use the same namespace, and ++ # are all built from PIC. 
++ case $host_os in ++ aix3*) ++ test yes = "$enable_shared" && enable_static=no ++ if test -n "$RANLIB"; then ++ archive_cmds="$archive_cmds~\$RANLIB \$lib" ++ postinstall_cmds='$RANLIB $lib' ++ fi ++ ;; ++ aix[[4-9]]*) ++ if test ia64 != "$host_cpu"; then ++ case $enable_shared,$with_aix_soname,$aix_use_runtimelinking in ++ yes,aix,yes) ;; # shared object as lib.so file only ++ yes,svr4,*) ;; # shared object as lib.so archive member only ++ yes,*) enable_static=no ;; # shared object in lib.a archive as well ++ esac ++ fi ++ ;; ++ esac ++ AC_MSG_RESULT([$enable_shared]) ++ ++ AC_MSG_CHECKING([whether to build static libraries]) ++ # Make sure either enable_shared or enable_static is yes. ++ test yes = "$enable_shared" || enable_static=yes ++ AC_MSG_RESULT([$enable_static]) ++ ++ _LT_TAGVAR(GCC, $1)=$G77 ++ _LT_TAGVAR(LD, $1)=$LD ++ ++ ## CAVEAT EMPTOR: ++ ## There is no encapsulation within the following macros, do not change ++ ## the running order or otherwise move them around unless you know exactly ++ ## what you are doing... ++ _LT_COMPILER_PIC($1) ++ _LT_COMPILER_C_O($1) ++ _LT_COMPILER_FILE_LOCKS($1) ++ _LT_LINKER_SHLIBS($1) ++ _LT_SYS_DYNAMIC_LINKER($1) ++ _LT_LINKER_HARDCODE_LIBPATH($1) ++ ++ _LT_CONFIG($1) ++ fi # test -n "$compiler" ++ ++ GCC=$lt_save_GCC ++ CC=$lt_save_CC ++ CFLAGS=$lt_save_CFLAGS ++fi # test yes != "$_lt_disable_F77" ++ ++AC_LANG_POP ++])# _LT_LANG_F77_CONFIG ++ ++ ++# _LT_LANG_FC_CONFIG([TAG]) ++# ------------------------- ++# Ensure that the configuration variables for a Fortran compiler are ++# suitably defined. These variables are subsequently used by _LT_CONFIG ++# to write the compiler configuration to 'libtool'. 
++m4_defun([_LT_LANG_FC_CONFIG], ++[AC_LANG_PUSH(Fortran) ++ ++if test -z "$FC" || test no = "$FC"; then ++ _lt_disable_FC=yes ++fi ++ ++_LT_TAGVAR(archive_cmds_need_lc, $1)=no ++_LT_TAGVAR(allow_undefined_flag, $1)= ++_LT_TAGVAR(always_export_symbols, $1)=no ++_LT_TAGVAR(archive_expsym_cmds, $1)= ++_LT_TAGVAR(export_dynamic_flag_spec, $1)= ++_LT_TAGVAR(hardcode_direct, $1)=no ++_LT_TAGVAR(hardcode_direct_absolute, $1)=no ++_LT_TAGVAR(hardcode_libdir_flag_spec, $1)= ++_LT_TAGVAR(hardcode_libdir_separator, $1)= ++_LT_TAGVAR(hardcode_minus_L, $1)=no ++_LT_TAGVAR(hardcode_automatic, $1)=no ++_LT_TAGVAR(inherit_rpath, $1)=no ++_LT_TAGVAR(module_cmds, $1)= ++_LT_TAGVAR(module_expsym_cmds, $1)= ++_LT_TAGVAR(link_all_deplibs, $1)=unknown ++_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds ++_LT_TAGVAR(reload_flag, $1)=$reload_flag ++_LT_TAGVAR(reload_cmds, $1)=$reload_cmds ++_LT_TAGVAR(no_undefined_flag, $1)= ++_LT_TAGVAR(whole_archive_flag_spec, $1)= ++_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no ++ ++# Source file extension for fc test sources. ++ac_ext=${ac_fc_srcext-f} ++ ++# Object file extension for compiled fc test sources. ++objext=o ++_LT_TAGVAR(objext, $1)=$objext ++ ++# No sense in running all these tests if we already determined that ++# the FC compiler isn't working. Some variables (like enable_shared) ++# are currently assumed to apply to all compilers on this platform, ++# and will be corrupted by setting them based on a non-working compiler. ++if test yes != "$_lt_disable_FC"; then ++ # Code to be used in simple compile tests ++ lt_simple_compile_test_code="\ ++ subroutine t ++ return ++ end ++" ++ ++ # Code to be used in simple link tests ++ lt_simple_link_test_code="\ ++ program t ++ end ++" ++ ++ # ltmain only uses $CC for tagged configurations so make sure $CC is set. 
++ _LT_TAG_COMPILER ++ ++ # save warnings/boilerplate of simple test code ++ _LT_COMPILER_BOILERPLATE ++ _LT_LINKER_BOILERPLATE ++ ++ # Allow CC to be a program name with arguments. ++ lt_save_CC=$CC ++ lt_save_GCC=$GCC ++ lt_save_CFLAGS=$CFLAGS ++ CC=${FC-"f95"} ++ CFLAGS=$FCFLAGS ++ compiler=$CC ++ GCC=$ac_cv_fc_compiler_gnu ++ ++ _LT_TAGVAR(compiler, $1)=$CC ++ _LT_CC_BASENAME([$compiler]) ++ ++ if test -n "$compiler"; then ++ AC_MSG_CHECKING([if libtool supports shared libraries]) ++ AC_MSG_RESULT([$can_build_shared]) ++ ++ AC_MSG_CHECKING([whether to build shared libraries]) ++ test no = "$can_build_shared" && enable_shared=no ++ ++ # On AIX, shared libraries and static libraries use the same namespace, and ++ # are all built from PIC. ++ case $host_os in ++ aix3*) ++ test yes = "$enable_shared" && enable_static=no ++ if test -n "$RANLIB"; then ++ archive_cmds="$archive_cmds~\$RANLIB \$lib" ++ postinstall_cmds='$RANLIB $lib' ++ fi ++ ;; ++ aix[[4-9]]*) ++ if test ia64 != "$host_cpu"; then ++ case $enable_shared,$with_aix_soname,$aix_use_runtimelinking in ++ yes,aix,yes) ;; # shared object as lib.so file only ++ yes,svr4,*) ;; # shared object as lib.so archive member only ++ yes,*) enable_static=no ;; # shared object in lib.a archive as well ++ esac ++ fi ++ ;; ++ esac ++ AC_MSG_RESULT([$enable_shared]) ++ ++ AC_MSG_CHECKING([whether to build static libraries]) ++ # Make sure either enable_shared or enable_static is yes. ++ test yes = "$enable_shared" || enable_static=yes ++ AC_MSG_RESULT([$enable_static]) ++ ++ _LT_TAGVAR(GCC, $1)=$ac_cv_fc_compiler_gnu ++ _LT_TAGVAR(LD, $1)=$LD ++ ++ ## CAVEAT EMPTOR: ++ ## There is no encapsulation within the following macros, do not change ++ ## the running order or otherwise move them around unless you know exactly ++ ## what you are doing... 
++ _LT_SYS_HIDDEN_LIBDEPS($1) ++ _LT_COMPILER_PIC($1) ++ _LT_COMPILER_C_O($1) ++ _LT_COMPILER_FILE_LOCKS($1) ++ _LT_LINKER_SHLIBS($1) ++ _LT_SYS_DYNAMIC_LINKER($1) ++ _LT_LINKER_HARDCODE_LIBPATH($1) ++ ++ _LT_CONFIG($1) ++ fi # test -n "$compiler" ++ ++ GCC=$lt_save_GCC ++ CC=$lt_save_CC ++ CFLAGS=$lt_save_CFLAGS ++fi # test yes != "$_lt_disable_FC" ++ ++AC_LANG_POP ++])# _LT_LANG_FC_CONFIG ++ ++ ++# _LT_LANG_GCJ_CONFIG([TAG]) ++# -------------------------- ++# Ensure that the configuration variables for the GNU Java Compiler compiler ++# are suitably defined. These variables are subsequently used by _LT_CONFIG ++# to write the compiler configuration to 'libtool'. ++m4_defun([_LT_LANG_GCJ_CONFIG], ++[AC_REQUIRE([LT_PROG_GCJ])dnl ++AC_LANG_SAVE ++ ++# Source file extension for Java test sources. ++ac_ext=java ++ ++# Object file extension for compiled Java test sources. ++objext=o ++_LT_TAGVAR(objext, $1)=$objext ++ ++# Code to be used in simple compile tests ++lt_simple_compile_test_code="class foo {}" ++ ++# Code to be used in simple link tests ++lt_simple_link_test_code='public class conftest { public static void main(String[[]] argv) {}; }' ++ ++# ltmain only uses $CC for tagged configurations so make sure $CC is set. ++_LT_TAG_COMPILER ++ ++# save warnings/boilerplate of simple test code ++_LT_COMPILER_BOILERPLATE ++_LT_LINKER_BOILERPLATE ++ ++# Allow CC to be a program name with arguments. ++lt_save_CC=$CC ++lt_save_CFLAGS=$CFLAGS ++lt_save_GCC=$GCC ++GCC=yes ++CC=${GCJ-"gcj"} ++CFLAGS=$GCJFLAGS ++compiler=$CC ++_LT_TAGVAR(compiler, $1)=$CC ++_LT_TAGVAR(LD, $1)=$LD ++_LT_CC_BASENAME([$compiler]) ++ ++# GCJ did not exist at the time GCC didn't implicitly link libc in. 
++_LT_TAGVAR(archive_cmds_need_lc, $1)=no ++ ++_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds ++_LT_TAGVAR(reload_flag, $1)=$reload_flag ++_LT_TAGVAR(reload_cmds, $1)=$reload_cmds ++ ++if test -n "$compiler"; then ++ _LT_COMPILER_NO_RTTI($1) ++ _LT_COMPILER_PIC($1) ++ _LT_COMPILER_C_O($1) ++ _LT_COMPILER_FILE_LOCKS($1) ++ _LT_LINKER_SHLIBS($1) ++ _LT_LINKER_HARDCODE_LIBPATH($1) ++ ++ _LT_CONFIG($1) ++fi ++ ++AC_LANG_RESTORE ++ ++GCC=$lt_save_GCC ++CC=$lt_save_CC ++CFLAGS=$lt_save_CFLAGS ++])# _LT_LANG_GCJ_CONFIG ++ ++ ++# _LT_LANG_GO_CONFIG([TAG]) ++# -------------------------- ++# Ensure that the configuration variables for the GNU Go compiler ++# are suitably defined. These variables are subsequently used by _LT_CONFIG ++# to write the compiler configuration to 'libtool'. ++m4_defun([_LT_LANG_GO_CONFIG], ++[AC_REQUIRE([LT_PROG_GO])dnl ++AC_LANG_SAVE ++ ++# Source file extension for Go test sources. ++ac_ext=go ++ ++# Object file extension for compiled Go test sources. ++objext=o ++_LT_TAGVAR(objext, $1)=$objext ++ ++# Code to be used in simple compile tests ++lt_simple_compile_test_code="package main; func main() { }" ++ ++# Code to be used in simple link tests ++lt_simple_link_test_code='package main; func main() { }' ++ ++# ltmain only uses $CC for tagged configurations so make sure $CC is set. ++_LT_TAG_COMPILER ++ ++# save warnings/boilerplate of simple test code ++_LT_COMPILER_BOILERPLATE ++_LT_LINKER_BOILERPLATE ++ ++# Allow CC to be a program name with arguments. ++lt_save_CC=$CC ++lt_save_CFLAGS=$CFLAGS ++lt_save_GCC=$GCC ++GCC=yes ++CC=${GOC-"gccgo"} ++CFLAGS=$GOFLAGS ++compiler=$CC ++_LT_TAGVAR(compiler, $1)=$CC ++_LT_TAGVAR(LD, $1)=$LD ++_LT_CC_BASENAME([$compiler]) ++ ++# Go did not exist at the time GCC didn't implicitly link libc in. 
++_LT_TAGVAR(archive_cmds_need_lc, $1)=no ++ ++_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds ++_LT_TAGVAR(reload_flag, $1)=$reload_flag ++_LT_TAGVAR(reload_cmds, $1)=$reload_cmds ++ ++if test -n "$compiler"; then ++ _LT_COMPILER_NO_RTTI($1) ++ _LT_COMPILER_PIC($1) ++ _LT_COMPILER_C_O($1) ++ _LT_COMPILER_FILE_LOCKS($1) ++ _LT_LINKER_SHLIBS($1) ++ _LT_LINKER_HARDCODE_LIBPATH($1) ++ ++ _LT_CONFIG($1) ++fi ++ ++AC_LANG_RESTORE ++ ++GCC=$lt_save_GCC ++CC=$lt_save_CC ++CFLAGS=$lt_save_CFLAGS ++])# _LT_LANG_GO_CONFIG ++ ++ ++# _LT_LANG_RC_CONFIG([TAG]) ++# ------------------------- ++# Ensure that the configuration variables for the Windows resource compiler ++# are suitably defined. These variables are subsequently used by _LT_CONFIG ++# to write the compiler configuration to 'libtool'. ++m4_defun([_LT_LANG_RC_CONFIG], ++[AC_REQUIRE([LT_PROG_RC])dnl ++AC_LANG_SAVE ++ ++# Source file extension for RC test sources. ++ac_ext=rc ++ ++# Object file extension for compiled RC test sources. ++objext=o ++_LT_TAGVAR(objext, $1)=$objext ++ ++# Code to be used in simple compile tests ++lt_simple_compile_test_code='sample MENU { MENUITEM "&Soup", 100, CHECKED }' ++ ++# Code to be used in simple link tests ++lt_simple_link_test_code=$lt_simple_compile_test_code ++ ++# ltmain only uses $CC for tagged configurations so make sure $CC is set. ++_LT_TAG_COMPILER ++ ++# save warnings/boilerplate of simple test code ++_LT_COMPILER_BOILERPLATE ++_LT_LINKER_BOILERPLATE ++ ++# Allow CC to be a program name with arguments. 
++lt_save_CC=$CC ++lt_save_CFLAGS=$CFLAGS ++lt_save_GCC=$GCC ++GCC= ++CC=${RC-"windres"} ++CFLAGS= ++compiler=$CC ++_LT_TAGVAR(compiler, $1)=$CC ++_LT_CC_BASENAME([$compiler]) ++_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes ++ ++if test -n "$compiler"; then ++ : ++ _LT_CONFIG($1) ++fi ++ ++GCC=$lt_save_GCC ++AC_LANG_RESTORE ++CC=$lt_save_CC ++CFLAGS=$lt_save_CFLAGS ++])# _LT_LANG_RC_CONFIG ++ ++ ++# LT_PROG_GCJ ++# ----------- ++AC_DEFUN([LT_PROG_GCJ], ++[m4_ifdef([AC_PROG_GCJ], [AC_PROG_GCJ], ++ [m4_ifdef([A][M_PROG_GCJ], [A][M_PROG_GCJ], ++ [AC_CHECK_TOOL(GCJ, gcj,) ++ test set = "${GCJFLAGS+set}" || GCJFLAGS="-g -O2" ++ AC_SUBST(GCJFLAGS)])])[]dnl ++]) ++ ++# Old name: ++AU_ALIAS([LT_AC_PROG_GCJ], [LT_PROG_GCJ]) ++dnl aclocal-1.4 backwards compatibility: ++dnl AC_DEFUN([LT_AC_PROG_GCJ], []) ++ ++ ++# LT_PROG_GO ++# ---------- ++AC_DEFUN([LT_PROG_GO], ++[AC_CHECK_TOOL(GOC, gccgo,) ++]) ++ ++ ++# LT_PROG_RC ++# ---------- ++AC_DEFUN([LT_PROG_RC], ++[AC_CHECK_TOOL(RC, windres,) ++]) ++ ++# Old name: ++AU_ALIAS([LT_AC_PROG_RC], [LT_PROG_RC]) ++dnl aclocal-1.4 backwards compatibility: ++dnl AC_DEFUN([LT_AC_PROG_RC], []) ++ ++ ++# _LT_DECL_EGREP ++# -------------- ++# If we don't have a new enough Autoconf to choose the best grep ++# available, choose the one first in the user's PATH. ++m4_defun([_LT_DECL_EGREP], ++[AC_REQUIRE([AC_PROG_EGREP])dnl ++AC_REQUIRE([AC_PROG_FGREP])dnl ++test -z "$GREP" && GREP=grep ++_LT_DECL([], [GREP], [1], [A grep program that handles long lines]) ++_LT_DECL([], [EGREP], [1], [An ERE matcher]) ++_LT_DECL([], [FGREP], [1], [A literal string matcher]) ++dnl Non-bleeding-edge autoconf doesn't subst GREP, so do it here too ++AC_SUBST([GREP]) ++]) ++ ++ ++# _LT_DECL_OBJDUMP ++# -------------- ++# If we don't have a new enough Autoconf to choose the best objdump ++# available, choose the one first in the user's PATH. 
++m4_defun([_LT_DECL_OBJDUMP], ++[AC_CHECK_TOOL(OBJDUMP, objdump, false) ++test -z "$OBJDUMP" && OBJDUMP=objdump ++_LT_DECL([], [OBJDUMP], [1], [An object symbol dumper]) ++AC_SUBST([OBJDUMP]) ++]) ++ ++# _LT_DECL_DLLTOOL ++# ---------------- ++# Ensure DLLTOOL variable is set. ++m4_defun([_LT_DECL_DLLTOOL], ++[AC_CHECK_TOOL(DLLTOOL, dlltool, false) ++test -z "$DLLTOOL" && DLLTOOL=dlltool ++_LT_DECL([], [DLLTOOL], [1], [DLL creation program]) ++AC_SUBST([DLLTOOL]) ++]) ++ ++# _LT_DECL_FILECMD ++# ---------------- ++# Check for a file(cmd) program that can be used to detect file type and magic ++m4_defun([_LT_DECL_FILECMD], ++[AC_CHECK_TOOL([FILECMD], [file], [:]) ++_LT_DECL([], [FILECMD], [1], [A file(cmd) program that detects file types]) ++])# _LD_DECL_FILECMD ++ ++# _LT_DECL_SED ++# ------------ ++# Check for a fully-functional sed program, that truncates ++# as few characters as possible. Prefer GNU sed if found. ++m4_defun([_LT_DECL_SED], ++[AC_PROG_SED ++test -z "$SED" && SED=sed ++Xsed="$SED -e 1s/^X//" ++_LT_DECL([], [SED], [1], [A sed program that does not truncate output]) ++_LT_DECL([], [Xsed], ["\$SED -e 1s/^X//"], ++ [Sed that helps us avoid accidentally triggering echo(1) options like -n]) ++])# _LT_DECL_SED ++ ++m4_ifndef([AC_PROG_SED], [ ++# NOTE: This macro has been submitted for inclusion into # ++# GNU Autoconf as AC_PROG_SED. When it is available in # ++# a released version of Autoconf we should remove this # ++# macro and use it instead. # ++ ++m4_defun([AC_PROG_SED], ++[AC_MSG_CHECKING([for a sed that does not truncate output]) ++AC_CACHE_VAL(lt_cv_path_SED, ++[# Loop through the user's path and test for sed and gsed. ++# Then use that list of sed's as ones to test for truncation. ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ test -z "$as_dir" && as_dir=. 
++ for lt_ac_prog in sed gsed; do ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if $as_executable_p "$as_dir/$lt_ac_prog$ac_exec_ext"; then ++ lt_ac_sed_list="$lt_ac_sed_list $as_dir/$lt_ac_prog$ac_exec_ext" ++ fi ++ done ++ done ++done ++IFS=$as_save_IFS ++lt_ac_max=0 ++lt_ac_count=0 ++# Add /usr/xpg4/bin/sed as it is typically found on Solaris ++# along with /bin/sed that truncates output. ++for lt_ac_sed in $lt_ac_sed_list /usr/xpg4/bin/sed; do ++ test ! -f "$lt_ac_sed" && continue ++ cat /dev/null > conftest.in ++ lt_ac_count=0 ++ echo $ECHO_N "0123456789$ECHO_C" >conftest.in ++ # Check for GNU sed and select it if it is found. ++ if "$lt_ac_sed" --version 2>&1 < /dev/null | grep 'GNU' > /dev/null; then ++ lt_cv_path_SED=$lt_ac_sed ++ break ++ fi ++ while true; do ++ cat conftest.in conftest.in >conftest.tmp ++ mv conftest.tmp conftest.in ++ cp conftest.in conftest.nl ++ echo >>conftest.nl ++ $lt_ac_sed -e 's/a$//' < conftest.nl >conftest.out || break ++ cmp -s conftest.out conftest.nl || break ++ # 10000 chars as input seems more than enough ++ test 10 -lt "$lt_ac_count" && break ++ lt_ac_count=`expr $lt_ac_count + 1` ++ if test "$lt_ac_count" -gt "$lt_ac_max"; then ++ lt_ac_max=$lt_ac_count ++ lt_cv_path_SED=$lt_ac_sed ++ fi ++ done ++done ++]) ++SED=$lt_cv_path_SED ++AC_SUBST([SED]) ++AC_MSG_RESULT([$SED]) ++])#AC_PROG_SED ++])#m4_ifndef ++ ++# Old name: ++AU_ALIAS([LT_AC_PROG_SED], [AC_PROG_SED]) ++dnl aclocal-1.4 backwards compatibility: ++dnl AC_DEFUN([LT_AC_PROG_SED], []) ++ ++ ++# _LT_CHECK_SHELL_FEATURES ++# ------------------------ ++# Find out whether the shell is Bourne or XSI compatible, ++# or has some other useful features. 
++m4_defun([_LT_CHECK_SHELL_FEATURES], ++[if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then ++ lt_unset=unset ++else ++ lt_unset=false ++fi ++_LT_DECL([], [lt_unset], [0], [whether the shell understands "unset"])dnl ++ ++# test EBCDIC or ASCII ++case `echo X|tr X '\101'` in ++ A) # ASCII based system ++ # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr ++ lt_SP2NL='tr \040 \012' ++ lt_NL2SP='tr \015\012 \040\040' ++ ;; ++ *) # EBCDIC based system ++ lt_SP2NL='tr \100 \n' ++ lt_NL2SP='tr \r\n \100\100' ++ ;; ++esac ++_LT_DECL([SP2NL], [lt_SP2NL], [1], [turn spaces into newlines])dnl ++_LT_DECL([NL2SP], [lt_NL2SP], [1], [turn newlines into spaces])dnl ++])# _LT_CHECK_SHELL_FEATURES ++ ++ ++# _LT_PATH_CONVERSION_FUNCTIONS ++# ----------------------------- ++# Determine what file name conversion functions should be used by ++# func_to_host_file (and, implicitly, by func_to_host_path). These are needed ++# for certain cross-compile configurations and native mingw. ++m4_defun([_LT_PATH_CONVERSION_FUNCTIONS], ++[AC_REQUIRE([AC_CANONICAL_HOST])dnl ++AC_REQUIRE([AC_CANONICAL_BUILD])dnl ++AC_MSG_CHECKING([how to convert $build file names to $host format]) ++AC_CACHE_VAL(lt_cv_to_host_file_cmd, ++[case $host in ++ *-*-mingw* ) ++ case $build in ++ *-*-mingw* ) # actually msys ++ lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 ++ ;; ++ *-*-cygwin* ) ++ lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 ++ ;; ++ * ) # otherwise, assume *nix ++ lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 ++ ;; ++ esac ++ ;; ++ *-*-cygwin* ) ++ case $build in ++ *-*-mingw* ) # actually msys ++ lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin ++ ;; ++ *-*-cygwin* ) ++ lt_cv_to_host_file_cmd=func_convert_file_noop ++ ;; ++ * ) # otherwise, assume *nix ++ lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin ++ ;; ++ esac ++ ;; ++ * ) # unhandled hosts (and "normal" native builds) ++ lt_cv_to_host_file_cmd=func_convert_file_noop ++ ;; ++esac ++]) 
++to_host_file_cmd=$lt_cv_to_host_file_cmd ++AC_MSG_RESULT([$lt_cv_to_host_file_cmd]) ++_LT_DECL([to_host_file_cmd], [lt_cv_to_host_file_cmd], ++ [0], [convert $build file names to $host format])dnl ++ ++AC_MSG_CHECKING([how to convert $build file names to toolchain format]) ++AC_CACHE_VAL(lt_cv_to_tool_file_cmd, ++[#assume ordinary cross tools, or native build. ++lt_cv_to_tool_file_cmd=func_convert_file_noop ++case $host in ++ *-*-mingw* ) ++ case $build in ++ *-*-mingw* ) # actually msys ++ lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 ++ ;; ++ esac ++ ;; ++esac ++]) ++to_tool_file_cmd=$lt_cv_to_tool_file_cmd ++AC_MSG_RESULT([$lt_cv_to_tool_file_cmd]) ++_LT_DECL([to_tool_file_cmd], [lt_cv_to_tool_file_cmd], ++ [0], [convert $build files to toolchain format])dnl ++])# _LT_PATH_CONVERSION_FUNCTIONS ++ ++# Helper functions for option handling. -*- Autoconf -*- ++# ++# Copyright (C) 2004-2005, 2007-2009, 2011-2019, 2021-2022 Free ++# Software Foundation, Inc. ++# Written by Gary V. Vaughan, 2004 ++# ++# This file is free software; the Free Software Foundation gives ++# unlimited permission to copy and/or distribute it, with or without ++# modifications, as long as this notice is preserved. ++ ++# serial 8 ltoptions.m4 ++ ++# This is to help aclocal find these macros, as it can't see m4_define. ++AC_DEFUN([LTOPTIONS_VERSION], [m4_if([1])]) ++ ++ ++# _LT_MANGLE_OPTION(MACRO-NAME, OPTION-NAME) ++# ------------------------------------------ ++m4_define([_LT_MANGLE_OPTION], ++[[_LT_OPTION_]m4_bpatsubst($1__$2, [[^a-zA-Z0-9_]], [_])]) ++ ++ ++# _LT_SET_OPTION(MACRO-NAME, OPTION-NAME) ++# --------------------------------------- ++# Set option OPTION-NAME for macro MACRO-NAME, and if there is a ++# matching handler defined, dispatch to it. Other OPTION-NAMEs are ++# saved as a flag. 
++m4_define([_LT_SET_OPTION], ++[m4_define(_LT_MANGLE_OPTION([$1], [$2]))dnl ++m4_ifdef(_LT_MANGLE_DEFUN([$1], [$2]), ++ _LT_MANGLE_DEFUN([$1], [$2]), ++ [m4_warning([Unknown $1 option '$2'])])[]dnl ++]) ++ ++ ++# _LT_IF_OPTION(MACRO-NAME, OPTION-NAME, IF-SET, [IF-NOT-SET]) ++# ------------------------------------------------------------ ++# Execute IF-SET if OPTION is set, IF-NOT-SET otherwise. ++m4_define([_LT_IF_OPTION], ++[m4_ifdef(_LT_MANGLE_OPTION([$1], [$2]), [$3], [$4])]) ++ ++ ++# _LT_UNLESS_OPTIONS(MACRO-NAME, OPTION-LIST, IF-NOT-SET) ++# ------------------------------------------------------- ++# Execute IF-NOT-SET unless all options in OPTION-LIST for MACRO-NAME ++# are set. ++m4_define([_LT_UNLESS_OPTIONS], ++[m4_foreach([_LT_Option], m4_split(m4_normalize([$2])), ++ [m4_ifdef(_LT_MANGLE_OPTION([$1], _LT_Option), ++ [m4_define([$0_found])])])[]dnl ++m4_ifdef([$0_found], [m4_undefine([$0_found])], [$3 ++])[]dnl ++]) ++ ++ ++# _LT_SET_OPTIONS(MACRO-NAME, OPTION-LIST) ++# ---------------------------------------- ++# OPTION-LIST is a space-separated list of Libtool options associated ++# with MACRO-NAME. If any OPTION has a matching handler declared with ++# LT_OPTION_DEFINE, dispatch to that macro; otherwise complain about ++# the unknown option and exit. ++m4_defun([_LT_SET_OPTIONS], ++[# Set options ++m4_foreach([_LT_Option], m4_split(m4_normalize([$2])), ++ [_LT_SET_OPTION([$1], _LT_Option)]) ++ ++m4_if([$1],[LT_INIT],[ ++ dnl ++ dnl Simply set some default values (i.e off) if boolean options were not ++ dnl specified: ++ _LT_UNLESS_OPTIONS([LT_INIT], [dlopen], [enable_dlopen=no ++ ]) ++ _LT_UNLESS_OPTIONS([LT_INIT], [win32-dll], [enable_win32_dll=no ++ ]) ++ dnl ++ dnl If no reference was made to various pairs of opposing options, then ++ dnl we run the default mode handler for the pair. 
For example, if neither ++ dnl 'shared' nor 'disable-shared' was passed, we enable building of shared ++ dnl archives by default: ++ _LT_UNLESS_OPTIONS([LT_INIT], [shared disable-shared], [_LT_ENABLE_SHARED]) ++ _LT_UNLESS_OPTIONS([LT_INIT], [static disable-static], [_LT_ENABLE_STATIC]) ++ _LT_UNLESS_OPTIONS([LT_INIT], [pic-only no-pic], [_LT_WITH_PIC]) ++ _LT_UNLESS_OPTIONS([LT_INIT], [fast-install disable-fast-install], ++ [_LT_ENABLE_FAST_INSTALL]) ++ _LT_UNLESS_OPTIONS([LT_INIT], [aix-soname=aix aix-soname=both aix-soname=svr4], ++ [_LT_WITH_AIX_SONAME([aix])]) ++ ]) ++])# _LT_SET_OPTIONS ++ ++ ++ ++# _LT_MANGLE_DEFUN(MACRO-NAME, OPTION-NAME) ++# ----------------------------------------- ++m4_define([_LT_MANGLE_DEFUN], ++[[_LT_OPTION_DEFUN_]m4_bpatsubst(m4_toupper([$1__$2]), [[^A-Z0-9_]], [_])]) ++ ++ ++# LT_OPTION_DEFINE(MACRO-NAME, OPTION-NAME, CODE) ++# ----------------------------------------------- ++m4_define([LT_OPTION_DEFINE], ++[m4_define(_LT_MANGLE_DEFUN([$1], [$2]), [$3])[]dnl ++])# LT_OPTION_DEFINE ++ ++ ++# dlopen ++# ------ ++LT_OPTION_DEFINE([LT_INIT], [dlopen], [enable_dlopen=yes ++]) ++ ++AU_DEFUN([AC_LIBTOOL_DLOPEN], ++[_LT_SET_OPTION([LT_INIT], [dlopen]) ++AC_DIAGNOSE([obsolete], ++[$0: Remove this warning and the call to _LT_SET_OPTION when you ++put the 'dlopen' option into LT_INIT's first parameter.]) ++]) ++ ++dnl aclocal-1.4 backwards compatibility: ++dnl AC_DEFUN([AC_LIBTOOL_DLOPEN], []) ++ ++ ++# win32-dll ++# --------- ++# Declare package support for building win32 dll's. 
++LT_OPTION_DEFINE([LT_INIT], [win32-dll], ++[enable_win32_dll=yes ++ ++case $host in ++*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-cegcc*) ++ AC_CHECK_TOOL(AS, as, false) ++ AC_CHECK_TOOL(DLLTOOL, dlltool, false) ++ AC_CHECK_TOOL(OBJDUMP, objdump, false) ++ ;; ++esac ++ ++test -z "$AS" && AS=as ++_LT_DECL([], [AS], [1], [Assembler program])dnl ++ ++test -z "$DLLTOOL" && DLLTOOL=dlltool ++_LT_DECL([], [DLLTOOL], [1], [DLL creation program])dnl ++ ++test -z "$OBJDUMP" && OBJDUMP=objdump ++_LT_DECL([], [OBJDUMP], [1], [Object dumper program])dnl ++])# win32-dll ++ ++AU_DEFUN([AC_LIBTOOL_WIN32_DLL], ++[AC_REQUIRE([AC_CANONICAL_HOST])dnl ++_LT_SET_OPTION([LT_INIT], [win32-dll]) ++AC_DIAGNOSE([obsolete], ++[$0: Remove this warning and the call to _LT_SET_OPTION when you ++put the 'win32-dll' option into LT_INIT's first parameter.]) ++]) ++ ++dnl aclocal-1.4 backwards compatibility: ++dnl AC_DEFUN([AC_LIBTOOL_WIN32_DLL], []) ++ ++ ++# _LT_ENABLE_SHARED([DEFAULT]) ++# ---------------------------- ++# implement the --enable-shared flag, and supports the 'shared' and ++# 'disable-shared' LT_INIT options. ++# DEFAULT is either 'yes' or 'no'. If omitted, it defaults to 'yes'. ++m4_define([_LT_ENABLE_SHARED], ++[m4_define([_LT_ENABLE_SHARED_DEFAULT], [m4_if($1, no, no, yes)])dnl ++AC_ARG_ENABLE([shared], ++ [AS_HELP_STRING([--enable-shared@<:@=PKGS@:>@], ++ [build shared libraries @<:@default=]_LT_ENABLE_SHARED_DEFAULT[@:>@])], ++ [p=${PACKAGE-default} ++ case $enableval in ++ yes) enable_shared=yes ;; ++ no) enable_shared=no ;; ++ *) ++ enable_shared=no ++ # Look at the argument we got. We use all the common list separators. 
++ lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, ++ for pkg in $enableval; do ++ IFS=$lt_save_ifs ++ if test "X$pkg" = "X$p"; then ++ enable_shared=yes ++ fi ++ done ++ IFS=$lt_save_ifs ++ ;; ++ esac], ++ [enable_shared=]_LT_ENABLE_SHARED_DEFAULT) ++ ++ _LT_DECL([build_libtool_libs], [enable_shared], [0], ++ [Whether or not to build shared libraries]) ++])# _LT_ENABLE_SHARED ++ ++LT_OPTION_DEFINE([LT_INIT], [shared], [_LT_ENABLE_SHARED([yes])]) ++LT_OPTION_DEFINE([LT_INIT], [disable-shared], [_LT_ENABLE_SHARED([no])]) ++ ++# Old names: ++AC_DEFUN([AC_ENABLE_SHARED], ++[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[shared]) ++]) ++ ++AC_DEFUN([AC_DISABLE_SHARED], ++[_LT_SET_OPTION([LT_INIT], [disable-shared]) ++]) ++ ++AU_DEFUN([AM_ENABLE_SHARED], [AC_ENABLE_SHARED($@)]) ++AU_DEFUN([AM_DISABLE_SHARED], [AC_DISABLE_SHARED($@)]) ++ ++dnl aclocal-1.4 backwards compatibility: ++dnl AC_DEFUN([AM_ENABLE_SHARED], []) ++dnl AC_DEFUN([AM_DISABLE_SHARED], []) ++ ++ ++ ++# _LT_ENABLE_STATIC([DEFAULT]) ++# ---------------------------- ++# implement the --enable-static flag, and support the 'static' and ++# 'disable-static' LT_INIT options. ++# DEFAULT is either 'yes' or 'no'. If omitted, it defaults to 'yes'. ++m4_define([_LT_ENABLE_STATIC], ++[m4_define([_LT_ENABLE_STATIC_DEFAULT], [m4_if($1, no, no, yes)])dnl ++AC_ARG_ENABLE([static], ++ [AS_HELP_STRING([--enable-static@<:@=PKGS@:>@], ++ [build static libraries @<:@default=]_LT_ENABLE_STATIC_DEFAULT[@:>@])], ++ [p=${PACKAGE-default} ++ case $enableval in ++ yes) enable_static=yes ;; ++ no) enable_static=no ;; ++ *) ++ enable_static=no ++ # Look at the argument we got. We use all the common list separators. 
++ lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, ++ for pkg in $enableval; do ++ IFS=$lt_save_ifs ++ if test "X$pkg" = "X$p"; then ++ enable_static=yes ++ fi ++ done ++ IFS=$lt_save_ifs ++ ;; ++ esac], ++ [enable_static=]_LT_ENABLE_STATIC_DEFAULT) ++ ++ _LT_DECL([build_old_libs], [enable_static], [0], ++ [Whether or not to build static libraries]) ++])# _LT_ENABLE_STATIC ++ ++LT_OPTION_DEFINE([LT_INIT], [static], [_LT_ENABLE_STATIC([yes])]) ++LT_OPTION_DEFINE([LT_INIT], [disable-static], [_LT_ENABLE_STATIC([no])]) ++ ++# Old names: ++AC_DEFUN([AC_ENABLE_STATIC], ++[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[static]) ++]) ++ ++AC_DEFUN([AC_DISABLE_STATIC], ++[_LT_SET_OPTION([LT_INIT], [disable-static]) ++]) ++ ++AU_DEFUN([AM_ENABLE_STATIC], [AC_ENABLE_STATIC($@)]) ++AU_DEFUN([AM_DISABLE_STATIC], [AC_DISABLE_STATIC($@)]) ++ ++dnl aclocal-1.4 backwards compatibility: ++dnl AC_DEFUN([AM_ENABLE_STATIC], []) ++dnl AC_DEFUN([AM_DISABLE_STATIC], []) ++ ++ ++ ++# _LT_ENABLE_FAST_INSTALL([DEFAULT]) ++# ---------------------------------- ++# implement the --enable-fast-install flag, and support the 'fast-install' ++# and 'disable-fast-install' LT_INIT options. ++# DEFAULT is either 'yes' or 'no'. If omitted, it defaults to 'yes'. ++m4_define([_LT_ENABLE_FAST_INSTALL], ++[m4_define([_LT_ENABLE_FAST_INSTALL_DEFAULT], [m4_if($1, no, no, yes)])dnl ++AC_ARG_ENABLE([fast-install], ++ [AS_HELP_STRING([--enable-fast-install@<:@=PKGS@:>@], ++ [optimize for fast installation @<:@default=]_LT_ENABLE_FAST_INSTALL_DEFAULT[@:>@])], ++ [p=${PACKAGE-default} ++ case $enableval in ++ yes) enable_fast_install=yes ;; ++ no) enable_fast_install=no ;; ++ *) ++ enable_fast_install=no ++ # Look at the argument we got. We use all the common list separators. 
++ lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, ++ for pkg in $enableval; do ++ IFS=$lt_save_ifs ++ if test "X$pkg" = "X$p"; then ++ enable_fast_install=yes ++ fi ++ done ++ IFS=$lt_save_ifs ++ ;; ++ esac], ++ [enable_fast_install=]_LT_ENABLE_FAST_INSTALL_DEFAULT) ++ ++_LT_DECL([fast_install], [enable_fast_install], [0], ++ [Whether or not to optimize for fast installation])dnl ++])# _LT_ENABLE_FAST_INSTALL ++ ++LT_OPTION_DEFINE([LT_INIT], [fast-install], [_LT_ENABLE_FAST_INSTALL([yes])]) ++LT_OPTION_DEFINE([LT_INIT], [disable-fast-install], [_LT_ENABLE_FAST_INSTALL([no])]) ++ ++# Old names: ++AU_DEFUN([AC_ENABLE_FAST_INSTALL], ++[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[fast-install]) ++AC_DIAGNOSE([obsolete], ++[$0: Remove this warning and the call to _LT_SET_OPTION when you put ++the 'fast-install' option into LT_INIT's first parameter.]) ++]) ++ ++AU_DEFUN([AC_DISABLE_FAST_INSTALL], ++[_LT_SET_OPTION([LT_INIT], [disable-fast-install]) ++AC_DIAGNOSE([obsolete], ++[$0: Remove this warning and the call to _LT_SET_OPTION when you put ++the 'disable-fast-install' option into LT_INIT's first parameter.]) ++]) ++ ++dnl aclocal-1.4 backwards compatibility: ++dnl AC_DEFUN([AC_ENABLE_FAST_INSTALL], []) ++dnl AC_DEFUN([AM_DISABLE_FAST_INSTALL], []) ++ ++ ++# _LT_WITH_AIX_SONAME([DEFAULT]) ++# ---------------------------------- ++# implement the --with-aix-soname flag, and support the `aix-soname=aix' ++# and `aix-soname=both' and `aix-soname=svr4' LT_INIT options. DEFAULT ++# is either `aix', `both' or `svr4'. If omitted, it defaults to `aix'. 
++m4_define([_LT_WITH_AIX_SONAME], ++[m4_define([_LT_WITH_AIX_SONAME_DEFAULT], [m4_if($1, svr4, svr4, m4_if($1, both, both, aix))])dnl ++shared_archive_member_spec= ++case $host,$enable_shared in ++power*-*-aix[[5-9]]*,yes) ++ AC_MSG_CHECKING([which variant of shared library versioning to provide]) ++ AC_ARG_WITH([aix-soname], ++ [AS_HELP_STRING([--with-aix-soname=aix|svr4|both], ++ [shared library versioning (aka "SONAME") variant to provide on AIX, @<:@default=]_LT_WITH_AIX_SONAME_DEFAULT[@:>@.])], ++ [case $withval in ++ aix|svr4|both) ++ ;; ++ *) ++ AC_MSG_ERROR([Unknown argument to --with-aix-soname]) ++ ;; ++ esac ++ lt_cv_with_aix_soname=$with_aix_soname], ++ [AC_CACHE_VAL([lt_cv_with_aix_soname], ++ [lt_cv_with_aix_soname=]_LT_WITH_AIX_SONAME_DEFAULT) ++ with_aix_soname=$lt_cv_with_aix_soname]) ++ AC_MSG_RESULT([$with_aix_soname]) ++ if test aix != "$with_aix_soname"; then ++ # For the AIX way of multilib, we name the shared archive member ++ # based on the bitwidth used, traditionally 'shr.o' or 'shr_64.o', ++ # and 'shr.imp' or 'shr_64.imp', respectively, for the Import File. ++ # Even when GNU compilers ignore OBJECT_MODE but need '-maix64' flag, ++ # the AIX toolchain works better with OBJECT_MODE set (default 32). ++ if test 64 = "${OBJECT_MODE-32}"; then ++ shared_archive_member_spec=shr_64 ++ else ++ shared_archive_member_spec=shr ++ fi ++ fi ++ ;; ++*) ++ with_aix_soname=aix ++ ;; ++esac ++ ++_LT_DECL([], [shared_archive_member_spec], [0], ++ [Shared archive member basename, for filename based shared library versioning on AIX])dnl ++])# _LT_WITH_AIX_SONAME ++ ++LT_OPTION_DEFINE([LT_INIT], [aix-soname=aix], [_LT_WITH_AIX_SONAME([aix])]) ++LT_OPTION_DEFINE([LT_INIT], [aix-soname=both], [_LT_WITH_AIX_SONAME([both])]) ++LT_OPTION_DEFINE([LT_INIT], [aix-soname=svr4], [_LT_WITH_AIX_SONAME([svr4])]) ++ ++ ++# _LT_WITH_PIC([MODE]) ++# -------------------- ++# implement the --with-pic flag, and support the 'pic-only' and 'no-pic' ++# LT_INIT options. 
++# MODE is either 'yes' or 'no'. If omitted, it defaults to 'both'. ++m4_define([_LT_WITH_PIC], ++[AC_ARG_WITH([pic], ++ [AS_HELP_STRING([--with-pic@<:@=PKGS@:>@], ++ [try to use only PIC/non-PIC objects @<:@default=use both@:>@])], ++ [lt_p=${PACKAGE-default} ++ case $withval in ++ yes|no) pic_mode=$withval ;; ++ *) ++ pic_mode=default ++ # Look at the argument we got. We use all the common list separators. ++ lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, ++ for lt_pkg in $withval; do ++ IFS=$lt_save_ifs ++ if test "X$lt_pkg" = "X$lt_p"; then ++ pic_mode=yes ++ fi ++ done ++ IFS=$lt_save_ifs ++ ;; ++ esac], ++ [pic_mode=m4_default([$1], [default])]) ++ ++_LT_DECL([], [pic_mode], [0], [What type of objects to build])dnl ++])# _LT_WITH_PIC ++ ++LT_OPTION_DEFINE([LT_INIT], [pic-only], [_LT_WITH_PIC([yes])]) ++LT_OPTION_DEFINE([LT_INIT], [no-pic], [_LT_WITH_PIC([no])]) ++ ++# Old name: ++AU_DEFUN([AC_LIBTOOL_PICMODE], ++[_LT_SET_OPTION([LT_INIT], [pic-only]) ++AC_DIAGNOSE([obsolete], ++[$0: Remove this warning and the call to _LT_SET_OPTION when you ++put the 'pic-only' option into LT_INIT's first parameter.]) ++]) ++ ++dnl aclocal-1.4 backwards compatibility: ++dnl AC_DEFUN([AC_LIBTOOL_PICMODE], []) ++ ++ ++m4_define([_LTDL_MODE], []) ++LT_OPTION_DEFINE([LTDL_INIT], [nonrecursive], ++ [m4_define([_LTDL_MODE], [nonrecursive])]) ++LT_OPTION_DEFINE([LTDL_INIT], [recursive], ++ [m4_define([_LTDL_MODE], [recursive])]) ++LT_OPTION_DEFINE([LTDL_INIT], [subproject], ++ [m4_define([_LTDL_MODE], [subproject])]) ++ ++m4_define([_LTDL_TYPE], []) ++LT_OPTION_DEFINE([LTDL_INIT], [installable], ++ [m4_define([_LTDL_TYPE], [installable])]) ++LT_OPTION_DEFINE([LTDL_INIT], [convenience], ++ [m4_define([_LTDL_TYPE], [convenience])]) ++ ++# ltsugar.m4 -- libtool m4 base layer. -*-Autoconf-*- ++# ++# Copyright (C) 2004-2005, 2007-2008, 2011-2019, 2021-2022 Free Software ++# Foundation, Inc. ++# Written by Gary V. 
Vaughan, 2004 ++# ++# This file is free software; the Free Software Foundation gives ++# unlimited permission to copy and/or distribute it, with or without ++# modifications, as long as this notice is preserved. ++ ++# serial 6 ltsugar.m4 ++ ++# This is to help aclocal find these macros, as it can't see m4_define. ++AC_DEFUN([LTSUGAR_VERSION], [m4_if([0.1])]) ++ ++ ++# lt_join(SEP, ARG1, [ARG2...]) ++# ----------------------------- ++# Produce ARG1SEPARG2...SEPARGn, omitting [] arguments and their ++# associated separator. ++# Needed until we can rely on m4_join from Autoconf 2.62, since all earlier ++# versions in m4sugar had bugs. ++m4_define([lt_join], ++[m4_if([$#], [1], [], ++ [$#], [2], [[$2]], ++ [m4_if([$2], [], [], [[$2]_])$0([$1], m4_shift(m4_shift($@)))])]) ++m4_define([_lt_join], ++[m4_if([$#$2], [2], [], ++ [m4_if([$2], [], [], [[$1$2]])$0([$1], m4_shift(m4_shift($@)))])]) ++ ++ ++# lt_car(LIST) ++# lt_cdr(LIST) ++# ------------ ++# Manipulate m4 lists. ++# These macros are necessary as long as will still need to support ++# Autoconf-2.59, which quotes differently. ++m4_define([lt_car], [[$1]]) ++m4_define([lt_cdr], ++[m4_if([$#], 0, [m4_fatal([$0: cannot be called without arguments])], ++ [$#], 1, [], ++ [m4_dquote(m4_shift($@))])]) ++m4_define([lt_unquote], $1) ++ ++ ++# lt_append(MACRO-NAME, STRING, [SEPARATOR]) ++# ------------------------------------------ ++# Redefine MACRO-NAME to hold its former content plus 'SEPARATOR''STRING'. ++# Note that neither SEPARATOR nor STRING are expanded; they are appended ++# to MACRO-NAME as is (leaving the expansion for when MACRO-NAME is invoked). ++# No SEPARATOR is output if MACRO-NAME was previously undefined (different ++# than defined and empty). ++# ++# This macro is needed until we can rely on Autoconf 2.62, since earlier ++# versions of m4sugar mistakenly expanded SEPARATOR but not STRING. 
++m4_define([lt_append], ++[m4_define([$1], ++ m4_ifdef([$1], [m4_defn([$1])[$3]])[$2])]) ++ ++ ++ ++# lt_combine(SEP, PREFIX-LIST, INFIX, SUFFIX1, [SUFFIX2...]) ++# ---------------------------------------------------------- ++# Produce a SEP delimited list of all paired combinations of elements of ++# PREFIX-LIST with SUFFIX1 through SUFFIXn. Each element of the list ++# has the form PREFIXmINFIXSUFFIXn. ++# Needed until we can rely on m4_combine added in Autoconf 2.62. ++m4_define([lt_combine], ++[m4_if(m4_eval([$# > 3]), [1], ++ [m4_pushdef([_Lt_sep], [m4_define([_Lt_sep], m4_defn([lt_car]))])]]dnl ++[[m4_foreach([_Lt_prefix], [$2], ++ [m4_foreach([_Lt_suffix], ++ ]m4_dquote(m4_dquote(m4_shift(m4_shift(m4_shift($@)))))[, ++ [_Lt_sep([$1])[]m4_defn([_Lt_prefix])[$3]m4_defn([_Lt_suffix])])])])]) ++ ++ ++# lt_if_append_uniq(MACRO-NAME, VARNAME, [SEPARATOR], [UNIQ], [NOT-UNIQ]) ++# ----------------------------------------------------------------------- ++# Iff MACRO-NAME does not yet contain VARNAME, then append it (delimited ++# by SEPARATOR if supplied) and expand UNIQ, else NOT-UNIQ. 
++m4_define([lt_if_append_uniq], ++[m4_ifdef([$1], ++ [m4_if(m4_index([$3]m4_defn([$1])[$3], [$3$2$3]), [-1], ++ [lt_append([$1], [$2], [$3])$4], ++ [$5])], ++ [lt_append([$1], [$2], [$3])$4])]) ++ ++ ++# lt_dict_add(DICT, KEY, VALUE) ++# ----------------------------- ++m4_define([lt_dict_add], ++[m4_define([$1($2)], [$3])]) ++ ++ ++# lt_dict_add_subkey(DICT, KEY, SUBKEY, VALUE) ++# -------------------------------------------- ++m4_define([lt_dict_add_subkey], ++[m4_define([$1($2:$3)], [$4])]) ++ ++ ++# lt_dict_fetch(DICT, KEY, [SUBKEY]) ++# ---------------------------------- ++m4_define([lt_dict_fetch], ++[m4_ifval([$3], ++ m4_ifdef([$1($2:$3)], [m4_defn([$1($2:$3)])]), ++ m4_ifdef([$1($2)], [m4_defn([$1($2)])]))]) ++ ++ ++# lt_if_dict_fetch(DICT, KEY, [SUBKEY], VALUE, IF-TRUE, [IF-FALSE]) ++# ----------------------------------------------------------------- ++m4_define([lt_if_dict_fetch], ++[m4_if(lt_dict_fetch([$1], [$2], [$3]), [$4], ++ [$5], ++ [$6])]) ++ ++ ++# lt_dict_filter(DICT, [SUBKEY], VALUE, [SEPARATOR], KEY, [...]) ++# -------------------------------------------------------------- ++m4_define([lt_dict_filter], ++[m4_if([$5], [], [], ++ [lt_join(m4_quote(m4_default([$4], [[, ]])), ++ lt_unquote(m4_split(m4_normalize(m4_foreach(_Lt_key, lt_car([m4_shiftn(4, $@)]), ++ [lt_if_dict_fetch([$1], _Lt_key, [$2], [$3], [_Lt_key ])])))))])[]dnl ++]) ++ ++# ltversion.m4 -- version numbers -*- Autoconf -*- ++# ++# Copyright (C) 2004, 2011-2019, 2021-2022 Free Software Foundation, ++# Inc. ++# Written by Scott James Remnant, 2004 ++# ++# This file is free software; the Free Software Foundation gives ++# unlimited permission to copy and/or distribute it, with or without ++# modifications, as long as this notice is preserved. 
++ ++# @configure_input@ ++ ++# serial 4245 ltversion.m4 ++# This file is part of GNU Libtool ++ ++m4_define([LT_PACKAGE_VERSION], [2.4.7]) ++m4_define([LT_PACKAGE_REVISION], [2.4.7]) ++ ++AC_DEFUN([LTVERSION_VERSION], ++[macro_version='2.4.7' ++macro_revision='2.4.7' ++_LT_DECL(, macro_version, 0, [Which release of libtool.m4 was used?]) ++_LT_DECL(, macro_revision, 0) ++]) ++ ++# lt~obsolete.m4 -- aclocal satisfying obsolete definitions. -*-Autoconf-*- ++# ++# Copyright (C) 2004-2005, 2007, 2009, 2011-2019, 2021-2022 Free ++# Software Foundation, Inc. ++# Written by Scott James Remnant, 2004. ++# ++# This file is free software; the Free Software Foundation gives ++# unlimited permission to copy and/or distribute it, with or without ++# modifications, as long as this notice is preserved. ++ ++# serial 5 lt~obsolete.m4 ++ ++# These exist entirely to fool aclocal when bootstrapping libtool. ++# ++# In the past libtool.m4 has provided macros via AC_DEFUN (or AU_DEFUN), ++# which have later been changed to m4_define as they aren't part of the ++# exported API, or moved to Autoconf or Automake where they belong. ++# ++# The trouble is, aclocal is a bit thick. It'll see the old AC_DEFUN ++# in /usr/share/aclocal/libtool.m4 and remember it, then when it sees us ++# using a macro with the same name in our local m4/libtool.m4 it'll ++# pull the old libtool.m4 in (it doesn't see our shiny new m4_define ++# and doesn't know about Autoconf macros at all.) ++# ++# So we provide this file, which has a silly filename so it's always ++# included after everything else. This provides aclocal with the ++# AC_DEFUNs it wants, but when m4 processes it, it doesn't do anything ++# because those macros already exist, or will be overwritten later. ++# We use AC_DEFUN over AU_DEFUN for compatibility with aclocal-1.6. ++# ++# Anytime we withdraw an AC_DEFUN or AU_DEFUN, remember to add it here. 
++# Yes, that means every name once taken will need to remain here until ++# we give up compatibility with versions before 1.7, at which point ++# we need to keep only those names which we still refer to. ++ ++# This is to help aclocal find these macros, as it can't see m4_define. ++AC_DEFUN([LTOBSOLETE_VERSION], [m4_if([1])]) ++ ++m4_ifndef([AC_LIBTOOL_LINKER_OPTION], [AC_DEFUN([AC_LIBTOOL_LINKER_OPTION])]) ++m4_ifndef([AC_PROG_EGREP], [AC_DEFUN([AC_PROG_EGREP])]) ++m4_ifndef([_LT_AC_PROG_ECHO_BACKSLASH], [AC_DEFUN([_LT_AC_PROG_ECHO_BACKSLASH])]) ++m4_ifndef([_LT_AC_SHELL_INIT], [AC_DEFUN([_LT_AC_SHELL_INIT])]) ++m4_ifndef([_LT_AC_SYS_LIBPATH_AIX], [AC_DEFUN([_LT_AC_SYS_LIBPATH_AIX])]) ++m4_ifndef([_LT_PROG_LTMAIN], [AC_DEFUN([_LT_PROG_LTMAIN])]) ++m4_ifndef([_LT_AC_TAGVAR], [AC_DEFUN([_LT_AC_TAGVAR])]) ++m4_ifndef([AC_LTDL_ENABLE_INSTALL], [AC_DEFUN([AC_LTDL_ENABLE_INSTALL])]) ++m4_ifndef([AC_LTDL_PREOPEN], [AC_DEFUN([AC_LTDL_PREOPEN])]) ++m4_ifndef([_LT_AC_SYS_COMPILER], [AC_DEFUN([_LT_AC_SYS_COMPILER])]) ++m4_ifndef([_LT_AC_LOCK], [AC_DEFUN([_LT_AC_LOCK])]) ++m4_ifndef([AC_LIBTOOL_SYS_OLD_ARCHIVE], [AC_DEFUN([AC_LIBTOOL_SYS_OLD_ARCHIVE])]) ++m4_ifndef([_LT_AC_TRY_DLOPEN_SELF], [AC_DEFUN([_LT_AC_TRY_DLOPEN_SELF])]) ++m4_ifndef([AC_LIBTOOL_PROG_CC_C_O], [AC_DEFUN([AC_LIBTOOL_PROG_CC_C_O])]) ++m4_ifndef([AC_LIBTOOL_SYS_HARD_LINK_LOCKS], [AC_DEFUN([AC_LIBTOOL_SYS_HARD_LINK_LOCKS])]) ++m4_ifndef([AC_LIBTOOL_OBJDIR], [AC_DEFUN([AC_LIBTOOL_OBJDIR])]) ++m4_ifndef([AC_LTDL_OBJDIR], [AC_DEFUN([AC_LTDL_OBJDIR])]) ++m4_ifndef([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH], [AC_DEFUN([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH])]) ++m4_ifndef([AC_LIBTOOL_SYS_LIB_STRIP], [AC_DEFUN([AC_LIBTOOL_SYS_LIB_STRIP])]) ++m4_ifndef([AC_PATH_MAGIC], [AC_DEFUN([AC_PATH_MAGIC])]) ++m4_ifndef([AC_PROG_LD_GNU], [AC_DEFUN([AC_PROG_LD_GNU])]) ++m4_ifndef([AC_PROG_LD_RELOAD_FLAG], [AC_DEFUN([AC_PROG_LD_RELOAD_FLAG])]) ++m4_ifndef([AC_DEPLIBS_CHECK_METHOD], [AC_DEFUN([AC_DEPLIBS_CHECK_METHOD])]) 
++m4_ifndef([AC_LIBTOOL_PROG_COMPILER_NO_RTTI], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_NO_RTTI])]) ++m4_ifndef([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE], [AC_DEFUN([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE])]) ++m4_ifndef([AC_LIBTOOL_PROG_COMPILER_PIC], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_PIC])]) ++m4_ifndef([AC_LIBTOOL_PROG_LD_SHLIBS], [AC_DEFUN([AC_LIBTOOL_PROG_LD_SHLIBS])]) ++m4_ifndef([AC_LIBTOOL_POSTDEP_PREDEP], [AC_DEFUN([AC_LIBTOOL_POSTDEP_PREDEP])]) ++m4_ifndef([LT_AC_PROG_EGREP], [AC_DEFUN([LT_AC_PROG_EGREP])]) ++m4_ifndef([LT_AC_PROG_SED], [AC_DEFUN([LT_AC_PROG_SED])]) ++m4_ifndef([_LT_CC_BASENAME], [AC_DEFUN([_LT_CC_BASENAME])]) ++m4_ifndef([_LT_COMPILER_BOILERPLATE], [AC_DEFUN([_LT_COMPILER_BOILERPLATE])]) ++m4_ifndef([_LT_LINKER_BOILERPLATE], [AC_DEFUN([_LT_LINKER_BOILERPLATE])]) ++m4_ifndef([_AC_PROG_LIBTOOL], [AC_DEFUN([_AC_PROG_LIBTOOL])]) ++m4_ifndef([AC_LIBTOOL_SETUP], [AC_DEFUN([AC_LIBTOOL_SETUP])]) ++m4_ifndef([_LT_AC_CHECK_DLFCN], [AC_DEFUN([_LT_AC_CHECK_DLFCN])]) ++m4_ifndef([AC_LIBTOOL_SYS_DYNAMIC_LINKER], [AC_DEFUN([AC_LIBTOOL_SYS_DYNAMIC_LINKER])]) ++m4_ifndef([_LT_AC_TAGCONFIG], [AC_DEFUN([_LT_AC_TAGCONFIG])]) ++m4_ifndef([AC_DISABLE_FAST_INSTALL], [AC_DEFUN([AC_DISABLE_FAST_INSTALL])]) ++m4_ifndef([_LT_AC_LANG_CXX], [AC_DEFUN([_LT_AC_LANG_CXX])]) ++m4_ifndef([_LT_AC_LANG_F77], [AC_DEFUN([_LT_AC_LANG_F77])]) ++m4_ifndef([_LT_AC_LANG_GCJ], [AC_DEFUN([_LT_AC_LANG_GCJ])]) ++m4_ifndef([AC_LIBTOOL_LANG_C_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_C_CONFIG])]) ++m4_ifndef([_LT_AC_LANG_C_CONFIG], [AC_DEFUN([_LT_AC_LANG_C_CONFIG])]) ++m4_ifndef([AC_LIBTOOL_LANG_CXX_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_CXX_CONFIG])]) ++m4_ifndef([_LT_AC_LANG_CXX_CONFIG], [AC_DEFUN([_LT_AC_LANG_CXX_CONFIG])]) ++m4_ifndef([AC_LIBTOOL_LANG_F77_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_F77_CONFIG])]) ++m4_ifndef([_LT_AC_LANG_F77_CONFIG], [AC_DEFUN([_LT_AC_LANG_F77_CONFIG])]) ++m4_ifndef([AC_LIBTOOL_LANG_GCJ_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_GCJ_CONFIG])]) ++m4_ifndef([_LT_AC_LANG_GCJ_CONFIG], 
[AC_DEFUN([_LT_AC_LANG_GCJ_CONFIG])]) ++m4_ifndef([AC_LIBTOOL_LANG_RC_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_RC_CONFIG])]) ++m4_ifndef([_LT_AC_LANG_RC_CONFIG], [AC_DEFUN([_LT_AC_LANG_RC_CONFIG])]) ++m4_ifndef([AC_LIBTOOL_CONFIG], [AC_DEFUN([AC_LIBTOOL_CONFIG])]) ++m4_ifndef([_LT_AC_FILE_LTDLL_C], [AC_DEFUN([_LT_AC_FILE_LTDLL_C])]) ++m4_ifndef([_LT_REQUIRED_DARWIN_CHECKS], [AC_DEFUN([_LT_REQUIRED_DARWIN_CHECKS])]) ++m4_ifndef([_LT_AC_PROG_CXXCPP], [AC_DEFUN([_LT_AC_PROG_CXXCPP])]) ++m4_ifndef([_LT_PREPARE_SED_QUOTE_VARS], [AC_DEFUN([_LT_PREPARE_SED_QUOTE_VARS])]) ++m4_ifndef([_LT_PROG_ECHO_BACKSLASH], [AC_DEFUN([_LT_PROG_ECHO_BACKSLASH])]) ++m4_ifndef([_LT_PROG_F77], [AC_DEFUN([_LT_PROG_F77])]) ++m4_ifndef([_LT_PROG_FC], [AC_DEFUN([_LT_PROG_FC])]) ++m4_ifndef([_LT_PROG_CXX], [AC_DEFUN([_LT_PROG_CXX])]) ++ ++# Copyright (C) 2002-2021 Free Software Foundation, Inc. ++# ++# This file is free software; the Free Software Foundation ++# gives unlimited permission to copy and/or distribute it, ++# with or without modifications, as long as this notice is preserved. ++ ++# AM_AUTOMAKE_VERSION(VERSION) ++# ---------------------------- ++# Automake X.Y traces this macro to ensure aclocal.m4 has been ++# generated from the m4 files accompanying Automake X.Y. ++# (This private macro should not be called outside this file.) ++AC_DEFUN([AM_AUTOMAKE_VERSION], ++[am__api_version='1.16' ++dnl Some users find AM_AUTOMAKE_VERSION and mistake it for a way to ++dnl require some minimum version. Point them to the right macro. ++m4_if([$1], [1.16.5], [], ++ [AC_FATAL([Do not call $0, use AM_INIT_AUTOMAKE([$1]).])])dnl ++]) ++ ++# _AM_AUTOCONF_VERSION(VERSION) ++# ----------------------------- ++# aclocal traces this macro to find the Autoconf version. ++# This is a private macro too. Using m4_define simplifies ++# the logic in aclocal, which can simply ignore this definition. 
++m4_define([_AM_AUTOCONF_VERSION], []) ++ ++# AM_SET_CURRENT_AUTOMAKE_VERSION ++# ------------------------------- ++# Call AM_AUTOMAKE_VERSION and AM_AUTOMAKE_VERSION so they can be traced. ++# This function is AC_REQUIREd by AM_INIT_AUTOMAKE. ++AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION], ++[AM_AUTOMAKE_VERSION([1.16.5])dnl ++m4_ifndef([AC_AUTOCONF_VERSION], ++ [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl ++_AM_AUTOCONF_VERSION(m4_defn([AC_AUTOCONF_VERSION]))]) ++ ++# AM_AUX_DIR_EXPAND -*- Autoconf -*- ++ ++# Copyright (C) 2001-2021 Free Software Foundation, Inc. ++# ++# This file is free software; the Free Software Foundation ++# gives unlimited permission to copy and/or distribute it, ++# with or without modifications, as long as this notice is preserved. ++ ++# For projects using AC_CONFIG_AUX_DIR([foo]), Autoconf sets ++# $ac_aux_dir to '$srcdir/foo'. In other projects, it is set to ++# '$srcdir', '$srcdir/..', or '$srcdir/../..'. ++# ++# Of course, Automake must honor this variable whenever it calls a ++# tool from the auxiliary directory. The problem is that $srcdir (and ++# therefore $ac_aux_dir as well) can be either absolute or relative, ++# depending on how configure is run. This is pretty annoying, since ++# it makes $ac_aux_dir quite unusable in subdirectories: in the top ++# source directory, any form will work fine, but in subdirectories a ++# relative path needs to be adjusted first. ++# ++# $ac_aux_dir/missing ++# fails when called from a subdirectory if $ac_aux_dir is relative ++# $top_srcdir/$ac_aux_dir/missing ++# fails if $ac_aux_dir is absolute, ++# fails when called from a subdirectory in a VPATH build with ++# a relative $ac_aux_dir ++# ++# The reason of the latter failure is that $top_srcdir and $ac_aux_dir ++# are both prefixed by $srcdir. In an in-source build this is usually ++# harmless because $srcdir is '.', but things will broke when you ++# start a VPATH build or use an absolute $srcdir. 
++# ++# So we could use something similar to $top_srcdir/$ac_aux_dir/missing, ++# iff we strip the leading $srcdir from $ac_aux_dir. That would be: ++# am_aux_dir='\$(top_srcdir)/'`expr "$ac_aux_dir" : "$srcdir//*\(.*\)"` ++# and then we would define $MISSING as ++# MISSING="\${SHELL} $am_aux_dir/missing" ++# This will work as long as MISSING is not called from configure, because ++# unfortunately $(top_srcdir) has no meaning in configure. ++# However there are other variables, like CC, which are often used in ++# configure, and could therefore not use this "fixed" $ac_aux_dir. ++# ++# Another solution, used here, is to always expand $ac_aux_dir to an ++# absolute PATH. The drawback is that using absolute paths prevent a ++# configured tree to be moved without reconfiguration. ++ ++AC_DEFUN([AM_AUX_DIR_EXPAND], ++[AC_REQUIRE([AC_CONFIG_AUX_DIR_DEFAULT])dnl ++# Expand $ac_aux_dir to an absolute path. ++am_aux_dir=`cd "$ac_aux_dir" && pwd` ++]) ++ ++# AM_CONDITIONAL -*- Autoconf -*- ++ ++# Copyright (C) 1997-2021 Free Software Foundation, Inc. ++# ++# This file is free software; the Free Software Foundation ++# gives unlimited permission to copy and/or distribute it, ++# with or without modifications, as long as this notice is preserved. ++ ++# AM_CONDITIONAL(NAME, SHELL-CONDITION) ++# ------------------------------------- ++# Define a conditional. ++AC_DEFUN([AM_CONDITIONAL], ++[AC_PREREQ([2.52])dnl ++ m4_if([$1], [TRUE], [AC_FATAL([$0: invalid condition: $1])], ++ [$1], [FALSE], [AC_FATAL([$0: invalid condition: $1])])dnl ++AC_SUBST([$1_TRUE])dnl ++AC_SUBST([$1_FALSE])dnl ++_AM_SUBST_NOTMAKE([$1_TRUE])dnl ++_AM_SUBST_NOTMAKE([$1_FALSE])dnl ++m4_define([_AM_COND_VALUE_$1], [$2])dnl ++if $2; then ++ $1_TRUE= ++ $1_FALSE='#' ++else ++ $1_TRUE='#' ++ $1_FALSE= ++fi ++AC_CONFIG_COMMANDS_PRE( ++[if test -z "${$1_TRUE}" && test -z "${$1_FALSE}"; then ++ AC_MSG_ERROR([[conditional "$1" was never defined. 
++Usually this means the macro was only invoked conditionally.]]) ++fi])]) ++ ++# Copyright (C) 1999-2021 Free Software Foundation, Inc. ++# ++# This file is free software; the Free Software Foundation ++# gives unlimited permission to copy and/or distribute it, ++# with or without modifications, as long as this notice is preserved. ++ ++ ++# There are a few dirty hacks below to avoid letting 'AC_PROG_CC' be ++# written in clear, in which case automake, when reading aclocal.m4, ++# will think it sees a *use*, and therefore will trigger all it's ++# C support machinery. Also note that it means that autoscan, seeing ++# CC etc. in the Makefile, will ask for an AC_PROG_CC use... ++ ++ ++# _AM_DEPENDENCIES(NAME) ++# ---------------------- ++# See how the compiler implements dependency checking. ++# NAME is "CC", "CXX", "OBJC", "OBJCXX", "UPC", or "GJC". ++# We try a few techniques and use that to set a single cache variable. ++# ++# We don't AC_REQUIRE the corresponding AC_PROG_CC since the latter was ++# modified to invoke _AM_DEPENDENCIES(CC); we would have a circular ++# dependency, and given that the user is not expected to run this macro, ++# just rely on AC_PROG_CC. ++AC_DEFUN([_AM_DEPENDENCIES], ++[AC_REQUIRE([AM_SET_DEPDIR])dnl ++AC_REQUIRE([AM_OUTPUT_DEPENDENCY_COMMANDS])dnl ++AC_REQUIRE([AM_MAKE_INCLUDE])dnl ++AC_REQUIRE([AM_DEP_TRACK])dnl ++ ++m4_if([$1], [CC], [depcc="$CC" am_compiler_list=], ++ [$1], [CXX], [depcc="$CXX" am_compiler_list=], ++ [$1], [OBJC], [depcc="$OBJC" am_compiler_list='gcc3 gcc'], ++ [$1], [OBJCXX], [depcc="$OBJCXX" am_compiler_list='gcc3 gcc'], ++ [$1], [UPC], [depcc="$UPC" am_compiler_list=], ++ [$1], [GCJ], [depcc="$GCJ" am_compiler_list='gcc3 gcc'], ++ [depcc="$$1" am_compiler_list=]) ++ ++AC_CACHE_CHECK([dependency style of $depcc], ++ [am_cv_$1_dependencies_compiler_type], ++[if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then ++ # We make a subdir and do the tests there. 
Otherwise we can end up ++ # making bogus files that we don't know about and never remove. For ++ # instance it was reported that on HP-UX the gcc test will end up ++ # making a dummy file named 'D' -- because '-MD' means "put the output ++ # in D". ++ rm -rf conftest.dir ++ mkdir conftest.dir ++ # Copy depcomp to subdir because otherwise we won't find it if we're ++ # using a relative directory. ++ cp "$am_depcomp" conftest.dir ++ cd conftest.dir ++ # We will build objects and dependencies in a subdirectory because ++ # it helps to detect inapplicable dependency modes. For instance ++ # both Tru64's cc and ICC support -MD to output dependencies as a ++ # side effect of compilation, but ICC will put the dependencies in ++ # the current directory while Tru64 will put them in the object ++ # directory. ++ mkdir sub ++ ++ am_cv_$1_dependencies_compiler_type=none ++ if test "$am_compiler_list" = ""; then ++ am_compiler_list=`sed -n ['s/^#*\([a-zA-Z0-9]*\))$/\1/p'] < ./depcomp` ++ fi ++ am__universal=false ++ m4_case([$1], [CC], ++ [case " $depcc " in #( ++ *\ -arch\ *\ -arch\ *) am__universal=true ;; ++ esac], ++ [CXX], ++ [case " $depcc " in #( ++ *\ -arch\ *\ -arch\ *) am__universal=true ;; ++ esac]) ++ ++ for depmode in $am_compiler_list; do ++ # Setup a source with many dependencies, because some compilers ++ # like to wrap large dependency lists on column 80 (with \), and ++ # we should not choose a depcomp mode which is confused by this. ++ # ++ # We need to recreate these files for each test, as the compiler may ++ # overwrite some of them when testing with obscure command lines. ++ # This happens at least with the AIX C compiler. ++ : > sub/conftest.c ++ for i in 1 2 3 4 5 6; do ++ echo '#include "conftst'$i'.h"' >> sub/conftest.c ++ # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with ++ # Solaris 10 /bin/sh. 
++ echo '/* dummy */' > sub/conftst$i.h ++ done ++ echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf ++ ++ # We check with '-c' and '-o' for the sake of the "dashmstdout" ++ # mode. It turns out that the SunPro C++ compiler does not properly ++ # handle '-M -o', and we need to detect this. Also, some Intel ++ # versions had trouble with output in subdirs. ++ am__obj=sub/conftest.${OBJEXT-o} ++ am__minus_obj="-o $am__obj" ++ case $depmode in ++ gcc) ++ # This depmode causes a compiler race in universal mode. ++ test "$am__universal" = false || continue ++ ;; ++ nosideeffect) ++ # After this tag, mechanisms are not by side-effect, so they'll ++ # only be used when explicitly requested. ++ if test "x$enable_dependency_tracking" = xyes; then ++ continue ++ else ++ break ++ fi ++ ;; ++ msvc7 | msvc7msys | msvisualcpp | msvcmsys) ++ # This compiler won't grok '-c -o', but also, the minuso test has ++ # not run yet. These depmodes are late enough in the game, and ++ # so weak that their functioning should not be impacted. ++ am__obj=conftest.${OBJEXT-o} ++ am__minus_obj= ++ ;; ++ none) break ;; ++ esac ++ if depmode=$depmode \ ++ source=sub/conftest.c object=$am__obj \ ++ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ ++ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ ++ >/dev/null 2>conftest.err && ++ grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && ++ grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && ++ grep $am__obj sub/conftest.Po > /dev/null 2>&1 && ++ ${MAKE-make} -s -f confmf > /dev/null 2>&1; then ++ # icc doesn't choke on unknown options, it will just issue warnings ++ # or remarks (even with -Werror). So we grep stderr for any message ++ # that says an option was ignored or not supported. 
++ # When given -MP, icc 7.0 and 7.1 complain thusly: ++ # icc: Command line warning: ignoring option '-M'; no argument required ++ # The diagnosis changed in icc 8.0: ++ # icc: Command line remark: option '-MP' not supported ++ if (grep 'ignoring option' conftest.err || ++ grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else ++ am_cv_$1_dependencies_compiler_type=$depmode ++ break ++ fi ++ fi ++ done ++ ++ cd .. ++ rm -rf conftest.dir ++else ++ am_cv_$1_dependencies_compiler_type=none ++fi ++]) ++AC_SUBST([$1DEPMODE], [depmode=$am_cv_$1_dependencies_compiler_type]) ++AM_CONDITIONAL([am__fastdep$1], [ ++ test "x$enable_dependency_tracking" != xno \ ++ && test "$am_cv_$1_dependencies_compiler_type" = gcc3]) ++]) ++ ++ ++# AM_SET_DEPDIR ++# ------------- ++# Choose a directory name for dependency files. ++# This macro is AC_REQUIREd in _AM_DEPENDENCIES. ++AC_DEFUN([AM_SET_DEPDIR], ++[AC_REQUIRE([AM_SET_LEADING_DOT])dnl ++AC_SUBST([DEPDIR], ["${am__leading_dot}deps"])dnl ++]) ++ ++ ++# AM_DEP_TRACK ++# ------------ ++AC_DEFUN([AM_DEP_TRACK], ++[AC_ARG_ENABLE([dependency-tracking], [dnl ++AS_HELP_STRING( ++ [--enable-dependency-tracking], ++ [do not reject slow dependency extractors]) ++AS_HELP_STRING( ++ [--disable-dependency-tracking], ++ [speeds up one-time build])]) ++if test "x$enable_dependency_tracking" != xno; then ++ am_depcomp="$ac_aux_dir/depcomp" ++ AMDEPBACKSLASH='\' ++ am__nodep='_no' ++fi ++AM_CONDITIONAL([AMDEP], [test "x$enable_dependency_tracking" != xno]) ++AC_SUBST([AMDEPBACKSLASH])dnl ++_AM_SUBST_NOTMAKE([AMDEPBACKSLASH])dnl ++AC_SUBST([am__nodep])dnl ++_AM_SUBST_NOTMAKE([am__nodep])dnl ++]) ++ ++# Generate code to set up dependency tracking. -*- Autoconf -*- ++ ++# Copyright (C) 1999-2021 Free Software Foundation, Inc. ++# ++# This file is free software; the Free Software Foundation ++# gives unlimited permission to copy and/or distribute it, ++# with or without modifications, as long as this notice is preserved. 
++ ++# _AM_OUTPUT_DEPENDENCY_COMMANDS ++# ------------------------------ ++AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS], ++[{ ++ # Older Autoconf quotes --file arguments for eval, but not when files ++ # are listed without --file. Let's play safe and only enable the eval ++ # if we detect the quoting. ++ # TODO: see whether this extra hack can be removed once we start ++ # requiring Autoconf 2.70 or later. ++ AS_CASE([$CONFIG_FILES], ++ [*\'*], [eval set x "$CONFIG_FILES"], ++ [*], [set x $CONFIG_FILES]) ++ shift ++ # Used to flag and report bootstrapping failures. ++ am_rc=0 ++ for am_mf ++ do ++ # Strip MF so we end up with the name of the file. ++ am_mf=`AS_ECHO(["$am_mf"]) | sed -e 's/:.*$//'` ++ # Check whether this is an Automake generated Makefile which includes ++ # dependency-tracking related rules and includes. ++ # Grep'ing the whole file directly is not great: AIX grep has a line ++ # limit of 2048, but all sed's we know have understand at least 4000. ++ sed -n 's,^am--depfiles:.*,X,p' "$am_mf" | grep X >/dev/null 2>&1 \ ++ || continue ++ am_dirpart=`AS_DIRNAME(["$am_mf"])` ++ am_filepart=`AS_BASENAME(["$am_mf"])` ++ AM_RUN_LOG([cd "$am_dirpart" \ ++ && sed -e '/# am--include-marker/d' "$am_filepart" \ ++ | $MAKE -f - am--depfiles]) || am_rc=$? ++ done ++ if test $am_rc -ne 0; then ++ AC_MSG_FAILURE([Something went wrong bootstrapping makefile fragments ++ for automatic dependency tracking. If GNU make was not used, consider ++ re-running the configure script with MAKE="gmake" (or whatever is ++ necessary). 
You can also try re-running configure with the ++ '--disable-dependency-tracking' option to at least be able to build ++ the package (albeit without support for automatic dependency tracking).]) ++ fi ++ AS_UNSET([am_dirpart]) ++ AS_UNSET([am_filepart]) ++ AS_UNSET([am_mf]) ++ AS_UNSET([am_rc]) ++ rm -f conftest-deps.mk ++} ++])# _AM_OUTPUT_DEPENDENCY_COMMANDS ++ ++ ++# AM_OUTPUT_DEPENDENCY_COMMANDS ++# ----------------------------- ++# This macro should only be invoked once -- use via AC_REQUIRE. ++# ++# This code is only required when automatic dependency tracking is enabled. ++# This creates each '.Po' and '.Plo' makefile fragment that we'll need in ++# order to bootstrap the dependency handling code. ++AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS], ++[AC_CONFIG_COMMANDS([depfiles], ++ [test x"$AMDEP_TRUE" != x"" || _AM_OUTPUT_DEPENDENCY_COMMANDS], ++ [AMDEP_TRUE="$AMDEP_TRUE" MAKE="${MAKE-make}"])]) ++ ++# Do all the work for Automake. -*- Autoconf -*- ++ ++# Copyright (C) 1996-2021 Free Software Foundation, Inc. ++# ++# This file is free software; the Free Software Foundation ++# gives unlimited permission to copy and/or distribute it, ++# with or without modifications, as long as this notice is preserved. ++ ++# This macro actually does too much. Some checks are only needed if ++# your package does certain things. But this isn't really a big deal. ++ ++dnl Redefine AC_PROG_CC to automatically invoke _AM_PROG_CC_C_O. ++m4_define([AC_PROG_CC], ++m4_defn([AC_PROG_CC]) ++[_AM_PROG_CC_C_O ++]) ++ ++# AM_INIT_AUTOMAKE(PACKAGE, VERSION, [NO-DEFINE]) ++# AM_INIT_AUTOMAKE([OPTIONS]) ++# ----------------------------------------------- ++# The call with PACKAGE and VERSION arguments is the old style ++# call (pre autoconf-2.50), which is being phased out. PACKAGE ++# and VERSION should now be passed to AC_INIT and removed from ++# the call to AM_INIT_AUTOMAKE. ++# We support both call styles for the transition. 
After ++# the next Automake release, Autoconf can make the AC_INIT ++# arguments mandatory, and then we can depend on a new Autoconf ++# release and drop the old call support. ++AC_DEFUN([AM_INIT_AUTOMAKE], ++[AC_PREREQ([2.65])dnl ++m4_ifdef([_$0_ALREADY_INIT], ++ [m4_fatal([$0 expanded multiple times ++]m4_defn([_$0_ALREADY_INIT]))], ++ [m4_define([_$0_ALREADY_INIT], m4_expansion_stack)])dnl ++dnl Autoconf wants to disallow AM_ names. We explicitly allow ++dnl the ones we care about. ++m4_pattern_allow([^AM_[A-Z]+FLAGS$])dnl ++AC_REQUIRE([AM_SET_CURRENT_AUTOMAKE_VERSION])dnl ++AC_REQUIRE([AC_PROG_INSTALL])dnl ++if test "`cd $srcdir && pwd`" != "`pwd`"; then ++ # Use -I$(srcdir) only when $(srcdir) != ., so that make's output ++ # is not polluted with repeated "-I." ++ AC_SUBST([am__isrc], [' -I$(srcdir)'])_AM_SUBST_NOTMAKE([am__isrc])dnl ++ # test to see if srcdir already configured ++ if test -f $srcdir/config.status; then ++ AC_MSG_ERROR([source directory already configured; run "make distclean" there first]) ++ fi ++fi ++ ++# test whether we have cygpath ++if test -z "$CYGPATH_W"; then ++ if (cygpath --version) >/dev/null 2>/dev/null; then ++ CYGPATH_W='cygpath -w' ++ else ++ CYGPATH_W=echo ++ fi ++fi ++AC_SUBST([CYGPATH_W]) ++ ++# Define the identity of the package. ++dnl Distinguish between old-style and new-style calls. ++m4_ifval([$2], ++[AC_DIAGNOSE([obsolete], ++ [$0: two- and three-arguments forms are deprecated.]) ++m4_ifval([$3], [_AM_SET_OPTION([no-define])])dnl ++ AC_SUBST([PACKAGE], [$1])dnl ++ AC_SUBST([VERSION], [$2])], ++[_AM_SET_OPTIONS([$1])dnl ++dnl Diagnose old-style AC_INIT with new-style AM_AUTOMAKE_INIT. 
++m4_if( ++ m4_ifset([AC_PACKAGE_NAME], [ok]):m4_ifset([AC_PACKAGE_VERSION], [ok]), ++ [ok:ok],, ++ [m4_fatal([AC_INIT should be called with package and version arguments])])dnl ++ AC_SUBST([PACKAGE], ['AC_PACKAGE_TARNAME'])dnl ++ AC_SUBST([VERSION], ['AC_PACKAGE_VERSION'])])dnl ++ ++_AM_IF_OPTION([no-define],, ++[AC_DEFINE_UNQUOTED([PACKAGE], ["$PACKAGE"], [Name of package]) ++ AC_DEFINE_UNQUOTED([VERSION], ["$VERSION"], [Version number of package])])dnl ++ ++# Some tools Automake needs. ++AC_REQUIRE([AM_SANITY_CHECK])dnl ++AC_REQUIRE([AC_ARG_PROGRAM])dnl ++AM_MISSING_PROG([ACLOCAL], [aclocal-${am__api_version}]) ++AM_MISSING_PROG([AUTOCONF], [autoconf]) ++AM_MISSING_PROG([AUTOMAKE], [automake-${am__api_version}]) ++AM_MISSING_PROG([AUTOHEADER], [autoheader]) ++AM_MISSING_PROG([MAKEINFO], [makeinfo]) ++AC_REQUIRE([AM_PROG_INSTALL_SH])dnl ++AC_REQUIRE([AM_PROG_INSTALL_STRIP])dnl ++AC_REQUIRE([AC_PROG_MKDIR_P])dnl ++# For better backward compatibility. To be removed once Automake 1.9.x ++# dies out for good. For more background, see: ++# ++# ++AC_SUBST([mkdir_p], ['$(MKDIR_P)']) ++# We need awk for the "check" target (and possibly the TAP driver). The ++# system "awk" is bad on some platforms. 
++AC_REQUIRE([AC_PROG_AWK])dnl ++AC_REQUIRE([AC_PROG_MAKE_SET])dnl ++AC_REQUIRE([AM_SET_LEADING_DOT])dnl ++_AM_IF_OPTION([tar-ustar], [_AM_PROG_TAR([ustar])], ++ [_AM_IF_OPTION([tar-pax], [_AM_PROG_TAR([pax])], ++ [_AM_PROG_TAR([v7])])]) ++_AM_IF_OPTION([no-dependencies],, ++[AC_PROVIDE_IFELSE([AC_PROG_CC], ++ [_AM_DEPENDENCIES([CC])], ++ [m4_define([AC_PROG_CC], ++ m4_defn([AC_PROG_CC])[_AM_DEPENDENCIES([CC])])])dnl ++AC_PROVIDE_IFELSE([AC_PROG_CXX], ++ [_AM_DEPENDENCIES([CXX])], ++ [m4_define([AC_PROG_CXX], ++ m4_defn([AC_PROG_CXX])[_AM_DEPENDENCIES([CXX])])])dnl ++AC_PROVIDE_IFELSE([AC_PROG_OBJC], ++ [_AM_DEPENDENCIES([OBJC])], ++ [m4_define([AC_PROG_OBJC], ++ m4_defn([AC_PROG_OBJC])[_AM_DEPENDENCIES([OBJC])])])dnl ++AC_PROVIDE_IFELSE([AC_PROG_OBJCXX], ++ [_AM_DEPENDENCIES([OBJCXX])], ++ [m4_define([AC_PROG_OBJCXX], ++ m4_defn([AC_PROG_OBJCXX])[_AM_DEPENDENCIES([OBJCXX])])])dnl ++]) ++# Variables for tags utilities; see am/tags.am ++if test -z "$CTAGS"; then ++ CTAGS=ctags ++fi ++AC_SUBST([CTAGS]) ++if test -z "$ETAGS"; then ++ ETAGS=etags ++fi ++AC_SUBST([ETAGS]) ++if test -z "$CSCOPE"; then ++ CSCOPE=cscope ++fi ++AC_SUBST([CSCOPE]) ++ ++AC_REQUIRE([AM_SILENT_RULES])dnl ++dnl The testsuite driver may need to know about EXEEXT, so add the ++dnl 'am__EXEEXT' conditional if _AM_COMPILER_EXEEXT was seen. This ++dnl macro is hooked onto _AC_COMPILER_EXEEXT early, see below. ++AC_CONFIG_COMMANDS_PRE(dnl ++[m4_provide_if([_AM_COMPILER_EXEEXT], ++ [AM_CONDITIONAL([am__EXEEXT], [test -n "$EXEEXT"])])])dnl ++ ++# POSIX will say in a future version that running "rm -f" with no argument ++# is OK; and we want to be able to make that assumption in our Makefile ++# recipes. So use an aggressive probe to check that the usage we want is ++# actually supported "in the wild" to an acceptable degree. ++# See automake bug#10828. 
++# To make any issue more visible, cause the running configure to be aborted ++# by default if the 'rm' program in use doesn't match our expectations; the ++# user can still override this though. ++if rm -f && rm -fr && rm -rf; then : OK; else ++ cat >&2 <<'END' ++Oops! ++ ++Your 'rm' program seems unable to run without file operands specified ++on the command line, even when the '-f' option is present. This is contrary ++to the behaviour of most rm programs out there, and not conforming with ++the upcoming POSIX standard: ++ ++Please tell bug-automake@gnu.org about your system, including the value ++of your $PATH and any error possibly output before this message. This ++can help us improve future automake versions. ++ ++END ++ if test x"$ACCEPT_INFERIOR_RM_PROGRAM" = x"yes"; then ++ echo 'Configuration will proceed anyway, since you have set the' >&2 ++ echo 'ACCEPT_INFERIOR_RM_PROGRAM variable to "yes"' >&2 ++ echo >&2 ++ else ++ cat >&2 <<'END' ++Aborting the configuration process, to ensure you take notice of the issue. ++ ++You can download and install GNU coreutils to get an 'rm' implementation ++that behaves properly: . ++ ++If you want to complete the configuration process using your problematic ++'rm' anyway, export the environment variable ACCEPT_INFERIOR_RM_PROGRAM ++to "yes", and re-run configure. ++ ++END ++ AC_MSG_ERROR([Your 'rm' program is bad, sorry.]) ++ fi ++fi ++dnl The trailing newline in this macro's definition is deliberate, for ++dnl backward compatibility and to allow trailing 'dnl'-style comments ++dnl after the AM_INIT_AUTOMAKE invocation. See automake bug#16841. ++]) ++ ++dnl Hook into '_AC_COMPILER_EXEEXT' early to learn its expansion. Do not ++dnl add the conditional right here, as _AC_COMPILER_EXEEXT may be further ++dnl mangled by Autoconf and run in a shell conditional statement. 
++m4_define([_AC_COMPILER_EXEEXT], ++m4_defn([_AC_COMPILER_EXEEXT])[m4_provide([_AM_COMPILER_EXEEXT])]) ++ ++# When config.status generates a header, we must update the stamp-h file. ++# This file resides in the same directory as the config header ++# that is generated. The stamp files are numbered to have different names. ++ ++# Autoconf calls _AC_AM_CONFIG_HEADER_HOOK (when defined) in the ++# loop where config.status creates the headers, so we can generate ++# our stamp files there. ++AC_DEFUN([_AC_AM_CONFIG_HEADER_HOOK], ++[# Compute $1's index in $config_headers. ++_am_arg=$1 ++_am_stamp_count=1 ++for _am_header in $config_headers :; do ++ case $_am_header in ++ $_am_arg | $_am_arg:* ) ++ break ;; ++ * ) ++ _am_stamp_count=`expr $_am_stamp_count + 1` ;; ++ esac ++done ++echo "timestamp for $_am_arg" >`AS_DIRNAME(["$_am_arg"])`/stamp-h[]$_am_stamp_count]) ++ ++# Copyright (C) 2001-2021 Free Software Foundation, Inc. ++# ++# This file is free software; the Free Software Foundation ++# gives unlimited permission to copy and/or distribute it, ++# with or without modifications, as long as this notice is preserved. ++ ++# AM_PROG_INSTALL_SH ++# ------------------ ++# Define $install_sh. ++AC_DEFUN([AM_PROG_INSTALL_SH], ++[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl ++if test x"${install_sh+set}" != xset; then ++ case $am_aux_dir in ++ *\ * | *\ *) ++ install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;; ++ *) ++ install_sh="\${SHELL} $am_aux_dir/install-sh" ++ esac ++fi ++AC_SUBST([install_sh])]) ++ ++# Copyright (C) 2003-2021 Free Software Foundation, Inc. ++# ++# This file is free software; the Free Software Foundation ++# gives unlimited permission to copy and/or distribute it, ++# with or without modifications, as long as this notice is preserved. ++ ++# Check whether the underlying file-system supports filenames ++# with a leading dot. For instance MS-DOS doesn't. 
++AC_DEFUN([AM_SET_LEADING_DOT], ++[rm -rf .tst 2>/dev/null ++mkdir .tst 2>/dev/null ++if test -d .tst; then ++ am__leading_dot=. ++else ++ am__leading_dot=_ ++fi ++rmdir .tst 2>/dev/null ++AC_SUBST([am__leading_dot])]) ++ ++# Add --enable-maintainer-mode option to configure. -*- Autoconf -*- ++# From Jim Meyering ++ ++# Copyright (C) 1996-2021 Free Software Foundation, Inc. ++# ++# This file is free software; the Free Software Foundation ++# gives unlimited permission to copy and/or distribute it, ++# with or without modifications, as long as this notice is preserved. ++ ++# AM_MAINTAINER_MODE([DEFAULT-MODE]) ++# ---------------------------------- ++# Control maintainer-specific portions of Makefiles. ++# Default is to disable them, unless 'enable' is passed literally. ++# For symmetry, 'disable' may be passed as well. Anyway, the user ++# can override the default with the --enable/--disable switch. ++AC_DEFUN([AM_MAINTAINER_MODE], ++[m4_case(m4_default([$1], [disable]), ++ [enable], [m4_define([am_maintainer_other], [disable])], ++ [disable], [m4_define([am_maintainer_other], [enable])], ++ [m4_define([am_maintainer_other], [enable]) ++ m4_warn([syntax], [unexpected argument to AM@&t@_MAINTAINER_MODE: $1])]) ++AC_MSG_CHECKING([whether to enable maintainer-specific portions of Makefiles]) ++ dnl maintainer-mode's default is 'disable' unless 'enable' is passed ++ AC_ARG_ENABLE([maintainer-mode], ++ [AS_HELP_STRING([--]am_maintainer_other[-maintainer-mode], ++ am_maintainer_other[ make rules and dependencies not useful ++ (and sometimes confusing) to the casual installer])], ++ [USE_MAINTAINER_MODE=$enableval], ++ [USE_MAINTAINER_MODE=]m4_if(am_maintainer_other, [enable], [no], [yes])) ++ AC_MSG_RESULT([$USE_MAINTAINER_MODE]) ++ AM_CONDITIONAL([MAINTAINER_MODE], [test $USE_MAINTAINER_MODE = yes]) ++ MAINT=$MAINTAINER_MODE_TRUE ++ AC_SUBST([MAINT])dnl ++] ++) ++ ++# Check to see how 'make' treats includes. 
-*- Autoconf -*- ++ ++# Copyright (C) 2001-2021 Free Software Foundation, Inc. ++# ++# This file is free software; the Free Software Foundation ++# gives unlimited permission to copy and/or distribute it, ++# with or without modifications, as long as this notice is preserved. ++ ++# AM_MAKE_INCLUDE() ++# ----------------- ++# Check whether make has an 'include' directive that can support all ++# the idioms we need for our automatic dependency tracking code. ++AC_DEFUN([AM_MAKE_INCLUDE], ++[AC_MSG_CHECKING([whether ${MAKE-make} supports the include directive]) ++cat > confinc.mk << 'END' ++am__doit: ++ @echo this is the am__doit target >confinc.out ++.PHONY: am__doit ++END ++am__include="#" ++am__quote= ++# BSD make does it like this. ++echo '.include "confinc.mk" # ignored' > confmf.BSD ++# Other make implementations (GNU, Solaris 10, AIX) do it like this. ++echo 'include confinc.mk # ignored' > confmf.GNU ++_am_result=no ++for s in GNU BSD; do ++ AM_RUN_LOG([${MAKE-make} -f confmf.$s && cat confinc.out]) ++ AS_CASE([$?:`cat confinc.out 2>/dev/null`], ++ ['0:this is the am__doit target'], ++ [AS_CASE([$s], ++ [BSD], [am__include='.include' am__quote='"'], ++ [am__include='include' am__quote=''])]) ++ if test "$am__include" != "#"; then ++ _am_result="yes ($s style)" ++ break ++ fi ++done ++rm -f confinc.* confmf.* ++AC_MSG_RESULT([${_am_result}]) ++AC_SUBST([am__include])]) ++AC_SUBST([am__quote])]) ++ ++# Fake the existence of programs that GNU maintainers use. -*- Autoconf -*- ++ ++# Copyright (C) 1997-2021 Free Software Foundation, Inc. ++# ++# This file is free software; the Free Software Foundation ++# gives unlimited permission to copy and/or distribute it, ++# with or without modifications, as long as this notice is preserved. 
++ ++# AM_MISSING_PROG(NAME, PROGRAM) ++# ------------------------------ ++AC_DEFUN([AM_MISSING_PROG], ++[AC_REQUIRE([AM_MISSING_HAS_RUN]) ++$1=${$1-"${am_missing_run}$2"} ++AC_SUBST($1)]) ++ ++# AM_MISSING_HAS_RUN ++# ------------------ ++# Define MISSING if not defined so far and test if it is modern enough. ++# If it is, set am_missing_run to use it, otherwise, to nothing. ++AC_DEFUN([AM_MISSING_HAS_RUN], ++[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl ++AC_REQUIRE_AUX_FILE([missing])dnl ++if test x"${MISSING+set}" != xset; then ++ MISSING="\${SHELL} '$am_aux_dir/missing'" ++fi ++# Use eval to expand $SHELL ++if eval "$MISSING --is-lightweight"; then ++ am_missing_run="$MISSING " ++else ++ am_missing_run= ++ AC_MSG_WARN(['missing' script is too old or missing]) ++fi ++]) ++ ++# Helper functions for option handling. -*- Autoconf -*- ++ ++# Copyright (C) 2001-2021 Free Software Foundation, Inc. ++# ++# This file is free software; the Free Software Foundation ++# gives unlimited permission to copy and/or distribute it, ++# with or without modifications, as long as this notice is preserved. ++ ++# _AM_MANGLE_OPTION(NAME) ++# ----------------------- ++AC_DEFUN([_AM_MANGLE_OPTION], ++[[_AM_OPTION_]m4_bpatsubst($1, [[^a-zA-Z0-9_]], [_])]) ++ ++# _AM_SET_OPTION(NAME) ++# -------------------- ++# Set option NAME. Presently that only means defining a flag for this option. ++AC_DEFUN([_AM_SET_OPTION], ++[m4_define(_AM_MANGLE_OPTION([$1]), [1])]) ++ ++# _AM_SET_OPTIONS(OPTIONS) ++# ------------------------ ++# OPTIONS is a space-separated list of Automake options. ++AC_DEFUN([_AM_SET_OPTIONS], ++[m4_foreach_w([_AM_Option], [$1], [_AM_SET_OPTION(_AM_Option)])]) ++ ++# _AM_IF_OPTION(OPTION, IF-SET, [IF-NOT-SET]) ++# ------------------------------------------- ++# Execute IF-SET if OPTION is set, IF-NOT-SET otherwise. ++AC_DEFUN([_AM_IF_OPTION], ++[m4_ifset(_AM_MANGLE_OPTION([$1]), [$2], [$3])]) ++ ++# Copyright (C) 1999-2021 Free Software Foundation, Inc. 
++# ++# This file is free software; the Free Software Foundation ++# gives unlimited permission to copy and/or distribute it, ++# with or without modifications, as long as this notice is preserved. ++ ++# _AM_PROG_CC_C_O ++# --------------- ++# Like AC_PROG_CC_C_O, but changed for automake. We rewrite AC_PROG_CC ++# to automatically call this. ++AC_DEFUN([_AM_PROG_CC_C_O], ++[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl ++AC_REQUIRE_AUX_FILE([compile])dnl ++AC_LANG_PUSH([C])dnl ++AC_CACHE_CHECK( ++ [whether $CC understands -c and -o together], ++ [am_cv_prog_cc_c_o], ++ [AC_LANG_CONFTEST([AC_LANG_PROGRAM([])]) ++ # Make sure it works both with $CC and with simple cc. ++ # Following AC_PROG_CC_C_O, we do the test twice because some ++ # compilers refuse to overwrite an existing .o file with -o, ++ # though they will create one. ++ am_cv_prog_cc_c_o=yes ++ for am_i in 1 2; do ++ if AM_RUN_LOG([$CC -c conftest.$ac_ext -o conftest2.$ac_objext]) \ ++ && test -f conftest2.$ac_objext; then ++ : OK ++ else ++ am_cv_prog_cc_c_o=no ++ break ++ fi ++ done ++ rm -f core conftest* ++ unset am_i]) ++if test "$am_cv_prog_cc_c_o" != yes; then ++ # Losing compiler, so override with the script. ++ # FIXME: It is wrong to rewrite CC. ++ # But if we don't then we get into trouble of one sort or another. ++ # A longer-term fix would be to have automake use am__CC in this case, ++ # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)" ++ CC="$am_aux_dir/compile $CC" ++fi ++AC_LANG_POP([C])]) ++ ++# For backward compatibility. ++AC_DEFUN_ONCE([AM_PROG_CC_C_O], [AC_REQUIRE([AC_PROG_CC])]) ++ ++# Copyright (C) 2001-2021 Free Software Foundation, Inc. ++# ++# This file is free software; the Free Software Foundation ++# gives unlimited permission to copy and/or distribute it, ++# with or without modifications, as long as this notice is preserved. ++ ++# AM_RUN_LOG(COMMAND) ++# ------------------- ++# Run COMMAND, save the exit status in ac_status, and log it. 
++# (This has been adapted from Autoconf's _AC_RUN_LOG macro.) ++AC_DEFUN([AM_RUN_LOG], ++[{ echo "$as_me:$LINENO: $1" >&AS_MESSAGE_LOG_FD ++ ($1) >&AS_MESSAGE_LOG_FD 2>&AS_MESSAGE_LOG_FD ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD ++ (exit $ac_status); }]) ++ ++# Check to make sure that the build environment is sane. -*- Autoconf -*- ++ ++# Copyright (C) 1996-2021 Free Software Foundation, Inc. ++# ++# This file is free software; the Free Software Foundation ++# gives unlimited permission to copy and/or distribute it, ++# with or without modifications, as long as this notice is preserved. ++ ++# AM_SANITY_CHECK ++# --------------- ++AC_DEFUN([AM_SANITY_CHECK], ++[AC_MSG_CHECKING([whether build environment is sane]) ++# Reject unsafe characters in $srcdir or the absolute working directory ++# name. Accept space and tab only in the latter. ++am_lf=' ++' ++case `pwd` in ++ *[[\\\"\#\$\&\'\`$am_lf]]*) ++ AC_MSG_ERROR([unsafe absolute working directory name]);; ++esac ++case $srcdir in ++ *[[\\\"\#\$\&\'\`$am_lf\ \ ]]*) ++ AC_MSG_ERROR([unsafe srcdir value: '$srcdir']);; ++esac ++ ++# Do 'set' in a subshell so we don't clobber the current shell's ++# arguments. Must try -L first in case configure is actually a ++# symlink; some systems play weird games with the mod time of symlinks ++# (eg FreeBSD returns the mod time of the symlink's containing ++# directory). ++if ( ++ am_has_slept=no ++ for am_try in 1 2; do ++ echo "timestamp, slept: $am_has_slept" > conftest.file ++ set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null` ++ if test "$[*]" = "X"; then ++ # -L didn't work. ++ set X `ls -t "$srcdir/configure" conftest.file` ++ fi ++ if test "$[*]" != "X $srcdir/configure conftest.file" \ ++ && test "$[*]" != "X conftest.file $srcdir/configure"; then ++ ++ # If neither matched, then we have a broken ls. This can happen ++ # if, for instance, CONFIG_SHELL is bash and it inherits a ++ # broken ls alias from the environment. 
This has actually ++ # happened. Such a system could not be considered "sane". ++ AC_MSG_ERROR([ls -t appears to fail. Make sure there is not a broken ++ alias in your environment]) ++ fi ++ if test "$[2]" = conftest.file || test $am_try -eq 2; then ++ break ++ fi ++ # Just in case. ++ sleep 1 ++ am_has_slept=yes ++ done ++ test "$[2]" = conftest.file ++ ) ++then ++ # Ok. ++ : ++else ++ AC_MSG_ERROR([newly created file is older than distributed files! ++Check your system clock]) ++fi ++AC_MSG_RESULT([yes]) ++# If we didn't sleep, we still need to ensure time stamps of config.status and ++# generated files are strictly newer. ++am_sleep_pid= ++if grep 'slept: no' conftest.file >/dev/null 2>&1; then ++ ( sleep 1 ) & ++ am_sleep_pid=$! ++fi ++AC_CONFIG_COMMANDS_PRE( ++ [AC_MSG_CHECKING([that generated files are newer than configure]) ++ if test -n "$am_sleep_pid"; then ++ # Hide warnings about reused PIDs. ++ wait $am_sleep_pid 2>/dev/null ++ fi ++ AC_MSG_RESULT([done])]) ++rm -f conftest.file ++]) ++ ++# Copyright (C) 2009-2021 Free Software Foundation, Inc. ++# ++# This file is free software; the Free Software Foundation ++# gives unlimited permission to copy and/or distribute it, ++# with or without modifications, as long as this notice is preserved. ++ ++# AM_SILENT_RULES([DEFAULT]) ++# -------------------------- ++# Enable less verbose build rules; with the default set to DEFAULT ++# ("yes" being less verbose, "no" or empty being verbose). 
++AC_DEFUN([AM_SILENT_RULES], ++[AC_ARG_ENABLE([silent-rules], [dnl ++AS_HELP_STRING( ++ [--enable-silent-rules], ++ [less verbose build output (undo: "make V=1")]) ++AS_HELP_STRING( ++ [--disable-silent-rules], ++ [verbose build output (undo: "make V=0")])dnl ++]) ++case $enable_silent_rules in @%:@ ((( ++ yes) AM_DEFAULT_VERBOSITY=0;; ++ no) AM_DEFAULT_VERBOSITY=1;; ++ *) AM_DEFAULT_VERBOSITY=m4_if([$1], [yes], [0], [1]);; ++esac ++dnl ++dnl A few 'make' implementations (e.g., NonStop OS and NextStep) ++dnl do not support nested variable expansions. ++dnl See automake bug#9928 and bug#10237. ++am_make=${MAKE-make} ++AC_CACHE_CHECK([whether $am_make supports nested variables], ++ [am_cv_make_support_nested_variables], ++ [if AS_ECHO([['TRUE=$(BAR$(V)) ++BAR0=false ++BAR1=true ++V=1 ++am__doit: ++ @$(TRUE) ++.PHONY: am__doit']]) | $am_make -f - >/dev/null 2>&1; then ++ am_cv_make_support_nested_variables=yes ++else ++ am_cv_make_support_nested_variables=no ++fi]) ++if test $am_cv_make_support_nested_variables = yes; then ++ dnl Using '$V' instead of '$(V)' breaks IRIX make. ++ AM_V='$(V)' ++ AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)' ++else ++ AM_V=$AM_DEFAULT_VERBOSITY ++ AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY ++fi ++AC_SUBST([AM_V])dnl ++AM_SUBST_NOTMAKE([AM_V])dnl ++AC_SUBST([AM_DEFAULT_V])dnl ++AM_SUBST_NOTMAKE([AM_DEFAULT_V])dnl ++AC_SUBST([AM_DEFAULT_VERBOSITY])dnl ++AM_BACKSLASH='\' ++AC_SUBST([AM_BACKSLASH])dnl ++_AM_SUBST_NOTMAKE([AM_BACKSLASH])dnl ++]) ++ ++# Copyright (C) 2001-2021 Free Software Foundation, Inc. ++# ++# This file is free software; the Free Software Foundation ++# gives unlimited permission to copy and/or distribute it, ++# with or without modifications, as long as this notice is preserved. ++ ++# AM_PROG_INSTALL_STRIP ++# --------------------- ++# One issue with vendor 'install' (even GNU) is that you can't ++# specify the program used to strip binaries. 
This is especially ++# annoying in cross-compiling environments, where the build's strip ++# is unlikely to handle the host's binaries. ++# Fortunately install-sh will honor a STRIPPROG variable, so we ++# always use install-sh in "make install-strip", and initialize ++# STRIPPROG with the value of the STRIP variable (set by the user). ++AC_DEFUN([AM_PROG_INSTALL_STRIP], ++[AC_REQUIRE([AM_PROG_INSTALL_SH])dnl ++# Installed binaries are usually stripped using 'strip' when the user ++# run "make install-strip". However 'strip' might not be the right ++# tool to use in cross-compilation environments, therefore Automake ++# will honor the 'STRIP' environment variable to overrule this program. ++dnl Don't test for $cross_compiling = yes, because it might be 'maybe'. ++if test "$cross_compiling" != no; then ++ AC_CHECK_TOOL([STRIP], [strip], :) ++fi ++INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" ++AC_SUBST([INSTALL_STRIP_PROGRAM])]) ++ ++# Copyright (C) 2006-2021 Free Software Foundation, Inc. ++# ++# This file is free software; the Free Software Foundation ++# gives unlimited permission to copy and/or distribute it, ++# with or without modifications, as long as this notice is preserved. ++ ++# _AM_SUBST_NOTMAKE(VARIABLE) ++# --------------------------- ++# Prevent Automake from outputting VARIABLE = @VARIABLE@ in Makefile.in. ++# This macro is traced by Automake. ++AC_DEFUN([_AM_SUBST_NOTMAKE]) ++ ++# AM_SUBST_NOTMAKE(VARIABLE) ++# -------------------------- ++# Public sister of _AM_SUBST_NOTMAKE. ++AC_DEFUN([AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE($@)]) ++ ++# Check how to create a tarball. -*- Autoconf -*- ++ ++# Copyright (C) 2004-2021 Free Software Foundation, Inc. ++# ++# This file is free software; the Free Software Foundation ++# gives unlimited permission to copy and/or distribute it, ++# with or without modifications, as long as this notice is preserved. ++ ++# _AM_PROG_TAR(FORMAT) ++# -------------------- ++# Check how to create a tarball in format FORMAT. 
++# FORMAT should be one of 'v7', 'ustar', or 'pax'. ++# ++# Substitute a variable $(am__tar) that is a command ++# writing to stdout a FORMAT-tarball containing the directory ++# $tardir. ++# tardir=directory && $(am__tar) > result.tar ++# ++# Substitute a variable $(am__untar) that extract such ++# a tarball read from stdin. ++# $(am__untar) < result.tar ++# ++AC_DEFUN([_AM_PROG_TAR], ++[# Always define AMTAR for backward compatibility. Yes, it's still used ++# in the wild :-( We should find a proper way to deprecate it ... ++AC_SUBST([AMTAR], ['$${TAR-tar}']) ++ ++# We'll loop over all known methods to create a tar archive until one works. ++_am_tools='gnutar m4_if([$1], [ustar], [plaintar]) pax cpio none' ++ ++m4_if([$1], [v7], ++ [am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -'], ++ ++ [m4_case([$1], ++ [ustar], ++ [# The POSIX 1988 'ustar' format is defined with fixed-size fields. ++ # There is notably a 21 bits limit for the UID and the GID. In fact, ++ # the 'pax' utility can hang on bigger UID/GID (see automake bug#8343 ++ # and bug#13588). ++ am_max_uid=2097151 # 2^21 - 1 ++ am_max_gid=$am_max_uid ++ # The $UID and $GID variables are not portable, so we need to resort ++ # to the POSIX-mandated id(1) utility. Errors in the 'id' calls ++ # below are definitely unexpected, so allow the users to see them ++ # (that is, avoid stderr redirection). 
++ am_uid=`id -u || echo unknown` ++ am_gid=`id -g || echo unknown` ++ AC_MSG_CHECKING([whether UID '$am_uid' is supported by ustar format]) ++ if test $am_uid -le $am_max_uid; then ++ AC_MSG_RESULT([yes]) ++ else ++ AC_MSG_RESULT([no]) ++ _am_tools=none ++ fi ++ AC_MSG_CHECKING([whether GID '$am_gid' is supported by ustar format]) ++ if test $am_gid -le $am_max_gid; then ++ AC_MSG_RESULT([yes]) ++ else ++ AC_MSG_RESULT([no]) ++ _am_tools=none ++ fi], ++ ++ [pax], ++ [], ++ ++ [m4_fatal([Unknown tar format])]) ++ ++ AC_MSG_CHECKING([how to create a $1 tar archive]) ++ ++ # Go ahead even if we have the value already cached. We do so because we ++ # need to set the values for the 'am__tar' and 'am__untar' variables. ++ _am_tools=${am_cv_prog_tar_$1-$_am_tools} ++ ++ for _am_tool in $_am_tools; do ++ case $_am_tool in ++ gnutar) ++ for _am_tar in tar gnutar gtar; do ++ AM_RUN_LOG([$_am_tar --version]) && break ++ done ++ am__tar="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$$tardir"' ++ am__tar_="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$tardir"' ++ am__untar="$_am_tar -xf -" ++ ;; ++ plaintar) ++ # Must skip GNU tar: if it does not support --format= it doesn't create ++ # ustar tarball either. ++ (tar --version) >/dev/null 2>&1 && continue ++ am__tar='tar chf - "$$tardir"' ++ am__tar_='tar chf - "$tardir"' ++ am__untar='tar xf -' ++ ;; ++ pax) ++ am__tar='pax -L -x $1 -w "$$tardir"' ++ am__tar_='pax -L -x $1 -w "$tardir"' ++ am__untar='pax -r' ++ ;; ++ cpio) ++ am__tar='find "$$tardir" -print | cpio -o -H $1 -L' ++ am__tar_='find "$tardir" -print | cpio -o -H $1 -L' ++ am__untar='cpio -i -H $1 -d' ++ ;; ++ none) ++ am__tar=false ++ am__tar_=false ++ am__untar=false ++ ;; ++ esac ++ ++ # If the value was cached, stop now. We just wanted to have am__tar ++ # and am__untar set. ++ test -n "${am_cv_prog_tar_$1}" && break ++ ++ # tar/untar a dummy directory, and stop if the command works. 
++ rm -rf conftest.dir ++ mkdir conftest.dir ++ echo GrepMe > conftest.dir/file ++ AM_RUN_LOG([tardir=conftest.dir && eval $am__tar_ >conftest.tar]) ++ rm -rf conftest.dir ++ if test -s conftest.tar; then ++ AM_RUN_LOG([$am__untar /dev/null 2>&1 && break ++ fi ++ done ++ rm -rf conftest.dir ++ ++ AC_CACHE_VAL([am_cv_prog_tar_$1], [am_cv_prog_tar_$1=$_am_tool]) ++ AC_MSG_RESULT([$am_cv_prog_tar_$1])]) ++ ++AC_SUBST([am__tar]) ++AC_SUBST([am__untar]) ++]) # _AM_PROG_TAR ++ +diff --git a/bolt-plugin/bolt-plugin.cc b/bolt-plugin/bolt-plugin.cc +new file mode 100644 +index 000000000..f65011fd1 +--- /dev/null ++++ b/bolt-plugin/bolt-plugin.cc +@@ -0,0 +1,1153 @@ ++/* bolt plugin for gold and/or GNU ld. ++ Copyright (C) 2022-2023 Free Software Foundation, Inc. ++ Contributed by Majin and Liyancheng. ++ ++This program is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. ++ ++This program is distributed in the hope that it will be useful, but ++WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++General Public License for more details. ++ ++You should have received a copy of the GNU General Public License ++along with this program; see the file COPYING3. If not see ++. */ ++ ++/* The plugin has only one external function: onload. Gold passes it an ++ array of function that the plugin uses to communicate back to gold. ++ ++ With the functions provided by gold, the plugin can be notified when ++ gold first analyzes a file and passes a symbol table back to gold. The ++ plugin is also notified when all symbols have been read and it is time ++ to generate machine code for the necessary symbols. ++ ++ More information at http://gcc.gnu.org/wiki/whopr/driver. 
*/ ++ ++/* Firstly, this plugin read profile info from .text.fdo.func_name section from ++ each claim file and parse it into BOLT profile. ++ ++ The section read from the claim file will follow the following example. ++ .section .text.fdo.sort_array // Section name ++ .string ".fdo.caller sort_array" // Function name ++ .string ".fdo.caller.size 492" // Function size ++ .string ".fdo.caller.bind GLOBAL" // Bind type ++ .string "58" // branch source address ++ .string "0" // branch destination address ++ .string "336" // count ++ ++ The above is the case where the profile data comes from PGO. ++ If the data comes from AutoFDO, branch source address will be ++ BB address and branch destination address will be disabled. e.g. ++ .string "58" // BB address ++ .string "336" // count ++ ++ The BOLT profile file format follows the syntax below which defined in ++ llvm-bolt. ++ ++ Branch info mode when profile collect from PGO: ++ ++ ++ ++ ++ Examples: ++ ++ 1 main 58 1 main 78 0 100 ++ ++ BB info mode when profile collect from AutoFDO: ++ ++ ++ Examples: ++ ++ 1 main 58 100 ++ ++ Secondly, it also receive BOLT profile generated by perf2bolt. ++ ++ Finally, this plugin calls llvm-bolt to do optimizations after linkage. 
++ ++*/ ++ ++#ifdef HAVE_CONFIG_H ++#include "config.h" ++#endif ++#if HAVE_STDINT_H ++#include ++#endif ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#ifdef HAVE_SYS_WAIT_H ++#include ++#endif ++#include ++#include ++#include "simple-object.h" ++#include "plugin-api.h" ++ ++namespace LIBIBERTY ++{ ++#include ++} ++using LIBIBERTY::xmalloc; ++using LIBIBERTY::lbasename; ++using LIBIBERTY::xstrdup; ++using LIBIBERTY::concat; ++using LIBIBERTY::lrealpath; ++ ++#include ++#include ++#include ++#include ++ ++using std::vector; ++using std::string; ++using std::map; ++using std::set; ++ ++static ld_plugin_register_claim_file register_claim_file = NULL; ++static ld_plugin_register_all_symbols_read register_all_symbols_read = NULL; ++static ld_plugin_register_cleanup register_cleanup = NULL; ++static ld_plugin_message message = NULL; ++ ++static enum ld_plugin_output_file_type linker_output; ++ ++extern "C" ++{ ++ enum ld_plugin_status onload (struct ld_plugin_tv *tv); ++} ++ ++/* C99 bool type cannot coerce parm 'gate' range, so use int here. */ ++ ++static void ++check_gate (int gate, enum ld_plugin_level level, const char *text) ++{ ++ if (gate) ++ { ++ return; ++ } ++ ++ if (message) ++ { ++ message (level, text); ++ } ++ else ++ { ++ /* Print msg to stderr if there is no nicer way to inform the user. */ ++ fprintf (stderr, "%s\n", text); ++ if (level == LDPL_FATAL) ++ { ++ abort (); ++ } ++ } ++} ++ ++/* This wrapper allows macro CHECK to be called with a non-integer argument ++ GATE. For pointer cases, GATE should be no-Null. */ ++ ++#define CHECK(GATE, LEVEL, TEXT) check_gate (((GATE) != 0), (LEVEL), (TEXT)) ++ ++#define __MSG_INFO__ ++#define __MSG_WARN__ ++#define __MSG_ERROR__ ++ ++#ifdef __MSG_INFO__ ++#define MSG_INFO(...) 
\ ++ if (message) \ ++ { \ ++ message (LDPL_INFO, "BOLT-PLUGIN-INFO: " __VA_ARGS__); \ ++ } \ ++ else \ ++ { \ ++ fprintf (stderr, "BOLT-PLUGIN-INFO: " __VA_ARGS__); \ ++ } ++#else ++#define MSG_INFO(...) ++#endif ++ ++#ifdef __MSG_WARN__ ++#define MSG_WARN(...) \ ++ if (message) \ ++ { \ ++ message (LDPL_WARNING, "BOLT-PLUGIN-WARNING: " __VA_ARGS__); \ ++ } \ ++ else \ ++ { \ ++ fprintf (stderr, "BOLT-PLUGIN-WARNING: " __VA_ARGS__); \ ++ } ++#else ++#define MSG_WARN(...) ++#endif ++ ++#ifdef __MSG_ERROR__ ++#define MSG_ERROR(...) \ ++ if (message) \ ++ { \ ++ message (LDPL_FATAL, "BOLT-PLUGIN-ERROR: " __VA_ARGS__); \ ++ } \ ++ else \ ++ { \ ++ fprintf (stderr, "BOLT-PLUGIN-ERROR: " __VA_ARGS__); \ ++ abort (); \ ++ } ++#else ++#define MSG_ERROR(...) ++#endif ++ ++#if HAVE_DOS_BASED_FILE_SYSTEM ++const char *separator = "\\"; ++#else ++const char *separator = "/"; ++#endif ++ ++/* Encapsulates object file data during symbol scan. */ ++struct plugin_objfile ++{ ++ simple_object_read *objfile; ++ const struct ld_plugin_input_file *file; ++}; ++ ++struct jump_info ++{ ++ string des_func_name; ++ string src_addr_offset; ++ string dst_addr_offset; ++ string count; ++}; ++ ++struct func_info ++{ ++ string function_name; ++ string bind_type; /* "GLOBAL","WEAK","LOCAL","UNKNOWN". */ ++ string size; ++ vector edges; ++}; ++ ++/* Define feedback data type. */ ++enum feedback_type ++{ ++ NULL_TYPE, /* No feedback data. */ ++ PGO_TYPE, /* Feedback data from PGO. */ ++ AFDO_TYPE, /* Feedback data from AutoFDO. */ ++ BOLT_TYPE, /* Feedback data from BOLT. */ ++}; ++ ++#define DEFAULT_BOLT_OUT_DIR (get_current_dir_name ()) ++#define DEFAULT_BOLT_OUT_NAME "default.fdata" ++#define DEFAULT_BOLT_OUT_NAME_SUFFIX ".fdata" ++ ++/* The FDO section's special prefix names. */ ++#define ASM_FDO_SECTION_PREFIX ".text.fdo." 
++#define ASM_FDO_CALLER_FLAG ".fdo.caller " ++#define ASM_FDO_CALLER_BIND_FLAG ".fdo.caller.bind " ++#define ASM_FDO_CALLER_SIZE_FLAG ".fdo.caller.size " ++#define ASM_FDO_CALLEE_FLAG ".fdo.callee " ++ ++static int linker_output_set; ++ ++/* BOLT profile name generated by -fauto-bolt or ++ read from -fbolt-use. */ ++static string bolt_profile_name; ++ ++/* Path to save configuration file generated by -fauto-bolt. */ ++static string bolt_dir_path; ++ ++/* BOLT profile file FD generated by -fauto-bolt. */ ++static FILE *bolt_file_fd = NULL; ++ ++/* Temporary binary or dynamic file with reloc info. */ ++static string tmp_out_file_name = "a.out"; ++ ++/* Binary or dynamic file after BOLT. */ ++static string bolt_opt_target; ++ ++/* Format of bolt_optimize_options should be "reorder-functions=hfsort+ ...", ++ command 'llvm-bolt' has been added here. */ ++static string bolt_optimize_options ("llvm-bolt "); ++ ++static enum feedback_type fdo_type = feedback_type::NULL_TYPE; ++ ++static vector gcc_options; ++ ++/* Map of > */ ++static map> weak_functions; ++ ++/* Returns 1 if two strings have the same prefix. */ ++ ++inline static int ++is_prefix_of (const char *prefix, const char *str) ++{ ++ return strncmp (prefix, str, strlen (prefix)) == 0; ++} ++ ++static bool ++file_exist (const char *file_name) ++{ ++ if (file_name == nullptr) ++ { ++ MSG_ERROR ("file_exist get empty input file name."); ++ return false; ++ } ++ struct stat buffer; ++ if (stat (file_name, &buffer) == 0) ++ { ++ return true; ++ } ++ ++ MSG_WARN ("file_exist check failed: %s does not exist!", file_name); ++ return false; ++} ++ ++/* Popen run cmd, use safe character set for whitelist verification. */ ++ ++static void ++popen_run (const string& cmd) ++{ ++ for (const char &ch : cmd) ++ { ++ if ((ch >= '0' && ch <= '9') ++ || (ch >= 'A' && ch <= 'Z') ++ || (ch >= 'a' && ch <= 'z') ++ || (ch == ' ' || ch == '_') ++ || (ch == '-' || ch == '/') ++ || (ch == '.' 
|| ch == '+') ++ || (ch == '=' || ch == '#')) ++ { ++ continue; ++ } ++ else ++ { ++ MSG_WARN ("Unsafe command: %s", cmd.c_str ()); ++ MSG_ERROR ("The command can only contain the following characters " ++ "0-9, A-Z, a-z, '_', '-', '/', ' ', '.', '+', '=', '#' "); ++ } ++ } ++ MSG_INFO ("Execute command: %s", cmd.c_str ()); ++ FILE *fd = popen (cmd.c_str (), "r"); ++ if (fd == nullptr) ++ { ++ MSG_WARN ("Execute command faild!"); ++ } ++ else ++ { ++ char result_buf[1024]; ++ while (fgets (result_buf, sizeof (result_buf), fd) != NULL) ++ { ++ if (result_buf[strlen (result_buf) - 1] == '\n') ++ { ++ result_buf[strlen (result_buf) - 1] = '\0'; ++ } ++ MSG_INFO ("%s", result_buf); ++ } ++ pclose (fd); ++ } ++} ++ ++/* Generate bolt optimize command. */ ++ ++static string ++generate_bolt_cmd () ++{ ++ string new_binary = tmp_out_file_name + ".bolt"; ++ string cmd; ++ ++ /* bolt_optimize_options != "llvm-bolt " ++ means that the user uses custom input options. */ ++ if (bolt_optimize_options != "llvm-bolt ") ++ { ++ cmd = bolt_optimize_options + " " + tmp_out_file_name ++ + " -o " + new_binary ++ + " -data=" + bolt_profile_name; ++ } ++ else ++ { ++ if (fdo_type == feedback_type::AFDO_TYPE) ++ { ++ cmd = string ("llvm-bolt -reorder-functions=hfsort+ ") ++ + tmp_out_file_name + " -o " + new_binary ++ + " -data=" + bolt_profile_name; ++ } ++ else if (fdo_type == feedback_type::PGO_TYPE ++ || fdo_type == feedback_type::BOLT_TYPE) ++ { ++ cmd = string ("llvm-bolt -reorder-blocks=cache+ ") ++ + string (" -reorder-functions=hfsort+ ") ++ + string (" -split-functions=3 -split-all-cold ") ++ + string (" -dyno-stats -icf=1 -use-gnu-stack ") ++ + tmp_out_file_name + " -o " + new_binary ++ + " -data=" + bolt_profile_name; ++ } ++ else ++ { ++ MSG_ERROR ("Invalid profile type!"); ++ return string (); ++ } ++ MSG_INFO ("Using the default llvm-bolt optimization option," ++ " manually specify this option by -fbolt-option. 
"); ++ } ++ return cmd; ++} ++ ++/* Execute BOLT optimization, backup original binary with .orig . */ ++ ++static void ++do_bolt_opt () ++{ ++ string cmd = generate_bolt_cmd (); ++ if (cmd.empty ()) ++ { ++ return; ++ } ++ popen_run (cmd); ++ string new_binary = tmp_out_file_name + ".bolt"; ++ if (file_exist (new_binary.c_str ())) ++ { ++ cmd = "mv -f " + tmp_out_file_name + " " + tmp_out_file_name + ".orig"; ++ popen_run (cmd); ++ ++ cmd = "cp -f " + new_binary + " " + tmp_out_file_name; ++ popen_run (cmd); ++ } ++ else ++ { ++ MSG_ERROR ("BOLT optimization fail!" ++ " Try installing llvm-bolt or" ++ " enabling relocation info with flag -Wl,-q"); ++ } ++} ++ ++/* If -fbolt-target is set and this binary is the target, return true. */ ++ ++inline static bool ++is_bolt_opt_target () ++{ ++ if (!bolt_opt_target.empty () ++ && strcmp (lbasename (tmp_out_file_name.c_str ()), ++ lbasename (bolt_opt_target.c_str ())) != 0) ++ { ++ MSG_INFO ("BOLT optmization target is %s, processing %s, skip.", ++ bolt_opt_target.c_str (), tmp_out_file_name.c_str ()); ++ return false; ++ } ++ return true; ++} ++ ++/* Remove temporary files after linkage, and do BOLT optimization. */ ++ ++static enum ld_plugin_status ++cleanup_handler () ++{ ++ if (bolt_file_fd) ++ { ++ fclose (bolt_file_fd); ++ } ++ ++ if (file_exist (tmp_out_file_name.c_str ()) ++ && file_exist (bolt_profile_name.c_str ()) ++ && is_bolt_opt_target ()) ++ { ++ do_bolt_opt (); ++ } ++ ++ return LDPS_OK; ++} ++ ++/* Open BOLT profile file generated by -fauto-bolt. */ ++ ++static void ++open_bolt_profile_file (const char *file_name) ++{ ++ if (file_name == NULL) ++ { ++ MSG_ERROR ("Empty BOLT profile name, exit!"); ++ } ++ ++ if (bolt_file_fd == NULL) ++ { ++ MSG_INFO ("Generate profile file for BOLT: %s", file_name); ++ bolt_file_fd = fopen (file_name, "wt"); ++ if (!bolt_file_fd) ++ { ++ MSG_ERROR ("Failed to open the file: %s." 
++ " Please check whether the target path exists.", ++ file_name); ++ } ++ return; ++ } ++ else ++ { ++ MSG_WARN ("BOLT profile file: %s is open, skip.", file_name); ++ } ++} ++ ++/* In BOLT profile, function with same name represent as func_name/file_name/1, ++ also, `/` has been added in gcc/final.c, so add /1 if this function is same ++ name function. */ ++ ++static string ++add_suffix (string str) ++{ ++ if (str.empty () || (strstr (str.c_str (), "/") == NULL)) ++ { ++ return str; ++ } ++ ++ return str + "/1"; ++} ++ ++/* Dump function info to BOLT profile, bolt_file_fd does not need ++ to be closed here. */ ++ ++static void ++dump_func_to_bolt_profile_file (const struct func_info &func) ++{ ++ if (func.edges.empty ()) ++ { ++ return; ++ } ++ ++ if (!bolt_file_fd) ++ { ++ open_bolt_profile_file (bolt_profile_name.c_str ()); ++ ++ /* Check whether the feedback data is from AutoFDO. */ ++ if (fdo_type == feedback_type::AFDO_TYPE) ++ { ++ fprintf (bolt_file_fd, "no_lbr cycles:u:\n"); ++ } ++ } ++ ++ for (const auto &edge: func.edges) ++ { ++ if (fdo_type == feedback_type::PGO_TYPE) ++ { ++ fprintf (bolt_file_fd, "1 %s %s 1 %s %s 0 %s\n", ++ add_suffix (func.function_name).c_str (), ++ edge.src_addr_offset.c_str (), ++ add_suffix (edge.des_func_name).c_str (), ++ edge.dst_addr_offset.c_str (), edge.count.c_str ()); ++ } ++ else if (fdo_type == feedback_type::AFDO_TYPE) ++ { ++ fprintf (bolt_file_fd, "1 %s %s %s\n", ++ add_suffix (func.function_name).c_str (), ++ edge.src_addr_offset.c_str (), ++ edge.count.c_str ()); ++ } ++ } ++ ++ fflush (bolt_file_fd); ++} ++ ++/* Called by the linker when all symbols have been read. */ ++ ++static enum ld_plugin_status ++all_symbols_read_handler () ++{ ++ for (const auto &functions: weak_functions) ++ { ++ /* More than one weak function. 
*/ ++ if (functions.second.size () > 1) ++ { ++ MSG_WARN ("The weak function: %s is confusing, take the first one.", ++ functions.first.c_str ()); ++ } ++ ++ dump_func_to_bolt_profile_file (functions.second[0]); ++ } ++ return LDPS_OK; ++} ++ ++/* Move pointer p to end and return end. */ ++ ++static char * ++get_next_content (char *p, char *end) ++{ ++ while (*p && p < end) ++ { ++ p++; ++ } ++ p++; ++ ++ return p; ++} ++ ++/* Process function head info. */ ++ ++static char * ++process_function_head (char *data , char *end, struct func_info *func) ++{ ++ CHECK (is_prefix_of (ASM_FDO_CALLER_FLAG, data), LDPL_FATAL, ++ "The function name is missing."); ++ func->function_name = xstrdup (data + strlen (ASM_FDO_CALLER_FLAG)); ++ data = get_next_content (data, end); ++ ++ CHECK (is_prefix_of (ASM_FDO_CALLER_SIZE_FLAG, data), LDPL_FATAL, ++ "The function size is missing."); ++ func->size = xstrdup (data + strlen (ASM_FDO_CALLER_SIZE_FLAG)); ++ data = get_next_content (data, end); ++ ++ CHECK (is_prefix_of (ASM_FDO_CALLER_BIND_FLAG, data), LDPL_FATAL, ++ "The function bind type is missing."); ++ func->bind_type = xstrdup (data + strlen (ASM_FDO_CALLER_BIND_FLAG)); ++ data = get_next_content (data, end); ++ return data; ++} ++ ++/* Read profile info from the symbol table located between data and end. 
*/ ++ ++static void ++process_section (char *data, char *end) ++{ ++ struct func_info func; ++ ++ data = process_function_head (data, end, &func); ++ ++ while (*data && data < end) ++ { ++ struct jump_info jump; ++ ++ CHECK (data, LDPL_FATAL, "data is NULL"); ++ jump.src_addr_offset = xstrdup (data); ++ ++ data = get_next_content (data, end); ++ CHECK (data, LDPL_FATAL, "data is NULL"); ++ if (is_prefix_of (ASM_FDO_CALLEE_FLAG, data)) ++ { ++ jump.des_func_name = xstrdup (data + strlen (ASM_FDO_CALLEE_FLAG)); ++ jump.dst_addr_offset = "0"; ++ data = get_next_content (data, end); ++ CHECK (data, LDPL_FATAL, "data is NULL"); ++ } ++ else if (fdo_type == feedback_type::PGO_TYPE) ++ { ++ jump.des_func_name = func.function_name; ++ jump.dst_addr_offset = xstrdup (data); ++ data = get_next_content (data, end); ++ CHECK (data, LDPL_FATAL, "data is NULL"); ++ } ++ else ++ { ++ jump.des_func_name = func.function_name; ++ } ++ ++ jump.count = xstrdup (data); ++ data = get_next_content (data, end); ++ ++ func.edges.push_back (jump); ++ } ++ ++ if (is_prefix_of ("WEAK", func.bind_type.c_str ())) ++ { ++ weak_functions[func.function_name].push_back (func); ++ } ++ else ++ { ++ dump_func_to_bolt_profile_file (func); ++ } ++} ++ ++/* Process error when calling function process_symtab. */ ++ ++static int ++process_symtab_error (struct plugin_objfile *obj, char *secdatastart) ++{ ++ MSG_ERROR ("%s: corrupt object file.", obj->file->name); ++ ++ /* Force claim_file_handler to abandon this file. */ ++ if (secdatastart != NULL) ++ { ++ free (secdatastart); ++ } ++ return 0; ++} ++ ++/* Process one section of an object file. Return to 1 to continue processing ++ other sections which define in simple_object_find_sections. */ ++ ++static int ++process_symtab (void *data, const char *name, off_t offset, off_t length) ++{ ++ if (data == NULL) ++ { ++ MSG_WARN ("Empty symtab! skip it."); ++ return 0; ++ } ++ if (name == NULL) ++ { ++ MSG_WARN ("Empty symtab name! 
skip it."); ++ return 0; ++ } ++ struct plugin_objfile *obj = (struct plugin_objfile *)data; ++ char *secdatastart; ++ char *secdata; ++ ++ if (!is_prefix_of (ASM_FDO_SECTION_PREFIX, name)) ++ { ++ return 1; ++ } ++ ++ secdata = secdatastart = (char *)xmalloc (length * sizeof (char)); ++ offset += obj->file->offset; ++ if (offset != lseek (obj->file->fd, offset, SEEK_SET)) ++ { ++ return process_symtab_error (obj, secdatastart); ++ } ++ ++ do ++ { ++ ssize_t got = read (obj->file->fd, secdata, length); ++ ++ if (got == 0) ++ { ++ break; ++ } ++ else if (got > 0) ++ { ++ secdata += got; ++ length -= got; ++ } ++ else if (errno != EINTR) ++ { ++ return process_symtab_error (obj, secdatastart); ++ } ++ } ++ while (length > 0); ++ ++ if (length > 0) ++ { ++ return process_symtab_error (obj, secdatastart); ++ } ++ ++ process_section (secdatastart, secdata); ++ free (secdatastart); ++ return 1; ++} ++ ++/* Callback used by gold to check if the plugin will claim FILE. Writes ++ the result in CLAIMED. */ ++ ++static enum ld_plugin_status ++claim_file_handler (const struct ld_plugin_input_file *file, int *claimed) ++{ ++ struct plugin_objfile obj; ++ int err; ++ const char *errmsg = NULL; ++ /* If file is empty, bolt plugin do nothing and return ok. */ ++ if (file == NULL) ++ { ++ return LDPS_OK; ++ } ++ /* BOLT plugin does not need claimd number, so set *claimed to 0. */ ++ *claimed = 0; ++ ++ obj.file = file; ++ obj.objfile = simple_object_start_read (file->fd, file->offset, NULL, ++ &errmsg, &err); ++ ++ /* No file, but also no error code means unrecognized format, ++ skip it. */ ++ if (!obj.objfile && !err) ++ { ++ return LDPS_OK; ++ } ++ ++ if (obj.objfile) ++ { ++ simple_object_find_sections (obj.objfile, process_symtab, &obj, &err); ++ simple_object_release_read (obj.objfile); ++ } ++ ++ return LDPS_OK; ++} ++ ++/* Mangle filename path of BASE and output new allocated pointer with ++ mangled path. 
*/ ++ ++static string ++mangle_path (const string &base) ++{ ++ if (base.empty ()) ++ { ++ return base; ++ } ++ ++ /* Convert '/' to '#', convert '..' to '^', ++ convert ':' to '~' on DOS based file system. */ ++ ++ string new_path; ++ int base_len = base.size (); ++ int l = 0; ++ int r = 0; ++ while (l < base_len) ++ { ++ while (r < base_len && base[r] != '/') ++ { ++ r++; ++ } ++ ++ int len = r - l; ++ if (len == 2 && base[r - 2] == '.' && base[r - 1] == '.') ++ { ++ new_path += '^'; ++ } ++ else ++ { ++ new_path += base.substr (l, r - l); ++ } ++ if (r < base_len) ++ { ++ new_path += '#'; ++ } ++ ++ r++; ++ l = r; ++ } ++ return new_path; ++} ++ ++/* Generate BOLT profile name from file_name. */ ++ ++static string ++generate_bolt_profile_name (string file_name) ++{ ++ if (!IS_ABSOLUTE_PATH (file_name.c_str ())) ++ { ++ if (!bolt_dir_path.empty ()) ++ { ++ file_name = concat (get_current_dir_name (), ++ separator, file_name.c_str (), NULL); ++ file_name = mangle_path (file_name); ++ } ++ else ++ { ++ bolt_dir_path = DEFAULT_BOLT_OUT_DIR; ++ } ++ } ++ file_name = concat (bolt_dir_path.c_str (), separator, file_name.c_str (), ++ NULL); ++ return file_name; ++} ++ ++/* Match option_prefix from gcc_options, return the index of gcc_options. */ ++ ++static int ++match_gcc_option (const char *option_prefix) ++{ ++ if (option_prefix == NULL) ++ { ++ return -1; ++ } ++ ++ for (size_t i = 0; i < gcc_options.size (); i++) ++ { ++ if (is_prefix_of (option_prefix, gcc_options[i].c_str ())) ++ { ++ return i; ++ } ++ } ++ ++ return -1; ++} ++ ++/* Get options form environment COLLECT_GCC_OPTIONS. */ ++ ++static void ++get_options_from_collect_gcc_options (const char *collect_gcc, ++ const char *collect_gcc_options) ++{ ++ /* When using GCC, collect_gcc will not be empty. 
*/ ++ if (collect_gcc == NULL || collect_gcc_options == NULL) ++ { ++ return; ++ } ++ ++ size_t len = strlen (collect_gcc_options); ++ size_t r = 0; ++ while (r < len && collect_gcc_options[r] != '\0') ++ { ++ if (collect_gcc_options[r] == '\'') ++ { ++ string option; ++ ++r; ++ do ++ { ++ if (collect_gcc_options[r] == '\0') ++ { ++ MSG_ERROR ("Malformed COLLECT_GCC_OPTIONS"); ++ } ++ else if (is_prefix_of ("'\\''", &collect_gcc_options[r])) ++ { ++ option.push_back ('\''); ++ r += 4; ++ } ++ else if (collect_gcc_options[r] == '\'') ++ { ++ break; ++ } ++ else ++ { ++ option.push_back (collect_gcc_options[r]); ++ ++r; ++ } ++ } ++ while (1); ++ ++ if (!option.empty ()) ++ { ++ gcc_options.push_back (option); ++ } ++ } ++ ++r; ++ } ++} ++ ++/* Substitute comma with space in RAW_STRING, used for parser ++ -fbolt-option. */ ++ ++static string ++parser_bolt_optimize_option (string raw_string) ++{ ++ for (auto &ch : raw_string) ++ { ++ if (ch == ',') ++ { ++ ch = ' '; ++ } ++ } ++ ++ return raw_string; ++} ++ ++/* Process option -fauto-bolt. */ ++ ++static void ++process_auto_bolt_option (const string &flag_auto_bolt) ++{ ++ const int auto_bolt_index = match_gcc_option (flag_auto_bolt.c_str ()); ++ ++ if (auto_bolt_index != -1) ++ { ++ if (gcc_options[auto_bolt_index] == "-fauto-bolt") ++ { ++ MSG_INFO ("Use default output directory %s, ", DEFAULT_BOLT_OUT_DIR); ++ MSG_INFO ("Specify it using -fauto-bolt= if needed."); ++ } ++ else ++ { ++ string flag_auto_bolt_equal = "-fauto-bolt="; ++ bolt_dir_path = lrealpath (gcc_options[auto_bolt_index].substr ( ++ flag_auto_bolt_equal.size ()).c_str ()); ++ MSG_INFO ("Get bolt profile path: %s", bolt_dir_path.c_str ()); ++ } ++ bolt_profile_name = generate_bolt_profile_name(bolt_profile_name); ++ } ++} ++ ++/* Process option -fbolt-use=. 
*/ ++ ++static void ++process_bolt_use_option (const string &flag_bolt_use) ++{ ++ const int bolt_use_index = match_gcc_option (flag_bolt_use.c_str ()); ++ ++ if (bolt_use_index != -1) ++ { ++ /* bolt_profile_name may be initialized in ++ function process_output_option. */ ++ bolt_profile_name = gcc_options[bolt_use_index].substr ( ++ flag_bolt_use.size ()).c_str (); ++ if (bolt_profile_name.empty ()) ++ { ++ bolt_profile_name = DEFAULT_BOLT_OUT_NAME; ++ } ++ MSG_INFO ("Get bolt profile: %s", bolt_profile_name.c_str ()); ++ } ++} ++ ++/* Process option -fbolt-target=. */ ++ ++static void ++process_bolt_target_option (const string &flag_bolt_target) ++{ ++ const int bolt_target_index = match_gcc_option (flag_bolt_target.c_str ()); ++ if (bolt_target_index != -1) ++ { ++ bolt_opt_target = gcc_options[bolt_target_index].substr ( ++ flag_bolt_target.size ()).c_str (); ++ MSG_INFO ("Get bolt target: %s", bolt_opt_target.c_str ()); ++ } ++} ++ ++/* Process option -fbolt-option=. */ ++ ++static void ++process_bolt_option (const string &flag_bolt_optimize_options) ++{ ++ const int bolt_optimize_options_index ++ = match_gcc_option (flag_bolt_optimize_options.c_str ()); ++ ++ if (bolt_optimize_options_index != -1) ++ { ++ bolt_optimize_options.append (parser_bolt_optimize_option ( ++ gcc_options[bolt_optimize_options_index].substr ( ++ flag_bolt_optimize_options.size ()).c_str ())); ++ ++ MSG_INFO ("Get bolt optimize options is %s", ++ bolt_optimize_options.c_str ()); ++ } ++} ++ ++/* If -o is specified, set binary name and bolt profile name. This ++ function must be called before the process_bolt_use_option function. */ ++ ++static void ++process_output_option (const string &flag_o) ++{ ++ const int o_index = match_gcc_option (flag_o.c_str ()); ++ if (o_index != -1) ++ { ++ tmp_out_file_name = gcc_options[o_index + 1]; ++ /* bolt_profile_name may be overridden in ++ function process_auto_bolt_option and ++ process_bolt_use_option. 
*/ ++ bolt_profile_name = gcc_options[o_index + 1]; ++ bolt_profile_name.append (DEFAULT_BOLT_OUT_NAME_SUFFIX); ++ } ++ else ++ { ++ bolt_profile_name = DEFAULT_BOLT_OUT_NAME; ++ MSG_INFO ("Use default file name %s, specify it using -o if needed.", ++ DEFAULT_BOLT_OUT_NAME); ++ } ++} ++ ++/* Parse the plugin options. */ ++ ++static void ++process_gcc_option () ++{ ++ string flag_profile_use = "-fprofile-use"; ++ string flag_auto_profile = "-fauto-profile"; ++ string flag_auto_bolt = "-fauto-bolt"; ++ string flag_bolt_use = "-fbolt-use="; ++ string flag_bolt_target = "-fbolt-target="; ++ string flag_bolt_optimize_options = "-fbolt-option="; ++ string flag_o = "-o"; ++ ++ char *collect_gcc = getenv ("COLLECT_GCC"); ++ char *collect_gcc_option = getenv ("COLLECT_GCC_OPTIONS"); ++ ++ get_options_from_collect_gcc_options (collect_gcc, collect_gcc_option); ++ ++ /* Function process_output_option should be processed before ++ process_auto_bolt_option to obtain correct bolt_profile_name. */ ++ process_output_option (flag_o); ++ process_auto_bolt_option (flag_auto_bolt); ++ process_bolt_use_option (flag_bolt_use); ++ process_bolt_target_option (flag_bolt_target); ++ process_bolt_option (flag_bolt_optimize_options); ++ ++ if (match_gcc_option (flag_profile_use.c_str ()) != -1) ++ { ++ fdo_type = feedback_type::PGO_TYPE; ++ } ++ else if (match_gcc_option (flag_auto_profile.c_str ()) != -1) ++ { ++ fdo_type = feedback_type::AFDO_TYPE; ++ } ++ ++ if (match_gcc_option (flag_bolt_use.c_str ()) != -1) ++ { ++ fdo_type = feedback_type::BOLT_TYPE; ++ } ++ ++ if (fdo_type == feedback_type::NULL_TYPE) ++ { ++ MSG_ERROR ("No feedback data, maybe use -fprofile-use " ++ "-fbolt-use or -fauto-profile."); ++ } ++} ++ ++/* Register callback function including all_symbols_read_handler, ++ cleanup_handler and claim_file_handler. 
*/ ++ ++static void ++register_callback_function () ++{ ++ enum ld_plugin_status status; ++ ++ if (linker_output_set && linker_output != LDPO_EXEC) ++ { ++ MSG_INFO ("This linker[%d] is not for exec, just skip.", linker_output); ++ return; ++ } ++ ++ CHECK (register_claim_file, LDPL_FATAL, "register_claim_file not found"); ++ status = register_claim_file (claim_file_handler); ++ CHECK (status == LDPS_OK, LDPL_FATAL, ++ "could not register the claim_file callback"); ++ ++ if (register_cleanup) ++ { ++ status = register_cleanup (cleanup_handler); ++ CHECK (status == LDPS_OK, LDPL_FATAL, ++ "could not register the cleanup callback"); ++ } ++ ++ if (register_all_symbols_read) ++ { ++ status = register_all_symbols_read (all_symbols_read_handler); ++ CHECK (status == LDPS_OK, LDPL_FATAL, ++ "could not register the all_symbols_read callback"); ++ } ++} ++ ++/* Called by gold after loading the plugin. TV is the transfer vector. */ ++ ++enum ld_plugin_status ++onload (struct ld_plugin_tv *tv) ++{ ++ struct ld_plugin_tv *p; ++ ++ p = tv; ++ while (p->tv_tag) ++ { ++ switch (p->tv_tag) ++ { ++ case LDPT_MESSAGE: ++ message = p->tv_u.tv_message; ++ break; ++ case LDPT_REGISTER_CLAIM_FILE_HOOK: ++ register_claim_file = p->tv_u.tv_register_claim_file; ++ break; ++ case LDPT_REGISTER_ALL_SYMBOLS_READ_HOOK: ++ register_all_symbols_read = p->tv_u.tv_register_all_symbols_read; ++ break; ++ case LDPT_REGISTER_CLEANUP_HOOK: ++ register_cleanup = p->tv_u.tv_register_cleanup; ++ break; ++ case LDPT_LINKER_OUTPUT: ++ linker_output = (enum ld_plugin_output_file_type)p->tv_u.tv_val; ++ linker_output_set = 1; ++ break; ++ default: ++ break; ++ } ++ p++; ++ } ++ ++ register_callback_function (); ++ process_gcc_option (); ++ ++ return LDPS_OK; ++} ++ +diff --git a/bolt-plugin/config.h.in b/bolt-plugin/config.h.in +new file mode 100644 +index 000000000..9e9d316ec +--- /dev/null ++++ b/bolt-plugin/config.h.in +@@ -0,0 +1,179 @@ ++/* config.h.in. Generated from configure.ac by autoheader. 
*/ ++ ++/* Define to 1 if you have the header file. */ ++#undef HAVE_DLFCN_H ++ ++/* Define to 1 if you have the header file. */ ++#undef HAVE_INTTYPES_H ++ ++/* Define to 1 if you have the header file. */ ++#undef HAVE_MINIX_CONFIG_H ++ ++/* Define to 1 if you have the header file. */ ++#undef HAVE_STDINT_H ++ ++/* Define to 1 if you have the header file. */ ++#undef HAVE_STDIO_H ++ ++/* Define to 1 if you have the header file. */ ++#undef HAVE_STDLIB_H ++ ++/* Define to 1 if you have the header file. */ ++#undef HAVE_STRINGS_H ++ ++/* Define to 1 if you have the header file. */ ++#undef HAVE_STRING_H ++ ++/* Define to 1 if you have the header file. */ ++#undef HAVE_SYS_STAT_H ++ ++/* Define to 1 if you have the header file. */ ++#undef HAVE_SYS_TYPES_H ++ ++/* Define to 1 if you have that is POSIX.1 compatible. */ ++#undef HAVE_SYS_WAIT_H ++ ++/* Define to 1 if you have the header file. */ ++#undef HAVE_UNISTD_H ++ ++/* Define to 1 if you have the header file. */ ++#undef HAVE_WCHAR_H ++ ++/* Define to the sub-directory where libtool stores uninstalled libraries. */ ++#undef LT_OBJDIR ++ ++/* Name of package */ ++#undef PACKAGE ++ ++/* Define to the address where bug reports for this package should be sent. */ ++#undef PACKAGE_BUGREPORT ++ ++/* Define to the full name of this package. */ ++#undef PACKAGE_NAME ++ ++/* Define to the full name and version of this package. */ ++#undef PACKAGE_STRING ++ ++/* Define to the one symbol short name of this package. */ ++#undef PACKAGE_TARNAME ++ ++/* Define to the home page for this package. */ ++#undef PACKAGE_URL ++ ++/* Define to the version of this package. */ ++#undef PACKAGE_VERSION ++ ++/* Define to 1 if all of the C90 standard headers exist (not just the ones ++ required in a freestanding environment). This macro is provided for ++ backward compatibility; new code need not use it. */ ++#undef STDC_HEADERS ++ ++/* Enable extensions on AIX 3, Interix. 
*/ ++#ifndef _ALL_SOURCE ++# undef _ALL_SOURCE ++#endif ++/* Enable general extensions on macOS. */ ++#ifndef _DARWIN_C_SOURCE ++# undef _DARWIN_C_SOURCE ++#endif ++/* Enable general extensions on Solaris. */ ++#ifndef __EXTENSIONS__ ++# undef __EXTENSIONS__ ++#endif ++/* Enable GNU extensions on systems that have them. */ ++#ifndef _GNU_SOURCE ++# undef _GNU_SOURCE ++#endif ++/* Enable X/Open compliant socket functions that do not require linking ++ with -lxnet on HP-UX 11.11. */ ++#ifndef _HPUX_ALT_XOPEN_SOCKET_API ++# undef _HPUX_ALT_XOPEN_SOCKET_API ++#endif ++/* Identify the host operating system as Minix. ++ This macro does not affect the system headers' behavior. ++ A future release of Autoconf may stop defining this macro. */ ++#ifndef _MINIX ++# undef _MINIX ++#endif ++/* Enable general extensions on NetBSD. ++ Enable NetBSD compatibility extensions on Minix. */ ++#ifndef _NETBSD_SOURCE ++# undef _NETBSD_SOURCE ++#endif ++/* Enable OpenBSD compatibility extensions on NetBSD. ++ Oddly enough, this does nothing on OpenBSD. */ ++#ifndef _OPENBSD_SOURCE ++# undef _OPENBSD_SOURCE ++#endif ++/* Define to 1 if needed for POSIX-compatible behavior. */ ++#ifndef _POSIX_SOURCE ++# undef _POSIX_SOURCE ++#endif ++/* Define to 2 if needed for POSIX-compatible behavior. */ ++#ifndef _POSIX_1_SOURCE ++# undef _POSIX_1_SOURCE ++#endif ++/* Enable POSIX-compatible threading on Solaris. */ ++#ifndef _POSIX_PTHREAD_SEMANTICS ++# undef _POSIX_PTHREAD_SEMANTICS ++#endif ++/* Enable extensions specified by ISO/IEC TS 18661-5:2014. */ ++#ifndef __STDC_WANT_IEC_60559_ATTRIBS_EXT__ ++# undef __STDC_WANT_IEC_60559_ATTRIBS_EXT__ ++#endif ++/* Enable extensions specified by ISO/IEC TS 18661-1:2014. */ ++#ifndef __STDC_WANT_IEC_60559_BFP_EXT__ ++# undef __STDC_WANT_IEC_60559_BFP_EXT__ ++#endif ++/* Enable extensions specified by ISO/IEC TS 18661-2:2015. 
*/ ++#ifndef __STDC_WANT_IEC_60559_DFP_EXT__ ++# undef __STDC_WANT_IEC_60559_DFP_EXT__ ++#endif ++/* Enable extensions specified by ISO/IEC TS 18661-4:2015. */ ++#ifndef __STDC_WANT_IEC_60559_FUNCS_EXT__ ++# undef __STDC_WANT_IEC_60559_FUNCS_EXT__ ++#endif ++/* Enable extensions specified by ISO/IEC TS 18661-3:2015. */ ++#ifndef __STDC_WANT_IEC_60559_TYPES_EXT__ ++# undef __STDC_WANT_IEC_60559_TYPES_EXT__ ++#endif ++/* Enable extensions specified by ISO/IEC TR 24731-2:2010. */ ++#ifndef __STDC_WANT_LIB_EXT2__ ++# undef __STDC_WANT_LIB_EXT2__ ++#endif ++/* Enable extensions specified by ISO/IEC 24747:2009. */ ++#ifndef __STDC_WANT_MATH_SPEC_FUNCS__ ++# undef __STDC_WANT_MATH_SPEC_FUNCS__ ++#endif ++/* Enable extensions on HP NonStop. */ ++#ifndef _TANDEM_SOURCE ++# undef _TANDEM_SOURCE ++#endif ++/* Enable X/Open extensions. Define to 500 only if necessary ++ to make mbstate_t available. */ ++#ifndef _XOPEN_SOURCE ++# undef _XOPEN_SOURCE ++#endif ++ ++ ++/* Version number of package */ ++#undef VERSION ++ ++/* Number of bits in a file offset, on hosts where this is settable. */ ++#undef _FILE_OFFSET_BITS ++ ++/* Define for large files, on AIX-style hosts. */ ++#undef _LARGE_FILES ++ ++/* Define for Solaris 2.5.1 so the uint64_t typedef from , ++ , or is not used. If the typedef were allowed, the ++ #define below would cause a syntax error. */ ++#undef _UINT64_T ++ ++/* Define to the type of a signed integer type of width exactly 64 bits if ++ such a type exists and the standard includes do not define it. */ ++#undef int64_t ++ ++/* Define to the type of an unsigned integer type of width exactly 64 bits if ++ such a type exists and the standard includes do not define it. */ ++#undef uint64_t +diff --git a/bolt-plugin/configure b/bolt-plugin/configure +new file mode 100755 +index 000000000..63bde9a41 +--- /dev/null ++++ b/bolt-plugin/configure +@@ -0,0 +1,20909 @@ ++#! /bin/sh ++# Guess values for system-dependent variables and create Makefiles. 
++# Generated by GNU Autoconf 2.71 for bolt plugin for ld 0.1. ++# ++# ++# Copyright (C) 1992-1996, 1998-2017, 2020-2021 Free Software Foundation, ++# Inc. ++# ++# ++# This configure script is free software; the Free Software Foundation ++# gives unlimited permission to copy, distribute and modify it. ++## -------------------- ## ++## M4sh Initialization. ## ++## -------------------- ## ++ ++# Be more Bourne compatible ++DUALCASE=1; export DUALCASE # for MKS sh ++as_nop=: ++if test ${ZSH_VERSION+y} && (emulate sh) >/dev/null 2>&1 ++then : ++ emulate sh ++ NULLCMD=: ++ # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which ++ # is contrary to our usage. Disable this feature. ++ alias -g '${1+"$@"}'='"$@"' ++ setopt NO_GLOB_SUBST ++else $as_nop ++ case `(set -o) 2>/dev/null` in #( ++ *posix*) : ++ set -o posix ;; #( ++ *) : ++ ;; ++esac ++fi ++ ++ ++ ++# Reset variables that may have inherited troublesome values from ++# the environment. ++ ++# IFS needs to be set, to space, tab, and newline, in precisely that order. ++# (If _AS_PATH_WALK were called with IFS unset, it would have the ++# side effect of setting IFS to empty, thus disabling word splitting.) ++# Quoting is to prevent editors from complaining about space-tab. ++as_nl=' ++' ++export as_nl ++IFS=" "" $as_nl" ++ ++PS1='$ ' ++PS2='> ' ++PS4='+ ' ++ ++# Ensure predictable behavior from utilities with locale-dependent output. ++LC_ALL=C ++export LC_ALL ++LANGUAGE=C ++export LANGUAGE ++ ++# We cannot yet rely on "unset" to work, but we need these variables ++# to be unset--not just set to an empty or harmless value--now, to ++# avoid bugs in old shells (e.g. pre-3.0 UWIN ksh). This construct ++# also avoids known problems related to "unset" and subshell syntax ++# in other old shells (e.g. bash 2.01 and pdksh 5.2.14). 
++for as_var in BASH_ENV ENV MAIL MAILPATH CDPATH ++do eval test \${$as_var+y} \ ++ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : ++done ++ ++# Ensure that fds 0, 1, and 2 are open. ++if (exec 3>&0) 2>/dev/null; then :; else exec 0&1) 2>/dev/null; then :; else exec 1>/dev/null; fi ++if (exec 3>&2) ; then :; else exec 2>/dev/null; fi ++ ++# The user is always right. ++if ${PATH_SEPARATOR+false} :; then ++ PATH_SEPARATOR=: ++ (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { ++ (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || ++ PATH_SEPARATOR=';' ++ } ++fi ++ ++ ++# Find who we are. Look in the path if we contain no directory separator. ++as_myself= ++case $0 in #(( ++ *[\\/]* ) as_myself=$0 ;; ++ *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ test -r "$as_dir$0" && as_myself=$as_dir$0 && break ++ done ++IFS=$as_save_IFS ++ ++ ;; ++esac ++# We did not find ourselves, most probably we were run as `sh COMMAND' ++# in which case we are not to be found in the path. ++if test "x$as_myself" = x; then ++ as_myself=$0 ++fi ++if test ! -f "$as_myself"; then ++ printf "%s\n" "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 ++ exit 1 ++fi ++ ++ ++# Use a proper internal environment variable to ensure we don't fall ++ # into an infinite loop, continuously re-executing ourselves. ++ if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then ++ _as_can_reexec=no; export _as_can_reexec; ++ # We cannot yet assume a decent shell, so we have to provide a ++# neutralization value for shells without unset; and this also ++# works around shells that cannot unset nonexistent variables. ++# Preserve -v and -x to the replacement shell. 
++BASH_ENV=/dev/null ++ENV=/dev/null ++(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV ++case $- in # (((( ++ *v*x* | *x*v* ) as_opts=-vx ;; ++ *v* ) as_opts=-v ;; ++ *x* ) as_opts=-x ;; ++ * ) as_opts= ;; ++esac ++exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} ++# Admittedly, this is quite paranoid, since all the known shells bail ++# out after a failed `exec'. ++printf "%s\n" "$0: could not re-execute with $CONFIG_SHELL" >&2 ++exit 255 ++ fi ++ # We don't want this to propagate to other subprocesses. ++ { _as_can_reexec=; unset _as_can_reexec;} ++if test "x$CONFIG_SHELL" = x; then ++ as_bourne_compatible="as_nop=: ++if test \${ZSH_VERSION+y} && (emulate sh) >/dev/null 2>&1 ++then : ++ emulate sh ++ NULLCMD=: ++ # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which ++ # is contrary to our usage. Disable this feature. ++ alias -g '\${1+\"\$@\"}'='\"\$@\"' ++ setopt NO_GLOB_SUBST ++else \$as_nop ++ case \`(set -o) 2>/dev/null\` in #( ++ *posix*) : ++ set -o posix ;; #( ++ *) : ++ ;; ++esac ++fi ++" ++ as_required="as_fn_return () { (exit \$1); } ++as_fn_success () { as_fn_return 0; } ++as_fn_failure () { as_fn_return 1; } ++as_fn_ret_success () { return 0; } ++as_fn_ret_failure () { return 1; } ++ ++exitcode=0 ++as_fn_success || { exitcode=1; echo as_fn_success failed.; } ++as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; } ++as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; } ++as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; } ++if ( set x; as_fn_ret_success y && test x = \"\$1\" ) ++then : ++ ++else \$as_nop ++ exitcode=1; echo positional parameters were not saved. 
++fi ++test x\$exitcode = x0 || exit 1 ++blah=\$(echo \$(echo blah)) ++test x\"\$blah\" = xblah || exit 1 ++test -x / || exit 1" ++ as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO ++ as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO ++ eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && ++ test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1 ++ ++ test -n \"\${ZSH_VERSION+set}\${BASH_VERSION+set}\" || ( ++ ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' ++ ECHO=\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO ++ ECHO=\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO ++ PATH=/empty FPATH=/empty; export PATH FPATH ++ test \"X\`printf %s \$ECHO\`\" = \"X\$ECHO\" \\ ++ || test \"X\`print -r -- \$ECHO\`\" = \"X\$ECHO\" ) || exit 1 ++test \$(( 1 + 1 )) = 2 || exit 1" ++ if (eval "$as_required") 2>/dev/null ++then : ++ as_have_required=yes ++else $as_nop ++ as_have_required=no ++fi ++ if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null ++then : ++ ++else $as_nop ++ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++as_found=false ++for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ as_found=: ++ case $as_dir in #( ++ /*) ++ for as_base in sh bash ksh sh5; do ++ # Try only shells that exist, to save several forks. 
++ as_shell=$as_dir$as_base ++ if { test -f "$as_shell" || test -f "$as_shell.exe"; } && ++ as_run=a "$as_shell" -c "$as_bourne_compatible""$as_required" 2>/dev/null ++then : ++ CONFIG_SHELL=$as_shell as_have_required=yes ++ if as_run=a "$as_shell" -c "$as_bourne_compatible""$as_suggested" 2>/dev/null ++then : ++ break 2 ++fi ++fi ++ done;; ++ esac ++ as_found=false ++done ++IFS=$as_save_IFS ++if $as_found ++then : ++ ++else $as_nop ++ if { test -f "$SHELL" || test -f "$SHELL.exe"; } && ++ as_run=a "$SHELL" -c "$as_bourne_compatible""$as_required" 2>/dev/null ++then : ++ CONFIG_SHELL=$SHELL as_have_required=yes ++fi ++fi ++ ++ ++ if test "x$CONFIG_SHELL" != x ++then : ++ export CONFIG_SHELL ++ # We cannot yet assume a decent shell, so we have to provide a ++# neutralization value for shells without unset; and this also ++# works around shells that cannot unset nonexistent variables. ++# Preserve -v and -x to the replacement shell. ++BASH_ENV=/dev/null ++ENV=/dev/null ++(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV ++case $- in # (((( ++ *v*x* | *x*v* ) as_opts=-vx ;; ++ *v* ) as_opts=-v ;; ++ *x* ) as_opts=-x ;; ++ * ) as_opts= ;; ++esac ++exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} ++# Admittedly, this is quite paranoid, since all the known shells bail ++# out after a failed `exec'. ++printf "%s\n" "$0: could not re-execute with $CONFIG_SHELL" >&2 ++exit 255 ++fi ++ ++ if test x$as_have_required = xno ++then : ++ printf "%s\n" "$0: This script requires a shell more modern than all" ++ printf "%s\n" "$0: the shells that I found on your system." ++ if test ${ZSH_VERSION+y} ; then ++ printf "%s\n" "$0: In particular, zsh $ZSH_VERSION has bugs and should" ++ printf "%s\n" "$0: be upgraded to zsh 4.3.4 or later." ++ else ++ printf "%s\n" "$0: Please tell bug-autoconf@gnu.org about your system, ++$0: including any error possibly output before this ++$0: message. 
Then install a modern shell, or manually run ++$0: the script under such a shell if you do have one." ++ fi ++ exit 1 ++fi ++fi ++fi ++SHELL=${CONFIG_SHELL-/bin/sh} ++export SHELL ++# Unset more variables known to interfere with behavior of common tools. ++CLICOLOR_FORCE= GREP_OPTIONS= ++unset CLICOLOR_FORCE GREP_OPTIONS ++ ++## --------------------- ## ++## M4sh Shell Functions. ## ++## --------------------- ## ++# as_fn_unset VAR ++# --------------- ++# Portably unset VAR. ++as_fn_unset () ++{ ++ { eval $1=; unset $1;} ++} ++as_unset=as_fn_unset ++ ++ ++# as_fn_set_status STATUS ++# ----------------------- ++# Set $? to STATUS, without forking. ++as_fn_set_status () ++{ ++ return $1 ++} # as_fn_set_status ++ ++# as_fn_exit STATUS ++# ----------------- ++# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. ++as_fn_exit () ++{ ++ set +e ++ as_fn_set_status $1 ++ exit $1 ++} # as_fn_exit ++# as_fn_nop ++# --------- ++# Do nothing but, unlike ":", preserve the value of $?. ++as_fn_nop () ++{ ++ return $? ++} ++as_nop=as_fn_nop ++ ++# as_fn_mkdir_p ++# ------------- ++# Create "$as_dir" as a directory, including parents if necessary. ++as_fn_mkdir_p () ++{ ++ ++ case $as_dir in #( ++ -*) as_dir=./$as_dir;; ++ esac ++ test -d "$as_dir" || eval $as_mkdir_p || { ++ as_dirs= ++ while :; do ++ case $as_dir in #( ++ *\'*) as_qdir=`printf "%s\n" "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( ++ *) as_qdir=$as_dir;; ++ esac ++ as_dirs="'$as_qdir' $as_dirs" ++ as_dir=`$as_dirname -- "$as_dir" || ++$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ ++ X"$as_dir" : 'X\(//\)[^/]' \| \ ++ X"$as_dir" : 'X\(//\)$' \| \ ++ X"$as_dir" : 'X\(/\)' \| . 
2>/dev/null || ++printf "%s\n" X"$as_dir" | ++ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ ++ s//\1/ ++ q ++ } ++ /^X\(\/\/\)[^/].*/{ ++ s//\1/ ++ q ++ } ++ /^X\(\/\/\)$/{ ++ s//\1/ ++ q ++ } ++ /^X\(\/\).*/{ ++ s//\1/ ++ q ++ } ++ s/.*/./; q'` ++ test -d "$as_dir" && break ++ done ++ test -z "$as_dirs" || eval "mkdir $as_dirs" ++ } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" ++ ++ ++} # as_fn_mkdir_p ++ ++# as_fn_executable_p FILE ++# ----------------------- ++# Test if FILE is an executable regular file. ++as_fn_executable_p () ++{ ++ test -f "$1" && test -x "$1" ++} # as_fn_executable_p ++# as_fn_append VAR VALUE ++# ---------------------- ++# Append the text in VALUE to the end of the definition contained in VAR. Take ++# advantage of any shell optimizations that allow amortized linear growth over ++# repeated appends, instead of the typical quadratic growth present in naive ++# implementations. ++if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null ++then : ++ eval 'as_fn_append () ++ { ++ eval $1+=\$2 ++ }' ++else $as_nop ++ as_fn_append () ++ { ++ eval $1=\$$1\$2 ++ } ++fi # as_fn_append ++ ++# as_fn_arith ARG... ++# ------------------ ++# Perform arithmetic evaluation on the ARGs, and store the result in the ++# global $as_val. Take advantage of shells that can avoid forks. The arguments ++# must be portable across $(()) and expr. ++if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null ++then : ++ eval 'as_fn_arith () ++ { ++ as_val=$(( $* )) ++ }' ++else $as_nop ++ as_fn_arith () ++ { ++ as_val=`expr "$@" || test $? -eq 1` ++ } ++fi # as_fn_arith ++ ++# as_fn_nop ++# --------- ++# Do nothing but, unlike ":", preserve the value of $?. ++as_fn_nop () ++{ ++ return $? ++} ++as_nop=as_fn_nop ++ ++# as_fn_error STATUS ERROR [LINENO LOG_FD] ++# ---------------------------------------- ++# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are ++# provided, also output the error to LOG_FD, referencing LINENO. 
Then exit the ++# script with STATUS, using 1 if that was 0. ++as_fn_error () ++{ ++ as_status=$1; test $as_status -eq 0 && as_status=1 ++ if test "$4"; then ++ as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 ++ fi ++ printf "%s\n" "$as_me: error: $2" >&2 ++ as_fn_exit $as_status ++} # as_fn_error ++ ++if expr a : '\(a\)' >/dev/null 2>&1 && ++ test "X`expr 00001 : '.*\(...\)'`" = X001; then ++ as_expr=expr ++else ++ as_expr=false ++fi ++ ++if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then ++ as_basename=basename ++else ++ as_basename=false ++fi ++ ++if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then ++ as_dirname=dirname ++else ++ as_dirname=false ++fi ++ ++as_me=`$as_basename -- "$0" || ++$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ ++ X"$0" : 'X\(//\)$' \| \ ++ X"$0" : 'X\(/\)' \| . 2>/dev/null || ++printf "%s\n" X/"$0" | ++ sed '/^.*\/\([^/][^/]*\)\/*$/{ ++ s//\1/ ++ q ++ } ++ /^X\/\(\/\/\)$/{ ++ s//\1/ ++ q ++ } ++ /^X\/\(\/\).*/{ ++ s//\1/ ++ q ++ } ++ s/.*/./; q'` ++ ++# Avoid depending upon Character Ranges. ++as_cr_letters='abcdefghijklmnopqrstuvwxyz' ++as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' ++as_cr_Letters=$as_cr_letters$as_cr_LETTERS ++as_cr_digits='0123456789' ++as_cr_alnum=$as_cr_Letters$as_cr_digits ++ ++ ++ as_lineno_1=$LINENO as_lineno_1a=$LINENO ++ as_lineno_2=$LINENO as_lineno_2a=$LINENO ++ eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" && ++ test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || { ++ # Blame Lee E. McMahon (1931-1989) for sed's syntax. 
:-) ++ sed -n ' ++ p ++ /[$]LINENO/= ++ ' <$as_myself | ++ sed ' ++ s/[$]LINENO.*/&-/ ++ t lineno ++ b ++ :lineno ++ N ++ :loop ++ s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ ++ t loop ++ s/-\n.*// ++ ' >$as_me.lineno && ++ chmod +x "$as_me.lineno" || ++ { printf "%s\n" "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } ++ ++ # If we had to re-execute with $CONFIG_SHELL, we're ensured to have ++ # already done that, so ensure we don't try to do so again and fall ++ # in an infinite loop. This has already happened in practice. ++ _as_can_reexec=no; export _as_can_reexec ++ # Don't try to exec as it changes $[0], causing all sort of problems ++ # (the dirname of $[0] is not the place where we might find the ++ # original and so on. Autoconf is especially sensitive to this). ++ . "./$as_me.lineno" ++ # Exit status is that of the last command. ++ exit ++} ++ ++ ++# Determine whether it's possible to make 'echo' print without a newline. ++# These variables are no longer used directly by Autoconf, but are AC_SUBSTed ++# for compatibility with existing Makefiles. ++ECHO_C= ECHO_N= ECHO_T= ++case `echo -n x` in #((((( ++-n*) ++ case `echo 'xy\c'` in ++ *c*) ECHO_T=' ';; # ECHO_T is single tab character. ++ xy) ECHO_C='\c';; ++ *) echo `echo ksh88 bug on AIX 6.1` > /dev/null ++ ECHO_T=' ';; ++ esac;; ++*) ++ ECHO_N='-n';; ++esac ++ ++# For backward compatibility with old third-party macros, we provide ++# the shell variables $as_echo and $as_echo_n. New code should use ++# AS_ECHO(["message"]) and AS_ECHO_N(["message"]), respectively. ++as_echo='printf %s\n' ++as_echo_n='printf %s' ++ ++ ++rm -f conf$$ conf$$.exe conf$$.file ++if test -d conf$$.dir; then ++ rm -f conf$$.dir/conf$$.file ++else ++ rm -f conf$$.dir ++ mkdir conf$$.dir 2>/dev/null ++fi ++if (echo >conf$$.file) 2>/dev/null; then ++ if ln -s conf$$.file conf$$ 2>/dev/null; then ++ as_ln_s='ln -s' ++ # ... 
but there are two gotchas: ++ # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. ++ # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. ++ # In both cases, we have to default to `cp -pR'. ++ ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || ++ as_ln_s='cp -pR' ++ elif ln conf$$.file conf$$ 2>/dev/null; then ++ as_ln_s=ln ++ else ++ as_ln_s='cp -pR' ++ fi ++else ++ as_ln_s='cp -pR' ++fi ++rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file ++rmdir conf$$.dir 2>/dev/null ++ ++if mkdir -p . 2>/dev/null; then ++ as_mkdir_p='mkdir -p "$as_dir"' ++else ++ test -d ./-p && rmdir ./-p ++ as_mkdir_p=false ++fi ++ ++as_test_x='test -x' ++as_executable_p=as_fn_executable_p ++ ++# Sed expression to map a string onto a valid CPP name. ++as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" ++ ++# Sed expression to map a string onto a valid variable name. ++as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" ++ ++SHELL=${CONFIG_SHELL-/bin/sh} ++ ++ ++test -n "$DJDIR" || exec 7<&0 &1 ++ ++# Name of the host. ++# hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status, ++# so uname gets run too. ++ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` ++ ++# ++# Initializations. ++# ++ac_default_prefix=/usr/local ++ac_clean_files= ++ac_config_libobj_dir=. ++LIBOBJS= ++cross_compiling=no ++subdirs= ++MFLAGS= ++MAKEFLAGS= ++ ++# Identity of this package. ++PACKAGE_NAME='bolt plugin for ld' ++PACKAGE_TARNAME='bolt-plugin' ++PACKAGE_VERSION='0.1' ++PACKAGE_STRING='bolt plugin for ld 0.1' ++PACKAGE_BUGREPORT='' ++PACKAGE_URL='' ++ ++# Factoring default headers for most tests. 
++ac_includes_default="\ ++#include ++#ifdef HAVE_STDIO_H ++# include ++#endif ++#ifdef HAVE_STDLIB_H ++# include ++#endif ++#ifdef HAVE_STRING_H ++# include ++#endif ++#ifdef HAVE_INTTYPES_H ++# include ++#endif ++#ifdef HAVE_STDINT_H ++# include ++#endif ++#ifdef HAVE_STRINGS_H ++# include ++#endif ++#ifdef HAVE_SYS_TYPES_H ++# include ++#endif ++#ifdef HAVE_SYS_STAT_H ++# include ++#endif ++#ifdef HAVE_UNISTD_H ++# include ++#endif" ++ ++ac_header_c_list= ++ac_subst_vars='am__EXEEXT_FALSE ++am__EXEEXT_TRUE ++LTLIBOBJS ++LIBOBJS ++target_noncanonical ++CXXCPP ++LT_SYS_LIBRARY_PATH ++OTOOL64 ++OTOOL ++LIPO ++NMEDIT ++DSYMUTIL ++MANIFEST_TOOL ++RANLIB ++ac_ct_AR ++AR ++DLLTOOL ++OBJDUMP ++FILECMD ++LN_S ++NM ++ac_ct_DUMPBIN ++DUMPBIN ++LD ++FGREP ++EGREP ++GREP ++SED ++LIBTOOL ++real_target_noncanonical ++accel_dir_suffix ++gcc_build_dir ++ac_bolt_plugin_ldflags ++am__fastdepCXX_FALSE ++am__fastdepCXX_TRUE ++CXXDEPMODE ++ac_ct_CXX ++CXXFLAGS ++CXX ++am__fastdepCC_FALSE ++am__fastdepCC_TRUE ++CCDEPMODE ++am__nodep ++AMDEPBACKSLASH ++AMDEP_FALSE ++AMDEP_TRUE ++am__include ++DEPDIR ++OBJEXT ++EXEEXT ++ac_ct_CC ++CPPFLAGS ++LDFLAGS ++CFLAGS ++CC ++with_libiberty ++MAINT ++MAINTAINER_MODE_FALSE ++MAINTAINER_MODE_TRUE ++AM_BACKSLASH ++AM_DEFAULT_VERBOSITY ++AM_DEFAULT_V ++AM_V ++CSCOPE ++ETAGS ++CTAGS ++am__untar ++am__tar ++AMTAR ++am__leading_dot ++SET_MAKE ++AWK ++mkdir_p ++MKDIR_P ++INSTALL_STRIP_PROGRAM ++STRIP ++install_sh ++MAKEINFO ++AUTOHEADER ++AUTOMAKE ++AUTOCONF ++ACLOCAL ++VERSION ++PACKAGE ++CYGPATH_W ++am__isrc ++INSTALL_DATA ++INSTALL_SCRIPT ++INSTALL_PROGRAM ++target_os ++target_vendor ++target_cpu ++target ++host_os ++host_vendor ++host_cpu ++host ++build_os ++build_vendor ++build_cpu ++build ++target_alias ++host_alias ++build_alias ++LIBS ++ECHO_T ++ECHO_N ++ECHO_C ++DEFS ++mandir ++localedir ++libdir ++psdir ++pdfdir ++dvidir ++htmldir ++infodir ++docdir ++oldincludedir ++includedir ++runstatedir ++localstatedir ++sharedstatedir ++sysconfdir 
++datadir ++datarootdir ++libexecdir ++sbindir ++bindir ++program_transform_name ++prefix ++exec_prefix ++PACKAGE_URL ++PACKAGE_BUGREPORT ++PACKAGE_STRING ++PACKAGE_VERSION ++PACKAGE_TARNAME ++PACKAGE_NAME ++PATH_SEPARATOR ++SHELL ++am__quote' ++ac_subst_files='' ++ac_user_opts=' ++enable_option_checking ++enable_silent_rules ++enable_maintainer_mode ++with_libiberty ++enable_dependency_tracking ++enable_largefile ++enable_shared ++enable_static ++with_pic ++enable_fast_install ++with_aix_soname ++with_gnu_ld ++with_sysroot ++enable_libtool_lock ++' ++ ac_precious_vars='build_alias ++host_alias ++target_alias ++CC ++CFLAGS ++LDFLAGS ++LIBS ++CPPFLAGS ++CXX ++CXXFLAGS ++CCC ++LT_SYS_LIBRARY_PATH ++CXXCPP' ++ ++ ++# Initialize some variables set by options. ++ac_init_help= ++ac_init_version=false ++ac_unrecognized_opts= ++ac_unrecognized_sep= ++# The variables have the same names as the options, with ++# dashes changed to underlines. ++cache_file=/dev/null ++exec_prefix=NONE ++no_create= ++no_recursion= ++prefix=NONE ++program_prefix=NONE ++program_suffix=NONE ++program_transform_name=s,x,x, ++silent= ++site= ++srcdir= ++verbose= ++x_includes=NONE ++x_libraries=NONE ++ ++# Installation directory options. ++# These are left unexpanded so users can "make install exec_prefix=/foo" ++# and all the variables that are supposed to be based on exec_prefix ++# by default will actually change. ++# Use braces instead of parens because sh, perl, etc. also accept them. ++# (The list follows the same order as the GNU Coding Standards.) 
++bindir='${exec_prefix}/bin' ++sbindir='${exec_prefix}/sbin' ++libexecdir='${exec_prefix}/libexec' ++datarootdir='${prefix}/share' ++datadir='${datarootdir}' ++sysconfdir='${prefix}/etc' ++sharedstatedir='${prefix}/com' ++localstatedir='${prefix}/var' ++runstatedir='${localstatedir}/run' ++includedir='${prefix}/include' ++oldincludedir='/usr/include' ++docdir='${datarootdir}/doc/${PACKAGE_TARNAME}' ++infodir='${datarootdir}/info' ++htmldir='${docdir}' ++dvidir='${docdir}' ++pdfdir='${docdir}' ++psdir='${docdir}' ++libdir='${exec_prefix}/lib' ++localedir='${datarootdir}/locale' ++mandir='${datarootdir}/man' ++ ++ac_prev= ++ac_dashdash= ++for ac_option ++do ++ # If the previous option needs an argument, assign it. ++ if test -n "$ac_prev"; then ++ eval $ac_prev=\$ac_option ++ ac_prev= ++ continue ++ fi ++ ++ case $ac_option in ++ *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; ++ *=) ac_optarg= ;; ++ *) ac_optarg=yes ;; ++ esac ++ ++ case $ac_dashdash$ac_option in ++ --) ++ ac_dashdash=yes ;; ++ ++ -bindir | --bindir | --bindi | --bind | --bin | --bi) ++ ac_prev=bindir ;; ++ -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) ++ bindir=$ac_optarg ;; ++ ++ -build | --build | --buil | --bui | --bu) ++ ac_prev=build_alias ;; ++ -build=* | --build=* | --buil=* | --bui=* | --bu=*) ++ build_alias=$ac_optarg ;; ++ ++ -cache-file | --cache-file | --cache-fil | --cache-fi \ ++ | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) ++ ac_prev=cache_file ;; ++ -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ ++ | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) ++ cache_file=$ac_optarg ;; ++ ++ --config-cache | -C) ++ cache_file=config.cache ;; ++ ++ -datadir | --datadir | --datadi | --datad) ++ ac_prev=datadir ;; ++ -datadir=* | --datadir=* | --datadi=* | --datad=*) ++ datadir=$ac_optarg ;; ++ ++ -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ ++ | --dataroo | --dataro | 
--datar) ++ ac_prev=datarootdir ;; ++ -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ ++ | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) ++ datarootdir=$ac_optarg ;; ++ ++ -disable-* | --disable-*) ++ ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` ++ # Reject names that are not valid shell variable names. ++ expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && ++ as_fn_error $? "invalid feature name: \`$ac_useropt'" ++ ac_useropt_orig=$ac_useropt ++ ac_useropt=`printf "%s\n" "$ac_useropt" | sed 's/[-+.]/_/g'` ++ case $ac_user_opts in ++ *" ++"enable_$ac_useropt" ++"*) ;; ++ *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig" ++ ac_unrecognized_sep=', ';; ++ esac ++ eval enable_$ac_useropt=no ;; ++ ++ -docdir | --docdir | --docdi | --doc | --do) ++ ac_prev=docdir ;; ++ -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) ++ docdir=$ac_optarg ;; ++ ++ -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) ++ ac_prev=dvidir ;; ++ -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) ++ dvidir=$ac_optarg ;; ++ ++ -enable-* | --enable-*) ++ ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` ++ # Reject names that are not valid shell variable names. ++ expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && ++ as_fn_error $? 
"invalid feature name: \`$ac_useropt'" ++ ac_useropt_orig=$ac_useropt ++ ac_useropt=`printf "%s\n" "$ac_useropt" | sed 's/[-+.]/_/g'` ++ case $ac_user_opts in ++ *" ++"enable_$ac_useropt" ++"*) ;; ++ *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig" ++ ac_unrecognized_sep=', ';; ++ esac ++ eval enable_$ac_useropt=\$ac_optarg ;; ++ ++ -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ ++ | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ ++ | --exec | --exe | --ex) ++ ac_prev=exec_prefix ;; ++ -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ ++ | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ ++ | --exec=* | --exe=* | --ex=*) ++ exec_prefix=$ac_optarg ;; ++ ++ -gas | --gas | --ga | --g) ++ # Obsolete; use --with-gas. ++ with_gas=yes ;; ++ ++ -help | --help | --hel | --he | -h) ++ ac_init_help=long ;; ++ -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) ++ ac_init_help=recursive ;; ++ -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) ++ ac_init_help=short ;; ++ ++ -host | --host | --hos | --ho) ++ ac_prev=host_alias ;; ++ -host=* | --host=* | --hos=* | --ho=*) ++ host_alias=$ac_optarg ;; ++ ++ -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) ++ ac_prev=htmldir ;; ++ -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ ++ | --ht=*) ++ htmldir=$ac_optarg ;; ++ ++ -includedir | --includedir | --includedi | --included | --include \ ++ | --includ | --inclu | --incl | --inc) ++ ac_prev=includedir ;; ++ -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ ++ | --includ=* | --inclu=* | --incl=* | --inc=*) ++ includedir=$ac_optarg ;; ++ ++ -infodir | --infodir | --infodi | --infod | --info | --inf) ++ ac_prev=infodir ;; ++ -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) ++ infodir=$ac_optarg ;; ++ ++ -libdir | --libdir | --libdi | --libd) ++ ac_prev=libdir ;; ++ -libdir=* | 
--libdir=* | --libdi=* | --libd=*) ++ libdir=$ac_optarg ;; ++ ++ -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ ++ | --libexe | --libex | --libe) ++ ac_prev=libexecdir ;; ++ -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ ++ | --libexe=* | --libex=* | --libe=*) ++ libexecdir=$ac_optarg ;; ++ ++ -localedir | --localedir | --localedi | --localed | --locale) ++ ac_prev=localedir ;; ++ -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) ++ localedir=$ac_optarg ;; ++ ++ -localstatedir | --localstatedir | --localstatedi | --localstated \ ++ | --localstate | --localstat | --localsta | --localst | --locals) ++ ac_prev=localstatedir ;; ++ -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ ++ | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) ++ localstatedir=$ac_optarg ;; ++ ++ -mandir | --mandir | --mandi | --mand | --man | --ma | --m) ++ ac_prev=mandir ;; ++ -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) ++ mandir=$ac_optarg ;; ++ ++ -nfp | --nfp | --nf) ++ # Obsolete; use --without-fp. 
++ with_fp=no ;; ++ ++ -no-create | --no-create | --no-creat | --no-crea | --no-cre \ ++ | --no-cr | --no-c | -n) ++ no_create=yes ;; ++ ++ -no-recursion | --no-recursion | --no-recursio | --no-recursi \ ++ | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) ++ no_recursion=yes ;; ++ ++ -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ ++ | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ ++ | --oldin | --oldi | --old | --ol | --o) ++ ac_prev=oldincludedir ;; ++ -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ ++ | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ ++ | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) ++ oldincludedir=$ac_optarg ;; ++ ++ -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) ++ ac_prev=prefix ;; ++ -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) ++ prefix=$ac_optarg ;; ++ ++ -program-prefix | --program-prefix | --program-prefi | --program-pref \ ++ | --program-pre | --program-pr | --program-p) ++ ac_prev=program_prefix ;; ++ -program-prefix=* | --program-prefix=* | --program-prefi=* \ ++ | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) ++ program_prefix=$ac_optarg ;; ++ ++ -program-suffix | --program-suffix | --program-suffi | --program-suff \ ++ | --program-suf | --program-su | --program-s) ++ ac_prev=program_suffix ;; ++ -program-suffix=* | --program-suffix=* | --program-suffi=* \ ++ | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) ++ program_suffix=$ac_optarg ;; ++ ++ -program-transform-name | --program-transform-name \ ++ | --program-transform-nam | --program-transform-na \ ++ | --program-transform-n | --program-transform- \ ++ | --program-transform | --program-transfor \ ++ | --program-transfo | --program-transf \ ++ | --program-trans | --program-tran \ ++ | --progr-tra | --program-tr | --program-t) ++ ac_prev=program_transform_name ;; ++ 
-program-transform-name=* | --program-transform-name=* \ ++ | --program-transform-nam=* | --program-transform-na=* \ ++ | --program-transform-n=* | --program-transform-=* \ ++ | --program-transform=* | --program-transfor=* \ ++ | --program-transfo=* | --program-transf=* \ ++ | --program-trans=* | --program-tran=* \ ++ | --progr-tra=* | --program-tr=* | --program-t=*) ++ program_transform_name=$ac_optarg ;; ++ ++ -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) ++ ac_prev=pdfdir ;; ++ -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) ++ pdfdir=$ac_optarg ;; ++ ++ -psdir | --psdir | --psdi | --psd | --ps) ++ ac_prev=psdir ;; ++ -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) ++ psdir=$ac_optarg ;; ++ ++ -q | -quiet | --quiet | --quie | --qui | --qu | --q \ ++ | -silent | --silent | --silen | --sile | --sil) ++ silent=yes ;; ++ ++ -runstatedir | --runstatedir | --runstatedi | --runstated \ ++ | --runstate | --runstat | --runsta | --runst | --runs \ ++ | --run | --ru | --r) ++ ac_prev=runstatedir ;; ++ -runstatedir=* | --runstatedir=* | --runstatedi=* | --runstated=* \ ++ | --runstate=* | --runstat=* | --runsta=* | --runst=* | --runs=* \ ++ | --run=* | --ru=* | --r=*) ++ runstatedir=$ac_optarg ;; ++ ++ -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) ++ ac_prev=sbindir ;; ++ -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ ++ | --sbi=* | --sb=*) ++ sbindir=$ac_optarg ;; ++ ++ -sharedstatedir | --sharedstatedir | --sharedstatedi \ ++ | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ ++ | --sharedst | --shareds | --shared | --share | --shar \ ++ | --sha | --sh) ++ ac_prev=sharedstatedir ;; ++ -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ ++ | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ ++ | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ ++ | --sha=* | --sh=*) ++ sharedstatedir=$ac_optarg ;; ++ ++ -site | --site | --sit) ++ ac_prev=site ;; ++ 
-site=* | --site=* | --sit=*) ++ site=$ac_optarg ;; ++ ++ -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) ++ ac_prev=srcdir ;; ++ -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) ++ srcdir=$ac_optarg ;; ++ ++ -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ ++ | --syscon | --sysco | --sysc | --sys | --sy) ++ ac_prev=sysconfdir ;; ++ -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ ++ | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) ++ sysconfdir=$ac_optarg ;; ++ ++ -target | --target | --targe | --targ | --tar | --ta | --t) ++ ac_prev=target_alias ;; ++ -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) ++ target_alias=$ac_optarg ;; ++ ++ -v | -verbose | --verbose | --verbos | --verbo | --verb) ++ verbose=yes ;; ++ ++ -version | --version | --versio | --versi | --vers | -V) ++ ac_init_version=: ;; ++ ++ -with-* | --with-*) ++ ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` ++ # Reject names that are not valid shell variable names. ++ expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && ++ as_fn_error $? "invalid package name: \`$ac_useropt'" ++ ac_useropt_orig=$ac_useropt ++ ac_useropt=`printf "%s\n" "$ac_useropt" | sed 's/[-+.]/_/g'` ++ case $ac_user_opts in ++ *" ++"with_$ac_useropt" ++"*) ;; ++ *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig" ++ ac_unrecognized_sep=', ';; ++ esac ++ eval with_$ac_useropt=\$ac_optarg ;; ++ ++ -without-* | --without-*) ++ ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` ++ # Reject names that are not valid shell variable names. ++ expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && ++ as_fn_error $? 
"invalid package name: \`$ac_useropt'" ++ ac_useropt_orig=$ac_useropt ++ ac_useropt=`printf "%s\n" "$ac_useropt" | sed 's/[-+.]/_/g'` ++ case $ac_user_opts in ++ *" ++"with_$ac_useropt" ++"*) ;; ++ *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig" ++ ac_unrecognized_sep=', ';; ++ esac ++ eval with_$ac_useropt=no ;; ++ ++ --x) ++ # Obsolete; use --with-x. ++ with_x=yes ;; ++ ++ -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ ++ | --x-incl | --x-inc | --x-in | --x-i) ++ ac_prev=x_includes ;; ++ -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ ++ | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) ++ x_includes=$ac_optarg ;; ++ ++ -x-libraries | --x-libraries | --x-librarie | --x-librari \ ++ | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) ++ ac_prev=x_libraries ;; ++ -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ ++ | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) ++ x_libraries=$ac_optarg ;; ++ ++ -*) as_fn_error $? "unrecognized option: \`$ac_option' ++Try \`$0 --help' for more information" ++ ;; ++ ++ *=*) ++ ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` ++ # Reject names that are not valid shell variable names. ++ case $ac_envvar in #( ++ '' | [0-9]* | *[!_$as_cr_alnum]* ) ++ as_fn_error $? "invalid variable name: \`$ac_envvar'" ;; ++ esac ++ eval $ac_envvar=\$ac_optarg ++ export $ac_envvar ;; ++ ++ *) ++ # FIXME: should be removed in autoconf 3.0. ++ printf "%s\n" "$as_me: WARNING: you should use --build, --host, --target" >&2 ++ expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && ++ printf "%s\n" "$as_me: WARNING: invalid host type: $ac_option" >&2 ++ : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}" ++ ;; ++ ++ esac ++done ++ ++if test -n "$ac_prev"; then ++ ac_option=--`echo $ac_prev | sed 's/_/-/g'` ++ as_fn_error $? 
"missing argument to $ac_option" ++fi ++ ++if test -n "$ac_unrecognized_opts"; then ++ case $enable_option_checking in ++ no) ;; ++ fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;; ++ *) printf "%s\n" "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; ++ esac ++fi ++ ++# Check all directory arguments for consistency. ++for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ ++ datadir sysconfdir sharedstatedir localstatedir includedir \ ++ oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ ++ libdir localedir mandir runstatedir ++do ++ eval ac_val=\$$ac_var ++ # Remove trailing slashes. ++ case $ac_val in ++ */ ) ++ ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'` ++ eval $ac_var=\$ac_val;; ++ esac ++ # Be sure to have absolute directory names. ++ case $ac_val in ++ [\\/$]* | ?:[\\/]* ) continue;; ++ NONE | '' ) case $ac_var in *prefix ) continue;; esac;; ++ esac ++ as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val" ++done ++ ++# There might be people who depend on the old broken behavior: `$host' ++# used to hold the argument of --host etc. ++# FIXME: To remove some day. ++build=$build_alias ++host=$host_alias ++target=$target_alias ++ ++# FIXME: To remove some day. ++if test "x$host_alias" != x; then ++ if test "x$build_alias" = x; then ++ cross_compiling=maybe ++ elif test "x$build_alias" != "x$host_alias"; then ++ cross_compiling=yes ++ fi ++fi ++ ++ac_tool_prefix= ++test -n "$host_alias" && ac_tool_prefix=$host_alias- ++ ++test "$silent" = yes && exec 6>/dev/null ++ ++ ++ac_pwd=`pwd` && test -n "$ac_pwd" && ++ac_ls_di=`ls -di .` && ++ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || ++ as_fn_error $? "working directory cannot be determined" ++test "X$ac_ls_di" = "X$ac_pwd_ls_di" || ++ as_fn_error $? "pwd does not report name of working directory" ++ ++ ++# Find the source files, if location was not specified. 
++if test -z "$srcdir"; then ++ ac_srcdir_defaulted=yes ++ # Try the directory containing this script, then the parent directory. ++ ac_confdir=`$as_dirname -- "$as_myself" || ++$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ ++ X"$as_myself" : 'X\(//\)[^/]' \| \ ++ X"$as_myself" : 'X\(//\)$' \| \ ++ X"$as_myself" : 'X\(/\)' \| . 2>/dev/null || ++printf "%s\n" X"$as_myself" | ++ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ ++ s//\1/ ++ q ++ } ++ /^X\(\/\/\)[^/].*/{ ++ s//\1/ ++ q ++ } ++ /^X\(\/\/\)$/{ ++ s//\1/ ++ q ++ } ++ /^X\(\/\).*/{ ++ s//\1/ ++ q ++ } ++ s/.*/./; q'` ++ srcdir=$ac_confdir ++ if test ! -r "$srcdir/$ac_unique_file"; then ++ srcdir=.. ++ fi ++else ++ ac_srcdir_defaulted=no ++fi ++if test ! -r "$srcdir/$ac_unique_file"; then ++ test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." ++ as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir" ++fi ++ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" ++ac_abs_confdir=`( ++ cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg" ++ pwd)` ++# When building in place, set srcdir=. ++if test "$ac_abs_confdir" = "$ac_pwd"; then ++ srcdir=. ++fi ++# Remove unnecessary trailing slashes from srcdir. ++# Double slashes in file names in object file debugging info ++# mess up M-x gdb in Emacs. ++case $srcdir in ++*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; ++esac ++for ac_var in $ac_precious_vars; do ++ eval ac_env_${ac_var}_set=\${${ac_var}+set} ++ eval ac_env_${ac_var}_value=\$${ac_var} ++ eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} ++ eval ac_cv_env_${ac_var}_value=\$${ac_var} ++done ++ ++# ++# Report the --help message. ++# ++if test "$ac_init_help" = "long"; then ++ # Omit some internal or obsolete options to make the list less imposing. ++ # This message is too long to be a string in the A/UX 3.1 sh. ++ cat <<_ACEOF ++\`configure' configures bolt plugin for ld 0.1 to adapt to many kinds of systems. 
++ ++Usage: $0 [OPTION]... [VAR=VALUE]... ++ ++To assign environment variables (e.g., CC, CFLAGS...), specify them as ++VAR=VALUE. See below for descriptions of some of the useful variables. ++ ++Defaults for the options are specified in brackets. ++ ++Configuration: ++ -h, --help display this help and exit ++ --help=short display options specific to this package ++ --help=recursive display the short help of all the included packages ++ -V, --version display version information and exit ++ -q, --quiet, --silent do not print \`checking ...' messages ++ --cache-file=FILE cache test results in FILE [disabled] ++ -C, --config-cache alias for \`--cache-file=config.cache' ++ -n, --no-create do not create output files ++ --srcdir=DIR find the sources in DIR [configure dir or \`..'] ++ ++Installation directories: ++ --prefix=PREFIX install architecture-independent files in PREFIX ++ [$ac_default_prefix] ++ --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX ++ [PREFIX] ++ ++By default, \`make install' will install all the files in ++\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify ++an installation prefix other than \`$ac_default_prefix' using \`--prefix', ++for instance \`--prefix=\$HOME'. ++ ++For better control, use the options below. 
++ ++Fine tuning of the installation directories: ++ --bindir=DIR user executables [EPREFIX/bin] ++ --sbindir=DIR system admin executables [EPREFIX/sbin] ++ --libexecdir=DIR program executables [EPREFIX/libexec] ++ --sysconfdir=DIR read-only single-machine data [PREFIX/etc] ++ --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] ++ --localstatedir=DIR modifiable single-machine data [PREFIX/var] ++ --runstatedir=DIR modifiable per-process data [LOCALSTATEDIR/run] ++ --libdir=DIR object code libraries [EPREFIX/lib] ++ --includedir=DIR C header files [PREFIX/include] ++ --oldincludedir=DIR C header files for non-gcc [/usr/include] ++ --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] ++ --datadir=DIR read-only architecture-independent data [DATAROOTDIR] ++ --infodir=DIR info documentation [DATAROOTDIR/info] ++ --localedir=DIR locale-dependent data [DATAROOTDIR/locale] ++ --mandir=DIR man documentation [DATAROOTDIR/man] ++ --docdir=DIR documentation root [DATAROOTDIR/doc/bolt-plugin] ++ --htmldir=DIR html documentation [DOCDIR] ++ --dvidir=DIR dvi documentation [DOCDIR] ++ --pdfdir=DIR pdf documentation [DOCDIR] ++ --psdir=DIR ps documentation [DOCDIR] ++_ACEOF ++ ++ cat <<\_ACEOF ++ ++Program names: ++ --program-prefix=PREFIX prepend PREFIX to installed program names ++ --program-suffix=SUFFIX append SUFFIX to installed program names ++ --program-transform-name=PROGRAM run sed PROGRAM on installed program names ++ ++System types: ++ --build=BUILD configure for building on BUILD [guessed] ++ --host=HOST cross-compile to build programs to run on HOST [BUILD] ++ --target=TARGET configure for building compilers for TARGET [HOST] ++_ACEOF ++fi ++ ++if test -n "$ac_init_help"; then ++ case $ac_init_help in ++ short | recursive ) echo "Configuration of bolt plugin for ld 0.1:";; ++ esac ++ cat <<\_ACEOF ++ ++Optional Features: ++ --disable-option-checking ignore unrecognized --enable/--with options ++ --disable-FEATURE do not include 
FEATURE (same as --enable-FEATURE=no) ++ --enable-FEATURE[=ARG] include FEATURE [ARG=yes] ++ --enable-silent-rules less verbose build output (undo: "make V=1") ++ --disable-silent-rules verbose build output (undo: "make V=0") ++ --enable-maintainer-mode ++ enable make rules and dependencies not useful (and ++ sometimes confusing) to the casual installer ++ --enable-dependency-tracking ++ do not reject slow dependency extractors ++ --disable-dependency-tracking ++ speeds up one-time build ++ --disable-largefile omit support for large files ++ --enable-shared[=PKGS] build shared libraries [default=yes] ++ --enable-static[=PKGS] build static libraries [default=yes] ++ --enable-fast-install[=PKGS] ++ optimize for fast installation [default=yes] ++ --disable-libtool-lock avoid locking (might break parallel builds) ++ ++Optional Packages: ++ --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] ++ --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) ++ --with-libiberty=PATH specify the directory where to find libiberty ++ [../libiberty] ++ --with-pic[=PKGS] try to use only PIC/non-PIC objects [default=use ++ both] ++ --with-aix-soname=aix|svr4|both ++ shared library versioning (aka "SONAME") variant to ++ provide on AIX, [default=aix]. ++ --with-gnu-ld assume the C compiler uses GNU ld [default=no] ++ --with-sysroot[=DIR] Search for dependent libraries within DIR (or the ++ compiler's sysroot if not specified). ++ ++Some influential environment variables: ++ CC C compiler command ++ CFLAGS C compiler flags ++ LDFLAGS linker flags, e.g. -L if you have libraries in a ++ nonstandard directory ++ LIBS libraries to pass to the linker, e.g. -l ++ CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. -I if ++ you have headers in a nonstandard directory ++ CXX C++ compiler command ++ CXXFLAGS C++ compiler flags ++ LT_SYS_LIBRARY_PATH ++ User-defined run-time library search path. 
++ CXXCPP C++ preprocessor ++ ++Use these variables to override the choices made by `configure' or to help ++it to find libraries and programs with nonstandard names/locations. ++ ++Report bugs to the package provider. ++_ACEOF ++ac_status=$? ++fi ++ ++if test "$ac_init_help" = "recursive"; then ++ # If there are subdirs, report their specific --help. ++ for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue ++ test -d "$ac_dir" || ++ { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } || ++ continue ++ ac_builddir=. ++ ++case "$ac_dir" in ++.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; ++*) ++ ac_dir_suffix=/`printf "%s\n" "$ac_dir" | sed 's|^\.[\\/]||'` ++ # A ".." for each directory in $ac_dir_suffix. ++ ac_top_builddir_sub=`printf "%s\n" "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` ++ case $ac_top_builddir_sub in ++ "") ac_top_builddir_sub=. ac_top_build_prefix= ;; ++ *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; ++ esac ;; ++esac ++ac_abs_top_builddir=$ac_pwd ++ac_abs_builddir=$ac_pwd$ac_dir_suffix ++# for backward compatibility: ++ac_top_builddir=$ac_top_build_prefix ++ ++case $srcdir in ++ .) # We are building in place. ++ ac_srcdir=. ++ ac_top_srcdir=$ac_top_builddir_sub ++ ac_abs_top_srcdir=$ac_pwd ;; ++ [\\/]* | ?:[\\/]* ) # Absolute name. ++ ac_srcdir=$srcdir$ac_dir_suffix; ++ ac_top_srcdir=$srcdir ++ ac_abs_top_srcdir=$srcdir ;; ++ *) # Relative name. ++ ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix ++ ac_top_srcdir=$ac_top_build_prefix$srcdir ++ ac_abs_top_srcdir=$ac_pwd/$srcdir ;; ++esac ++ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix ++ ++ cd "$ac_dir" || { ac_status=$?; continue; } ++ # Check for configure.gnu first; this name is used for a wrapper for ++ # Metaconfig's "Configure" on case-insensitive file systems. 
++ if test -f "$ac_srcdir/configure.gnu"; then ++ echo && ++ $SHELL "$ac_srcdir/configure.gnu" --help=recursive ++ elif test -f "$ac_srcdir/configure"; then ++ echo && ++ $SHELL "$ac_srcdir/configure" --help=recursive ++ else ++ printf "%s\n" "$as_me: WARNING: no configuration information is in $ac_dir" >&2 ++ fi || ac_status=$? ++ cd "$ac_pwd" || { ac_status=$?; break; } ++ done ++fi ++ ++test -n "$ac_init_help" && exit $ac_status ++if $ac_init_version; then ++ cat <<\_ACEOF ++bolt plugin for ld configure 0.1 ++generated by GNU Autoconf 2.71 ++ ++Copyright (C) 2021 Free Software Foundation, Inc. ++This configure script is free software; the Free Software Foundation ++gives unlimited permission to copy, distribute and modify it. ++_ACEOF ++ exit ++fi ++ ++## ------------------------ ## ++## Autoconf initialization. ## ++## ------------------------ ## ++ ++# ac_fn_c_try_compile LINENO ++# -------------------------- ++# Try to compile conftest.$ac_ext, and return whether this succeeded. ++ac_fn_c_try_compile () ++{ ++ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack ++ rm -f conftest.$ac_objext conftest.beam ++ if { { ac_try="$ac_compile" ++case "(($ac_try" in ++ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; ++ *) ac_try_echo=$ac_try;; ++esac ++eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" ++printf "%s\n" "$ac_try_echo"; } >&5 ++ (eval "$ac_compile") 2>conftest.err ++ ac_status=$? ++ if test -s conftest.err; then ++ grep -v '^ *+' conftest.err >conftest.er1 ++ cat conftest.er1 >&5 ++ mv -f conftest.er1 conftest.err ++ fi ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ test $ac_status = 0; } && { ++ test -z "$ac_c_werror_flag" || ++ test ! 
-s conftest.err ++ } && test -s conftest.$ac_objext ++then : ++ ac_retval=0 ++else $as_nop ++ printf "%s\n" "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++ ac_retval=1 ++fi ++ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno ++ as_fn_set_status $ac_retval ++ ++} # ac_fn_c_try_compile ++ ++# ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES ++# ------------------------------------------------------- ++# Tests whether HEADER exists and can be compiled using the include files in ++# INCLUDES, setting the cache variable VAR accordingly. ++ac_fn_c_check_header_compile () ++{ ++ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 ++printf %s "checking for $2... " >&6; } ++if eval test \${$3+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++$4 ++#include <$2> ++_ACEOF ++if ac_fn_c_try_compile "$LINENO" ++then : ++ eval "$3=yes" ++else $as_nop ++ eval "$3=no" ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++fi ++eval ac_res=\$$3 ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 ++printf "%s\n" "$ac_res" >&6; } ++ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno ++ ++} # ac_fn_c_check_header_compile ++ ++# ac_fn_cxx_try_compile LINENO ++# ---------------------------- ++# Try to compile conftest.$ac_ext, and return whether this succeeded. ++ac_fn_cxx_try_compile () ++{ ++ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack ++ rm -f conftest.$ac_objext conftest.beam ++ if { { ac_try="$ac_compile" ++case "(($ac_try" in ++ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; ++ *) ac_try_echo=$ac_try;; ++esac ++eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" ++printf "%s\n" "$ac_try_echo"; } >&5 ++ (eval "$ac_compile") 2>conftest.err ++ ac_status=$? 
++ if test -s conftest.err; then ++ grep -v '^ *+' conftest.err >conftest.er1 ++ cat conftest.er1 >&5 ++ mv -f conftest.er1 conftest.err ++ fi ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ test $ac_status = 0; } && { ++ test -z "$ac_cxx_werror_flag" || ++ test ! -s conftest.err ++ } && test -s conftest.$ac_objext ++then : ++ ac_retval=0 ++else $as_nop ++ printf "%s\n" "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++ ac_retval=1 ++fi ++ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno ++ as_fn_set_status $ac_retval ++ ++} # ac_fn_cxx_try_compile ++ ++# ac_fn_c_try_link LINENO ++# ----------------------- ++# Try to link conftest.$ac_ext, and return whether this succeeded. ++ac_fn_c_try_link () ++{ ++ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack ++ rm -f conftest.$ac_objext conftest.beam conftest$ac_exeext ++ if { { ac_try="$ac_link" ++case "(($ac_try" in ++ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; ++ *) ac_try_echo=$ac_try;; ++esac ++eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" ++printf "%s\n" "$ac_try_echo"; } >&5 ++ (eval "$ac_link") 2>conftest.err ++ ac_status=$? ++ if test -s conftest.err; then ++ grep -v '^ *+' conftest.err >conftest.er1 ++ cat conftest.er1 >&5 ++ mv -f conftest.er1 conftest.err ++ fi ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ test $ac_status = 0; } && { ++ test -z "$ac_c_werror_flag" || ++ test ! 
-s conftest.err ++ } && test -s conftest$ac_exeext && { ++ test "$cross_compiling" = yes || ++ test -x conftest$ac_exeext ++ } ++then : ++ ac_retval=0 ++else $as_nop ++ printf "%s\n" "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++ ac_retval=1 ++fi ++ # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information ++ # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would ++ # interfere with the next link command; also delete a directory that is ++ # left behind by Apple's compiler. We do this before executing the actions. ++ rm -rf conftest.dSYM conftest_ipa8_conftest.oo ++ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno ++ as_fn_set_status $ac_retval ++ ++} # ac_fn_c_try_link ++ ++# ac_fn_c_check_func LINENO FUNC VAR ++# ---------------------------------- ++# Tests whether FUNC exists, setting the cache variable VAR accordingly ++ac_fn_c_check_func () ++{ ++ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 ++printf %s "checking for $2... " >&6; } ++if eval test \${$3+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++/* Define $2 to an innocuous variant, in case declares $2. ++ For example, HP-UX 11i declares gettimeofday. */ ++#define $2 innocuous_$2 ++ ++/* System header to define __stub macros and hopefully few prototypes, ++ which can conflict with char $2 (); below. */ ++ ++#include ++#undef $2 ++ ++/* Override any GCC internal prototype to avoid an error. ++ Use char because int might match the return type of a GCC ++ builtin and then its argument prototype would still apply. */ ++#ifdef __cplusplus ++extern "C" ++#endif ++char $2 (); ++/* The GNU C library defines this for functions which it implements ++ to always fail with ENOSYS. 
Some functions are actually named ++ something starting with __ and the normal name is an alias. */ ++#if defined __stub_$2 || defined __stub___$2 ++choke me ++#endif ++ ++int ++main (void) ++{ ++return $2 (); ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_c_try_link "$LINENO" ++then : ++ eval "$3=yes" ++else $as_nop ++ eval "$3=no" ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam \ ++ conftest$ac_exeext conftest.$ac_ext ++fi ++eval ac_res=\$$3 ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 ++printf "%s\n" "$ac_res" >&6; } ++ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno ++ ++} # ac_fn_c_check_func ++ ++# ac_fn_cxx_try_cpp LINENO ++# ------------------------ ++# Try to preprocess conftest.$ac_ext, and return whether this succeeded. ++ac_fn_cxx_try_cpp () ++{ ++ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack ++ if { { ac_try="$ac_cpp conftest.$ac_ext" ++case "(($ac_try" in ++ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; ++ *) ac_try_echo=$ac_try;; ++esac ++eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" ++printf "%s\n" "$ac_try_echo"; } >&5 ++ (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err ++ ac_status=$? ++ if test -s conftest.err; then ++ grep -v '^ *+' conftest.err >conftest.er1 ++ cat conftest.er1 >&5 ++ mv -f conftest.er1 conftest.err ++ fi ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ test $ac_status = 0; } > conftest.i && { ++ test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" || ++ test ! -s conftest.err ++ } ++then : ++ ac_retval=0 ++else $as_nop ++ printf "%s\n" "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++ ac_retval=1 ++fi ++ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno ++ as_fn_set_status $ac_retval ++ ++} # ac_fn_cxx_try_cpp ++ ++# ac_fn_cxx_try_link LINENO ++# ------------------------- ++# Try to link conftest.$ac_ext, and return whether this succeeded. 
++ac_fn_cxx_try_link () ++{ ++ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack ++ rm -f conftest.$ac_objext conftest.beam conftest$ac_exeext ++ if { { ac_try="$ac_link" ++case "(($ac_try" in ++ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; ++ *) ac_try_echo=$ac_try;; ++esac ++eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" ++printf "%s\n" "$ac_try_echo"; } >&5 ++ (eval "$ac_link") 2>conftest.err ++ ac_status=$? ++ if test -s conftest.err; then ++ grep -v '^ *+' conftest.err >conftest.er1 ++ cat conftest.er1 >&5 ++ mv -f conftest.er1 conftest.err ++ fi ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ test $ac_status = 0; } && { ++ test -z "$ac_cxx_werror_flag" || ++ test ! -s conftest.err ++ } && test -s conftest$ac_exeext && { ++ test "$cross_compiling" = yes || ++ test -x conftest$ac_exeext ++ } ++then : ++ ac_retval=0 ++else $as_nop ++ printf "%s\n" "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++ ac_retval=1 ++fi ++ # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information ++ # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would ++ # interfere with the next link command; also delete a directory that is ++ # left behind by Apple's compiler. We do this before executing the actions. ++ rm -rf conftest.dSYM conftest_ipa8_conftest.oo ++ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno ++ as_fn_set_status $ac_retval ++ ++} # ac_fn_cxx_try_link ++ ++# ac_fn_c_find_intX_t LINENO BITS VAR ++# ----------------------------------- ++# Finds a signed integer type with width BITS, setting cache variable VAR ++# accordingly. ++ac_fn_c_find_intX_t () ++{ ++ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for int$2_t" >&5 ++printf %s "checking for int$2_t... 
" >&6; } ++if eval test \${$3+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ eval "$3=no" ++ # Order is important - never check a type that is potentially smaller ++ # than half of the expected target width. ++ for ac_type in int$2_t 'int' 'long int' \ ++ 'long long int' 'short int' 'signed char'; do ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++$ac_includes_default ++ enum { N = $2 / 2 - 1 }; ++int ++main (void) ++{ ++static int test_array [1 - 2 * !(0 < ($ac_type) ((((($ac_type) 1 << N) << N) - 1) * 2 + 1))]; ++test_array [0] = 0; ++return test_array [0]; ++ ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_c_try_compile "$LINENO" ++then : ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++$ac_includes_default ++ enum { N = $2 / 2 - 1 }; ++int ++main (void) ++{ ++static int test_array [1 - 2 * !(($ac_type) ((((($ac_type) 1 << N) << N) - 1) * 2 + 1) ++ < ($ac_type) ((((($ac_type) 1 << N) << N) - 1) * 2 + 2))]; ++test_array [0] = 0; ++return test_array [0]; ++ ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_c_try_compile "$LINENO" ++then : ++ ++else $as_nop ++ case $ac_type in #( ++ int$2_t) : ++ eval "$3=yes" ;; #( ++ *) : ++ eval "$3=\$ac_type" ;; ++esac ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++ if eval test \"x\$"$3"\" = x"no" ++then : ++ ++else $as_nop ++ break ++fi ++ done ++fi ++eval ac_res=\$$3 ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 ++printf "%s\n" "$ac_res" >&6; } ++ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno ++ ++} # ac_fn_c_find_intX_t ++ ++# ac_fn_c_find_uintX_t LINENO BITS VAR ++# ------------------------------------ ++# Finds an unsigned integer type with width BITS, setting cache variable VAR ++# accordingly. 
++ac_fn_c_find_uintX_t () ++{ ++ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for uint$2_t" >&5 ++printf %s "checking for uint$2_t... " >&6; } ++if eval test \${$3+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ eval "$3=no" ++ # Order is important - never check a type that is potentially smaller ++ # than half of the expected target width. ++ for ac_type in uint$2_t 'unsigned int' 'unsigned long int' \ ++ 'unsigned long long int' 'unsigned short int' 'unsigned char'; do ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++$ac_includes_default ++int ++main (void) ++{ ++static int test_array [1 - 2 * !((($ac_type) -1 >> ($2 / 2 - 1)) >> ($2 / 2 - 1) == 3)]; ++test_array [0] = 0; ++return test_array [0]; ++ ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_c_try_compile "$LINENO" ++then : ++ case $ac_type in #( ++ uint$2_t) : ++ eval "$3=yes" ;; #( ++ *) : ++ eval "$3=\$ac_type" ;; ++esac ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++ if eval test \"x\$"$3"\" = x"no" ++then : ++ ++else $as_nop ++ break ++fi ++ done ++fi ++eval ac_res=\$$3 ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 ++printf "%s\n" "$ac_res" >&6; } ++ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno ++ ++} # ac_fn_c_find_uintX_t ++ac_configure_args_raw= ++for ac_arg ++do ++ case $ac_arg in ++ *\'*) ++ ac_arg=`printf "%s\n" "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; ++ esac ++ as_fn_append ac_configure_args_raw " '$ac_arg'" ++done ++ ++case $ac_configure_args_raw in ++ *$as_nl*) ++ ac_safe_unquote= ;; ++ *) ++ ac_unsafe_z='|&;<>()$`\\"*?[ '' ' # This string ends in space, tab. 
++ ac_unsafe_a="$ac_unsafe_z#~" ++ ac_safe_unquote="s/ '\\([^$ac_unsafe_a][^$ac_unsafe_z]*\\)'/ \\1/g" ++ ac_configure_args_raw=` printf "%s\n" "$ac_configure_args_raw" | sed "$ac_safe_unquote"`;; ++esac ++ ++cat >config.log <<_ACEOF ++This file contains any messages produced by compilers while ++running configure, to aid debugging if configure makes a mistake. ++ ++It was created by bolt plugin for ld $as_me 0.1, which was ++generated by GNU Autoconf 2.71. Invocation command line was ++ ++ $ $0$ac_configure_args_raw ++ ++_ACEOF ++exec 5>>config.log ++{ ++cat <<_ASUNAME ++## --------- ## ++## Platform. ## ++## --------- ## ++ ++hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` ++uname -m = `(uname -m) 2>/dev/null || echo unknown` ++uname -r = `(uname -r) 2>/dev/null || echo unknown` ++uname -s = `(uname -s) 2>/dev/null || echo unknown` ++uname -v = `(uname -v) 2>/dev/null || echo unknown` ++ ++/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` ++/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` ++ ++/bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` ++/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` ++/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` ++/usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` ++/bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` ++/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` ++/bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` ++ ++_ASUNAME ++ ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ printf "%s\n" "PATH: $as_dir" ++ done ++IFS=$as_save_IFS ++ ++} >&5 ++ ++cat >&5 <<_ACEOF ++ ++ ++## ----------- ## ++## Core tests. ## ++## ----------- ## ++ ++_ACEOF ++ ++ ++# Keep a trace of the command line. 
++# Strip out --no-create and --no-recursion so they do not pile up. ++# Strip out --silent because we don't want to record it for future runs. ++# Also quote any args containing shell meta-characters. ++# Make two passes to allow for proper duplicate-argument suppression. ++ac_configure_args= ++ac_configure_args0= ++ac_configure_args1= ++ac_must_keep_next=false ++for ac_pass in 1 2 ++do ++ for ac_arg ++ do ++ case $ac_arg in ++ -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; ++ -q | -quiet | --quiet | --quie | --qui | --qu | --q \ ++ | -silent | --silent | --silen | --sile | --sil) ++ continue ;; ++ *\'*) ++ ac_arg=`printf "%s\n" "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; ++ esac ++ case $ac_pass in ++ 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;; ++ 2) ++ as_fn_append ac_configure_args1 " '$ac_arg'" ++ if test $ac_must_keep_next = true; then ++ ac_must_keep_next=false # Got value, back to normal. ++ else ++ case $ac_arg in ++ *=* | --config-cache | -C | -disable-* | --disable-* \ ++ | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ ++ | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ ++ | -with-* | --with-* | -without-* | --without-* | --x) ++ case "$ac_configure_args0 " in ++ "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; ++ esac ++ ;; ++ -* ) ac_must_keep_next=true ;; ++ esac ++ fi ++ as_fn_append ac_configure_args " '$ac_arg'" ++ ;; ++ esac ++ done ++done ++{ ac_configure_args0=; unset ac_configure_args0;} ++{ ac_configure_args1=; unset ac_configure_args1;} ++ ++# When interrupted or exit'd, cleanup temporary files, and complete ++# config.log. We remove comments because anyway the quotes in there ++# would cause problems or look ugly. ++# WARNING: Use '\'' to represent an apostrophe within the trap. ++# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. ++trap 'exit_status=$? ++ # Sanitize IFS. ++ IFS=" "" $as_nl" ++ # Save into config.log some information that might help in debugging. 
++ { ++ echo ++ ++ printf "%s\n" "## ---------------- ## ++## Cache variables. ## ++## ---------------- ##" ++ echo ++ # The following way of writing the cache mishandles newlines in values, ++( ++ for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do ++ eval ac_val=\$$ac_var ++ case $ac_val in #( ++ *${as_nl}*) ++ case $ac_var in #( ++ *_cv_*) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 ++printf "%s\n" "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; ++ esac ++ case $ac_var in #( ++ _ | IFS | as_nl) ;; #( ++ BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( ++ *) { eval $ac_var=; unset $ac_var;} ;; ++ esac ;; ++ esac ++ done ++ (set) 2>&1 | ++ case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( ++ *${as_nl}ac_space=\ *) ++ sed -n \ ++ "s/'\''/'\''\\\\'\'''\''/g; ++ s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" ++ ;; #( ++ *) ++ sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" ++ ;; ++ esac | ++ sort ++) ++ echo ++ ++ printf "%s\n" "## ----------------- ## ++## Output variables. ## ++## ----------------- ##" ++ echo ++ for ac_var in $ac_subst_vars ++ do ++ eval ac_val=\$$ac_var ++ case $ac_val in ++ *\'\''*) ac_val=`printf "%s\n" "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; ++ esac ++ printf "%s\n" "$ac_var='\''$ac_val'\''" ++ done | sort ++ echo ++ ++ if test -n "$ac_subst_files"; then ++ printf "%s\n" "## ------------------- ## ++## File substitutions. ## ++## ------------------- ##" ++ echo ++ for ac_var in $ac_subst_files ++ do ++ eval ac_val=\$$ac_var ++ case $ac_val in ++ *\'\''*) ac_val=`printf "%s\n" "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; ++ esac ++ printf "%s\n" "$ac_var='\''$ac_val'\''" ++ done | sort ++ echo ++ fi ++ ++ if test -s confdefs.h; then ++ printf "%s\n" "## ----------- ## ++## confdefs.h. 
## ++## ----------- ##" ++ echo ++ cat confdefs.h ++ echo ++ fi ++ test "$ac_signal" != 0 && ++ printf "%s\n" "$as_me: caught signal $ac_signal" ++ printf "%s\n" "$as_me: exit $exit_status" ++ } >&5 ++ rm -f core *.core core.conftest.* && ++ rm -f -r conftest* confdefs* conf$$* $ac_clean_files && ++ exit $exit_status ++' 0 ++for ac_signal in 1 2 13 15; do ++ trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal ++done ++ac_signal=0 ++ ++# confdefs.h avoids OS command line length limits that DEFS can exceed. ++rm -f -r conftest* confdefs.h ++ ++printf "%s\n" "/* confdefs.h */" > confdefs.h ++ ++# Predefined preprocessor variables. ++ ++printf "%s\n" "#define PACKAGE_NAME \"$PACKAGE_NAME\"" >>confdefs.h ++ ++printf "%s\n" "#define PACKAGE_TARNAME \"$PACKAGE_TARNAME\"" >>confdefs.h ++ ++printf "%s\n" "#define PACKAGE_VERSION \"$PACKAGE_VERSION\"" >>confdefs.h ++ ++printf "%s\n" "#define PACKAGE_STRING \"$PACKAGE_STRING\"" >>confdefs.h ++ ++printf "%s\n" "#define PACKAGE_BUGREPORT \"$PACKAGE_BUGREPORT\"" >>confdefs.h ++ ++printf "%s\n" "#define PACKAGE_URL \"$PACKAGE_URL\"" >>confdefs.h ++ ++ ++# Let the site file select an alternate cache file if it wants to. ++# Prefer an explicitly selected file to automatically selected ones. ++if test -n "$CONFIG_SITE"; then ++ ac_site_files="$CONFIG_SITE" ++elif test "x$prefix" != xNONE; then ++ ac_site_files="$prefix/share/config.site $prefix/etc/config.site" ++else ++ ac_site_files="$ac_default_prefix/share/config.site $ac_default_prefix/etc/config.site" ++fi ++ ++for ac_site_file in $ac_site_files ++do ++ case $ac_site_file in #( ++ */*) : ++ ;; #( ++ *) : ++ ac_site_file=./$ac_site_file ;; ++esac ++ if test -f "$ac_site_file" && test -r "$ac_site_file"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 ++printf "%s\n" "$as_me: loading site script $ac_site_file" >&6;} ++ sed 's/^/| /' "$ac_site_file" >&5 ++ . 
"$ac_site_file" \ ++ || { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 ++printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} ++as_fn_error $? "failed to load site script $ac_site_file ++See \`config.log' for more details" "$LINENO" 5; } ++ fi ++done ++ ++if test -r "$cache_file"; then ++ # Some versions of bash will fail to source /dev/null (special files ++ # actually), so we avoid doing that. DJGPP emulates it as a regular file. ++ if test /dev/null != "$cache_file" && test -f "$cache_file"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 ++printf "%s\n" "$as_me: loading cache $cache_file" >&6;} ++ case $cache_file in ++ [\\/]* | ?:[\\/]* ) . "$cache_file";; ++ *) . "./$cache_file";; ++ esac ++ fi ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 ++printf "%s\n" "$as_me: creating cache $cache_file" >&6;} ++ >$cache_file ++fi ++ ++as_fn_append ac_header_c_list " stdio.h stdio_h HAVE_STDIO_H" ++# Test code for whether the C compiler supports C89 (global declarations) ++ac_c_conftest_c89_globals=' ++/* Does the compiler advertise C89 conformance? ++ Do not test the value of __STDC__, because some compilers set it to 0 ++ while being otherwise adequately conformant. */ ++#if !defined __STDC__ ++# error "Compiler does not advertise C89 conformance" ++#endif ++ ++#include ++#include ++struct stat; ++/* Most of the following tests are stolen from RCS 5.7 src/conf.sh. */ ++struct buf { int x; }; ++struct buf * (*rcsopen) (struct buf *, struct stat *, int); ++static char *e (p, i) ++ char **p; ++ int i; ++{ ++ return p[i]; ++} ++static char *f (char * (*g) (char **, int), char **p, ...) ++{ ++ char *s; ++ va_list v; ++ va_start (v,p); ++ s = g (p, va_arg (v,int)); ++ va_end (v); ++ return s; ++} ++ ++/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has ++ function prototypes and stuff, but not \xHH hex character constants. 
++ These do not provoke an error unfortunately, instead are silently treated ++ as an "x". The following induces an error, until -std is added to get ++ proper ANSI mode. Curiously \x00 != x always comes out true, for an ++ array size at least. It is necessary to write \x00 == 0 to get something ++ that is true only with -std. */ ++int osf4_cc_array ['\''\x00'\'' == 0 ? 1 : -1]; ++ ++/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters ++ inside strings and character constants. */ ++#define FOO(x) '\''x'\'' ++int xlc6_cc_array[FOO(a) == '\''x'\'' ? 1 : -1]; ++ ++int test (int i, double x); ++struct s1 {int (*f) (int a);}; ++struct s2 {int (*f) (double a);}; ++int pairnames (int, char **, int *(*)(struct buf *, struct stat *, int), ++ int, int);' ++ ++# Test code for whether the C compiler supports C89 (body of main). ++ac_c_conftest_c89_main=' ++ok |= (argc == 0 || f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]); ++' ++ ++# Test code for whether the C compiler supports C99 (global declarations) ++ac_c_conftest_c99_globals=' ++// Does the compiler advertise C99 conformance? ++#if !defined __STDC_VERSION__ || __STDC_VERSION__ < 199901L ++# error "Compiler does not advertise C99 conformance" ++#endif ++ ++#include ++extern int puts (const char *); ++extern int printf (const char *, ...); ++extern int dprintf (int, const char *, ...); ++extern void *malloc (size_t); ++ ++// Check varargs macros. These examples are taken from C99 6.10.3.5. ++// dprintf is used instead of fprintf to avoid needing to declare ++// FILE and stderr. ++#define debug(...) dprintf (2, __VA_ARGS__) ++#define showlist(...) puts (#__VA_ARGS__) ++#define report(test,...) ((test) ? 
puts (#test) : printf (__VA_ARGS__)) ++static void ++test_varargs_macros (void) ++{ ++ int x = 1234; ++ int y = 5678; ++ debug ("Flag"); ++ debug ("X = %d\n", x); ++ showlist (The first, second, and third items.); ++ report (x>y, "x is %d but y is %d", x, y); ++} ++ ++// Check long long types. ++#define BIG64 18446744073709551615ull ++#define BIG32 4294967295ul ++#define BIG_OK (BIG64 / BIG32 == 4294967297ull && BIG64 % BIG32 == 0) ++#if !BIG_OK ++ #error "your preprocessor is broken" ++#endif ++#if BIG_OK ++#else ++ #error "your preprocessor is broken" ++#endif ++static long long int bignum = -9223372036854775807LL; ++static unsigned long long int ubignum = BIG64; ++ ++struct incomplete_array ++{ ++ int datasize; ++ double data[]; ++}; ++ ++struct named_init { ++ int number; ++ const wchar_t *name; ++ double average; ++}; ++ ++typedef const char *ccp; ++ ++static inline int ++test_restrict (ccp restrict text) ++{ ++ // See if C++-style comments work. ++ // Iterate through items via the restricted pointer. ++ // Also check for declarations in for loops. ++ for (unsigned int i = 0; *(text+i) != '\''\0'\''; ++i) ++ continue; ++ return 0; ++} ++ ++// Check varargs and va_copy. ++static bool ++test_varargs (const char *format, ...) ++{ ++ va_list args; ++ va_start (args, format); ++ va_list args_copy; ++ va_copy (args_copy, args); ++ ++ const char *str = ""; ++ int number = 0; ++ float fnumber = 0; ++ ++ while (*format) ++ { ++ switch (*format++) ++ { ++ case '\''s'\'': // string ++ str = va_arg (args_copy, const char *); ++ break; ++ case '\''d'\'': // int ++ number = va_arg (args_copy, int); ++ break; ++ case '\''f'\'': // float ++ fnumber = va_arg (args_copy, double); ++ break; ++ default: ++ break; ++ } ++ } ++ va_end (args_copy); ++ va_end (args); ++ ++ return *str && number && fnumber; ++} ++' ++ ++# Test code for whether the C compiler supports C99 (body of main). ++ac_c_conftest_c99_main=' ++ // Check bool. 
++ _Bool success = false; ++ success |= (argc != 0); ++ ++ // Check restrict. ++ if (test_restrict ("String literal") == 0) ++ success = true; ++ char *restrict newvar = "Another string"; ++ ++ // Check varargs. ++ success &= test_varargs ("s, d'\'' f .", "string", 65, 34.234); ++ test_varargs_macros (); ++ ++ // Check flexible array members. ++ struct incomplete_array *ia = ++ malloc (sizeof (struct incomplete_array) + (sizeof (double) * 10)); ++ ia->datasize = 10; ++ for (int i = 0; i < ia->datasize; ++i) ++ ia->data[i] = i * 1.234; ++ ++ // Check named initializers. ++ struct named_init ni = { ++ .number = 34, ++ .name = L"Test wide string", ++ .average = 543.34343, ++ }; ++ ++ ni.number = 58; ++ ++ int dynamic_array[ni.number]; ++ dynamic_array[0] = argv[0][0]; ++ dynamic_array[ni.number - 1] = 543; ++ ++ // work around unused variable warnings ++ ok |= (!success || bignum == 0LL || ubignum == 0uLL || newvar[0] == '\''x'\'' ++ || dynamic_array[ni.number - 1] != 543); ++' ++ ++# Test code for whether the C compiler supports C11 (global declarations) ++ac_c_conftest_c11_globals=' ++// Does the compiler advertise C11 conformance? ++#if !defined __STDC_VERSION__ || __STDC_VERSION__ < 201112L ++# error "Compiler does not advertise C11 conformance" ++#endif ++ ++// Check _Alignas. ++char _Alignas (double) aligned_as_double; ++char _Alignas (0) no_special_alignment; ++extern char aligned_as_int; ++char _Alignas (0) _Alignas (int) aligned_as_int; ++ ++// Check _Alignof. ++enum ++{ ++ int_alignment = _Alignof (int), ++ int_array_alignment = _Alignof (int[100]), ++ char_alignment = _Alignof (char) ++}; ++_Static_assert (0 < -_Alignof (int), "_Alignof is signed"); ++ ++// Check _Noreturn. ++int _Noreturn does_not_return (void) { for (;;) continue; } ++ ++// Check _Static_assert. ++struct test_static_assert ++{ ++ int x; ++ _Static_assert (sizeof (int) <= sizeof (long int), ++ "_Static_assert does not work in struct"); ++ long int y; ++}; ++ ++// Check UTF-8 literals. 
++#define u8 syntax error! ++char const utf8_literal[] = u8"happens to be ASCII" "another string"; ++ ++// Check duplicate typedefs. ++typedef long *long_ptr; ++typedef long int *long_ptr; ++typedef long_ptr long_ptr; ++ ++// Anonymous structures and unions -- taken from C11 6.7.2.1 Example 1. ++struct anonymous ++{ ++ union { ++ struct { int i; int j; }; ++ struct { int k; long int l; } w; ++ }; ++ int m; ++} v1; ++' ++ ++# Test code for whether the C compiler supports C11 (body of main). ++ac_c_conftest_c11_main=' ++ _Static_assert ((offsetof (struct anonymous, i) ++ == offsetof (struct anonymous, w.k)), ++ "Anonymous union alignment botch"); ++ v1.i = 2; ++ v1.w.k = 5; ++ ok |= v1.i != 5; ++' ++ ++# Test code for whether the C compiler supports C11 (complete). ++ac_c_conftest_c11_program="${ac_c_conftest_c89_globals} ++${ac_c_conftest_c99_globals} ++${ac_c_conftest_c11_globals} ++ ++int ++main (int argc, char **argv) ++{ ++ int ok = 0; ++ ${ac_c_conftest_c89_main} ++ ${ac_c_conftest_c99_main} ++ ${ac_c_conftest_c11_main} ++ return ok; ++} ++" ++ ++# Test code for whether the C compiler supports C99 (complete). ++ac_c_conftest_c99_program="${ac_c_conftest_c89_globals} ++${ac_c_conftest_c99_globals} ++ ++int ++main (int argc, char **argv) ++{ ++ int ok = 0; ++ ${ac_c_conftest_c89_main} ++ ${ac_c_conftest_c99_main} ++ return ok; ++} ++" ++ ++# Test code for whether the C compiler supports C89 (complete). 
++ac_c_conftest_c89_program="${ac_c_conftest_c89_globals} ++ ++int ++main (int argc, char **argv) ++{ ++ int ok = 0; ++ ${ac_c_conftest_c89_main} ++ return ok; ++} ++" ++ ++as_fn_append ac_header_c_list " stdlib.h stdlib_h HAVE_STDLIB_H" ++as_fn_append ac_header_c_list " string.h string_h HAVE_STRING_H" ++as_fn_append ac_header_c_list " inttypes.h inttypes_h HAVE_INTTYPES_H" ++as_fn_append ac_header_c_list " stdint.h stdint_h HAVE_STDINT_H" ++as_fn_append ac_header_c_list " strings.h strings_h HAVE_STRINGS_H" ++as_fn_append ac_header_c_list " sys/stat.h sys_stat_h HAVE_SYS_STAT_H" ++as_fn_append ac_header_c_list " sys/types.h sys_types_h HAVE_SYS_TYPES_H" ++as_fn_append ac_header_c_list " unistd.h unistd_h HAVE_UNISTD_H" ++as_fn_append ac_header_c_list " wchar.h wchar_h HAVE_WCHAR_H" ++as_fn_append ac_header_c_list " minix/config.h minix_config_h HAVE_MINIX_CONFIG_H" ++# Test code for whether the C++ compiler supports C++98 (global declarations) ++ac_cxx_conftest_cxx98_globals=' ++// Does the compiler advertise C++98 conformance? ++#if !defined __cplusplus || __cplusplus < 199711L ++# error "Compiler does not advertise C++98 conformance" ++#endif ++ ++// These inclusions are to reject old compilers that ++// lack the unsuffixed header files. ++#include ++#include ++ ++// and are *not* freestanding headers in C++98. ++extern void assert (int); ++namespace std { ++ extern int strcmp (const char *, const char *); ++} ++ ++// Namespaces, exceptions, and templates were all added after "C++ 2.0". ++using std::exception; ++using std::strcmp; ++ ++namespace { ++ ++void test_exception_syntax() ++{ ++ try { ++ throw "test"; ++ } catch (const char *s) { ++ // Extra parentheses suppress a warning when building autoconf itself, ++ // due to lint rules shared with more typical C programs. 
++ assert (!(strcmp) (s, "test")); ++ } ++} ++ ++template struct test_template ++{ ++ T const val; ++ explicit test_template(T t) : val(t) {} ++ template T add(U u) { return static_cast(u) + val; } ++}; ++ ++} // anonymous namespace ++' ++ ++# Test code for whether the C++ compiler supports C++98 (body of main) ++ac_cxx_conftest_cxx98_main=' ++ assert (argc); ++ assert (! argv[0]); ++{ ++ test_exception_syntax (); ++ test_template tt (2.0); ++ assert (tt.add (4) == 6.0); ++ assert (true && !false); ++} ++' ++ ++# Test code for whether the C++ compiler supports C++11 (global declarations) ++ac_cxx_conftest_cxx11_globals=' ++// Does the compiler advertise C++ 2011 conformance? ++#if !defined __cplusplus || __cplusplus < 201103L ++# error "Compiler does not advertise C++11 conformance" ++#endif ++ ++namespace cxx11test ++{ ++ constexpr int get_val() { return 20; } ++ ++ struct testinit ++ { ++ int i; ++ double d; ++ }; ++ ++ class delegate ++ { ++ public: ++ delegate(int n) : n(n) {} ++ delegate(): delegate(2354) {} ++ ++ virtual int getval() { return this->n; }; ++ protected: ++ int n; ++ }; ++ ++ class overridden : public delegate ++ { ++ public: ++ overridden(int n): delegate(n) {} ++ virtual int getval() override final { return this->n * 2; } ++ }; ++ ++ class nocopy ++ { ++ public: ++ nocopy(int i): i(i) {} ++ nocopy() = default; ++ nocopy(const nocopy&) = delete; ++ nocopy & operator=(const nocopy&) = delete; ++ private: ++ int i; ++ }; ++ ++ // for testing lambda expressions ++ template Ret eval(Fn f, Ret v) ++ { ++ return f(v); ++ } ++ ++ // for testing variadic templates and trailing return types ++ template auto sum(V first) -> V ++ { ++ return first; ++ } ++ template auto sum(V first, Args... 
rest) -> V ++ { ++ return first + sum(rest...); ++ } ++} ++' ++ ++# Test code for whether the C++ compiler supports C++11 (body of main) ++ac_cxx_conftest_cxx11_main=' ++{ ++ // Test auto and decltype ++ auto a1 = 6538; ++ auto a2 = 48573953.4; ++ auto a3 = "String literal"; ++ ++ int total = 0; ++ for (auto i = a3; *i; ++i) { total += *i; } ++ ++ decltype(a2) a4 = 34895.034; ++} ++{ ++ // Test constexpr ++ short sa[cxx11test::get_val()] = { 0 }; ++} ++{ ++ // Test initializer lists ++ cxx11test::testinit il = { 4323, 435234.23544 }; ++} ++{ ++ // Test range-based for ++ int array[] = {9, 7, 13, 15, 4, 18, 12, 10, 5, 3, ++ 14, 19, 17, 8, 6, 20, 16, 2, 11, 1}; ++ for (auto &x : array) { x += 23; } ++} ++{ ++ // Test lambda expressions ++ using cxx11test::eval; ++ assert (eval ([](int x) { return x*2; }, 21) == 42); ++ double d = 2.0; ++ assert (eval ([&](double x) { return d += x; }, 3.0) == 5.0); ++ assert (d == 5.0); ++ assert (eval ([=](double x) mutable { return d += x; }, 4.0) == 9.0); ++ assert (d == 5.0); ++} ++{ ++ // Test use of variadic templates ++ using cxx11test::sum; ++ auto a = sum(1); ++ auto b = sum(1, 2); ++ auto c = sum(1.0, 2.0, 3.0); ++} ++{ ++ // Test constructor delegation ++ cxx11test::delegate d1; ++ cxx11test::delegate d2(); ++ cxx11test::delegate d3(45); ++} ++{ ++ // Test override and final ++ cxx11test::overridden o1(55464); ++} ++{ ++ // Test nullptr ++ char *c = nullptr; ++} ++{ ++ // Test template brackets ++ test_template<::test_template> v(test_template(12)); ++} ++{ ++ // Unicode literals ++ char const *utf8 = u8"UTF-8 string \u2500"; ++ char16_t const *utf16 = u"UTF-8 string \u2500"; ++ char32_t const *utf32 = U"UTF-32 string \u2500"; ++} ++' ++ ++# Test code for whether the C compiler supports C++11 (complete). 
++ac_cxx_conftest_cxx11_program="${ac_cxx_conftest_cxx98_globals} ++${ac_cxx_conftest_cxx11_globals} ++ ++int ++main (int argc, char **argv) ++{ ++ int ok = 0; ++ ${ac_cxx_conftest_cxx98_main} ++ ${ac_cxx_conftest_cxx11_main} ++ return ok; ++} ++" ++ ++# Test code for whether the C compiler supports C++98 (complete). ++ac_cxx_conftest_cxx98_program="${ac_cxx_conftest_cxx98_globals} ++int ++main (int argc, char **argv) ++{ ++ int ok = 0; ++ ${ac_cxx_conftest_cxx98_main} ++ return ok; ++} ++" ++ ++ ++# Auxiliary files required by this configure script. ++ac_aux_files="ltmain.sh compile missing install-sh config.guess config.sub" ++ ++# Locations in which to look for auxiliary files. ++ac_aux_dir_candidates="${srcdir}${PATH_SEPARATOR}${srcdir}/..${PATH_SEPARATOR}${srcdir}/../.." ++ ++# Search for a directory containing all of the required auxiliary files, ++# $ac_aux_files, from the $PATH-style list $ac_aux_dir_candidates. ++# If we don't find one directory that contains all the files we need, ++# we report the set of missing files from the *first* directory in ++# $ac_aux_dir_candidates and give up. ++ac_missing_aux_files="" ++ac_first_candidate=: ++printf "%s\n" "$as_me:${as_lineno-$LINENO}: looking for aux files: $ac_aux_files" >&5 ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++as_found=false ++for as_dir in $ac_aux_dir_candidates ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ as_found=: ++ ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: trying $as_dir" >&5 ++ ac_aux_dir_found=yes ++ ac_install_sh= ++ for ac_aux in $ac_aux_files ++ do ++ # As a special case, if "install-sh" is required, that requirement ++ # can be satisfied by any of "install-sh", "install.sh", or "shtool", ++ # and $ac_install_sh is set appropriately for whichever one is found. 
++ if test x"$ac_aux" = x"install-sh" ++ then ++ if test -f "${as_dir}install-sh"; then ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: ${as_dir}install-sh found" >&5 ++ ac_install_sh="${as_dir}install-sh -c" ++ elif test -f "${as_dir}install.sh"; then ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: ${as_dir}install.sh found" >&5 ++ ac_install_sh="${as_dir}install.sh -c" ++ elif test -f "${as_dir}shtool"; then ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: ${as_dir}shtool found" >&5 ++ ac_install_sh="${as_dir}shtool install -c" ++ else ++ ac_aux_dir_found=no ++ if $ac_first_candidate; then ++ ac_missing_aux_files="${ac_missing_aux_files} install-sh" ++ else ++ break ++ fi ++ fi ++ else ++ if test -f "${as_dir}${ac_aux}"; then ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: ${as_dir}${ac_aux} found" >&5 ++ else ++ ac_aux_dir_found=no ++ if $ac_first_candidate; then ++ ac_missing_aux_files="${ac_missing_aux_files} ${ac_aux}" ++ else ++ break ++ fi ++ fi ++ fi ++ done ++ if test "$ac_aux_dir_found" = yes; then ++ ac_aux_dir="$as_dir" ++ break ++ fi ++ ac_first_candidate=false ++ ++ as_found=false ++done ++IFS=$as_save_IFS ++if $as_found ++then : ++ ++else $as_nop ++ as_fn_error $? "cannot find required auxiliary files:$ac_missing_aux_files" "$LINENO" 5 ++fi ++ ++ ++# These three variables are undocumented and unsupported, ++# and are intended to be withdrawn in a future Autoconf release. ++# They can cause serious problems if a builder's source tree is in a directory ++# whose full name contains unusual characters. ++if test -f "${ac_aux_dir}config.guess"; then ++ ac_config_guess="$SHELL ${ac_aux_dir}config.guess" ++fi ++if test -f "${ac_aux_dir}config.sub"; then ++ ac_config_sub="$SHELL ${ac_aux_dir}config.sub" ++fi ++if test -f "$ac_aux_dir/configure"; then ++ ac_configure="$SHELL ${ac_aux_dir}configure" ++fi ++ ++# Check that the precious variables saved in the cache have kept the same ++# value. 
++ac_cache_corrupted=false ++for ac_var in $ac_precious_vars; do ++ eval ac_old_set=\$ac_cv_env_${ac_var}_set ++ eval ac_new_set=\$ac_env_${ac_var}_set ++ eval ac_old_val=\$ac_cv_env_${ac_var}_value ++ eval ac_new_val=\$ac_env_${ac_var}_value ++ case $ac_old_set,$ac_new_set in ++ set,) ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 ++printf "%s\n" "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} ++ ac_cache_corrupted=: ;; ++ ,set) ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 ++printf "%s\n" "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} ++ ac_cache_corrupted=: ;; ++ ,);; ++ *) ++ if test "x$ac_old_val" != "x$ac_new_val"; then ++ # differences in whitespace do not lead to failure. ++ ac_old_val_w=`echo x $ac_old_val` ++ ac_new_val_w=`echo x $ac_new_val` ++ if test "$ac_old_val_w" != "$ac_new_val_w"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 ++printf "%s\n" "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} ++ ac_cache_corrupted=: ++ else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 ++printf "%s\n" "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} ++ eval $ac_var=\$ac_old_val ++ fi ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 ++printf "%s\n" "$as_me: former value: \`$ac_old_val'" >&2;} ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 ++printf "%s\n" "$as_me: current value: \`$ac_new_val'" >&2;} ++ fi;; ++ esac ++ # Pass precious variables to config.status. 
++ if test "$ac_new_set" = set; then ++ case $ac_new_val in ++ *\'*) ac_arg=$ac_var=`printf "%s\n" "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; ++ *) ac_arg=$ac_var=$ac_new_val ;; ++ esac ++ case " $ac_configure_args " in ++ *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. ++ *) as_fn_append ac_configure_args " '$ac_arg'" ;; ++ esac ++ fi ++done ++if $ac_cache_corrupted; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 ++printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 ++printf "%s\n" "$as_me: error: changes in the environment can compromise the build" >&2;} ++ as_fn_error $? "run \`${MAKE-make} distclean' and/or \`rm $cache_file' ++ and start over" "$LINENO" 5 ++fi ++## -------------------- ## ++## Main body of script. ## ++## -------------------- ## ++ ++ac_ext=c ++ac_cpp='$CPP $CPPFLAGS' ++ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ++ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ++ac_compiler_gnu=$ac_cv_c_compiler_gnu ++ ++ ++ ++ ++ ++ # Make sure we can run config.sub. ++$SHELL "${ac_aux_dir}config.sub" sun4 >/dev/null 2>&1 || ++ as_fn_error $? "cannot run $SHELL ${ac_aux_dir}config.sub" "$LINENO" 5 ++ ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking build system type" >&5 ++printf %s "checking build system type... " >&6; } ++if test ${ac_cv_build+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ ac_build_alias=$build_alias ++test "x$ac_build_alias" = x && ++ ac_build_alias=`$SHELL "${ac_aux_dir}config.guess"` ++test "x$ac_build_alias" = x && ++ as_fn_error $? "cannot guess build type; you must specify one" "$LINENO" 5 ++ac_cv_build=`$SHELL "${ac_aux_dir}config.sub" $ac_build_alias` || ++ as_fn_error $? 
"$SHELL ${ac_aux_dir}config.sub $ac_build_alias failed" "$LINENO" 5 ++ ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5 ++printf "%s\n" "$ac_cv_build" >&6; } ++case $ac_cv_build in ++*-*-*) ;; ++*) as_fn_error $? "invalid value of canonical build" "$LINENO" 5;; ++esac ++build=$ac_cv_build ++ac_save_IFS=$IFS; IFS='-' ++set x $ac_cv_build ++shift ++build_cpu=$1 ++build_vendor=$2 ++shift; shift ++# Remember, the first character of IFS is used to create $*, ++# except with old shells: ++build_os=$* ++IFS=$ac_save_IFS ++case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac ++ ++ ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking host system type" >&5 ++printf %s "checking host system type... " >&6; } ++if test ${ac_cv_host+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test "x$host_alias" = x; then ++ ac_cv_host=$ac_cv_build ++else ++ ac_cv_host=`$SHELL "${ac_aux_dir}config.sub" $host_alias` || ++ as_fn_error $? "$SHELL ${ac_aux_dir}config.sub $host_alias failed" "$LINENO" 5 ++fi ++ ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5 ++printf "%s\n" "$ac_cv_host" >&6; } ++case $ac_cv_host in ++*-*-*) ;; ++*) as_fn_error $? "invalid value of canonical host" "$LINENO" 5;; ++esac ++host=$ac_cv_host ++ac_save_IFS=$IFS; IFS='-' ++set x $ac_cv_host ++shift ++host_cpu=$1 ++host_vendor=$2 ++shift; shift ++# Remember, the first character of IFS is used to create $*, ++# except with old shells: ++host_os=$* ++IFS=$ac_save_IFS ++case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac ++ ++ ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking target system type" >&5 ++printf %s "checking target system type... " >&6; } ++if test ${ac_cv_target+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test "x$target_alias" = x; then ++ ac_cv_target=$ac_cv_host ++else ++ ac_cv_target=`$SHELL "${ac_aux_dir}config.sub" $target_alias` || ++ as_fn_error $? 
"$SHELL ${ac_aux_dir}config.sub $target_alias failed" "$LINENO" 5 ++fi ++ ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_target" >&5 ++printf "%s\n" "$ac_cv_target" >&6; } ++case $ac_cv_target in ++*-*-*) ;; ++*) as_fn_error $? "invalid value of canonical target" "$LINENO" 5;; ++esac ++target=$ac_cv_target ++ac_save_IFS=$IFS; IFS='-' ++set x $ac_cv_target ++shift ++target_cpu=$1 ++target_vendor=$2 ++shift; shift ++# Remember, the first character of IFS is used to create $*, ++# except with old shells: ++target_os=$* ++IFS=$ac_save_IFS ++case $target_os in *\ *) target_os=`echo "$target_os" | sed 's/ /-/g'`;; esac ++ ++ ++# The aliases save the names the user supplied, while $host etc. ++# will get canonicalized. ++test -n "$target_alias" && ++ test "$program_prefix$program_suffix$program_transform_name" = \ ++ NONENONEs,x,x, && ++ program_prefix=${target_alias}- ++GCC_TOPLEV_SUBDIRS ++am__api_version='1.16' ++ ++ ++ # Find a good install program. We prefer a C program (faster), ++# so one script is as good as another. But avoid the broken or ++# incompatible versions: ++# SysV /etc/install, /usr/sbin/install ++# SunOS /usr/etc/install ++# IRIX /sbin/install ++# AIX /bin/install ++# AmigaOS /C/install, which installs bootblocks on floppy discs ++# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag ++# AFS /usr/afsws/bin/install, which mishandles nonexistent args ++# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" ++# OS/2's system install, which has a completely different semantic ++# ./install, which can be erroneously created by make from ./install.sh. ++# Reject install programs that cannot install multiple files. ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5 ++printf %s "checking for a BSD-compatible install... 
" >&6; } ++if test -z "$INSTALL"; then ++if test ${ac_cv_path_install+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ # Account for fact that we put trailing slashes in our PATH walk. ++case $as_dir in #(( ++ ./ | /[cC]/* | \ ++ /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ ++ ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \ ++ /usr/ucb/* ) ;; ++ *) ++ # OSF1 and SCO ODT 3.0 have their own names for install. ++ # Don't use installbsd from OSF since it installs stuff as root ++ # by default. ++ for ac_prog in ginstall scoinst install; do ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_prog$ac_exec_ext"; then ++ if test $ac_prog = install && ++ grep dspmsg "$as_dir$ac_prog$ac_exec_ext" >/dev/null 2>&1; then ++ # AIX install. It has an incompatible calling convention. ++ : ++ elif test $ac_prog = install && ++ grep pwplus "$as_dir$ac_prog$ac_exec_ext" >/dev/null 2>&1; then ++ # program-specific install script used by HP pwplus--don't use. ++ : ++ else ++ rm -rf conftest.one conftest.two conftest.dir ++ echo one > conftest.one ++ echo two > conftest.two ++ mkdir conftest.dir ++ if "$as_dir$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir/" && ++ test -s conftest.one && test -s conftest.two && ++ test -s conftest.dir/conftest.one && ++ test -s conftest.dir/conftest.two ++ then ++ ac_cv_path_install="$as_dir$ac_prog$ac_exec_ext -c" ++ break 3 ++ fi ++ fi ++ fi ++ done ++ done ++ ;; ++esac ++ ++ done ++IFS=$as_save_IFS ++ ++rm -rf conftest.one conftest.two conftest.dir ++ ++fi ++ if test ${ac_cv_path_install+y}; then ++ INSTALL=$ac_cv_path_install ++ else ++ # As a last resort, use the slow shell script. 
Don't cache a ++ # value for INSTALL within a source directory, because that will ++ # break other packages using the cache if that directory is ++ # removed, or if the value is a relative name. ++ INSTALL=$ac_install_sh ++ fi ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5 ++printf "%s\n" "$INSTALL" >&6; } ++ ++# Use test -z because SunOS4 sh mishandles braces in ${var-val}. ++# It thinks the first close brace ends the variable substitution. ++test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}' ++ ++test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' ++ ++test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' ++ ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether build environment is sane" >&5 ++printf %s "checking whether build environment is sane... " >&6; } ++# Reject unsafe characters in $srcdir or the absolute working directory ++# name. Accept space and tab only in the latter. ++am_lf=' ++' ++case `pwd` in ++ *[\\\"\#\$\&\'\`$am_lf]*) ++ as_fn_error $? "unsafe absolute working directory name" "$LINENO" 5;; ++esac ++case $srcdir in ++ *[\\\"\#\$\&\'\`$am_lf\ \ ]*) ++ as_fn_error $? "unsafe srcdir value: '$srcdir'" "$LINENO" 5;; ++esac ++ ++# Do 'set' in a subshell so we don't clobber the current shell's ++# arguments. Must try -L first in case configure is actually a ++# symlink; some systems play weird games with the mod time of symlinks ++# (eg FreeBSD returns the mod time of the symlink's containing ++# directory). ++if ( ++ am_has_slept=no ++ for am_try in 1 2; do ++ echo "timestamp, slept: $am_has_slept" > conftest.file ++ set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null` ++ if test "$*" = "X"; then ++ # -L didn't work. ++ set X `ls -t "$srcdir/configure" conftest.file` ++ fi ++ if test "$*" != "X $srcdir/configure conftest.file" \ ++ && test "$*" != "X conftest.file $srcdir/configure"; then ++ ++ # If neither matched, then we have a broken ls. 
This can happen ++ # if, for instance, CONFIG_SHELL is bash and it inherits a ++ # broken ls alias from the environment. This has actually ++ # happened. Such a system could not be considered "sane". ++ as_fn_error $? "ls -t appears to fail. Make sure there is not a broken ++ alias in your environment" "$LINENO" 5 ++ fi ++ if test "$2" = conftest.file || test $am_try -eq 2; then ++ break ++ fi ++ # Just in case. ++ sleep 1 ++ am_has_slept=yes ++ done ++ test "$2" = conftest.file ++ ) ++then ++ # Ok. ++ : ++else ++ as_fn_error $? "newly created file is older than distributed files! ++Check your system clock" "$LINENO" 5 ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5 ++printf "%s\n" "yes" >&6; } ++# If we didn't sleep, we still need to ensure time stamps of config.status and ++# generated files are strictly newer. ++am_sleep_pid= ++if grep 'slept: no' conftest.file >/dev/null 2>&1; then ++ ( sleep 1 ) & ++ am_sleep_pid=$! ++fi ++ ++rm -f conftest.file ++ ++test "$program_prefix" != NONE && ++ program_transform_name="s&^&$program_prefix&;$program_transform_name" ++# Use a double $ so make ignores it. ++test "$program_suffix" != NONE && ++ program_transform_name="s&\$&$program_suffix&;$program_transform_name" ++# Double any \ or $. ++# By default was `s,x,x', remove it if useless. ++ac_script='s/[\\$]/&&/g;s/;s,x,x,$//' ++program_transform_name=`printf "%s\n" "$program_transform_name" | sed "$ac_script"` ++ ++ ++# Expand $ac_aux_dir to an absolute path. 
++am_aux_dir=`cd "$ac_aux_dir" && pwd` ++ ++ ++ if test x"${MISSING+set}" != xset; then ++ MISSING="\${SHELL} '$am_aux_dir/missing'" ++fi ++# Use eval to expand $SHELL ++if eval "$MISSING --is-lightweight"; then ++ am_missing_run="$MISSING " ++else ++ am_missing_run= ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: 'missing' script is too old or missing" >&5 ++printf "%s\n" "$as_me: WARNING: 'missing' script is too old or missing" >&2;} ++fi ++ ++if test x"${install_sh+set}" != xset; then ++ case $am_aux_dir in ++ *\ * | *\ *) ++ install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;; ++ *) ++ install_sh="\${SHELL} $am_aux_dir/install-sh" ++ esac ++fi ++ ++# Installed binaries are usually stripped using 'strip' when the user ++# run "make install-strip". However 'strip' might not be the right ++# tool to use in cross-compilation environments, therefore Automake ++# will honor the 'STRIP' environment variable to overrule this program. ++if test "$cross_compiling" != no; then ++ if test -n "$ac_tool_prefix"; then ++ # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. ++set dummy ${ac_tool_prefix}strip; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... " >&6; } ++if test ${ac_cv_prog_STRIP+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$STRIP"; then ++ ac_cv_prog_STRIP="$STRIP" # Let the user override the test. 
++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_STRIP="${ac_tool_prefix}strip" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++STRIP=$ac_cv_prog_STRIP ++if test -n "$STRIP"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5 ++printf "%s\n" "$STRIP" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ ++fi ++if test -z "$ac_cv_prog_STRIP"; then ++ ac_ct_STRIP=$STRIP ++ # Extract the first word of "strip", so it can be a program name with args. ++set dummy strip; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... " >&6; } ++if test ${ac_cv_prog_ac_ct_STRIP+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$ac_ct_STRIP"; then ++ ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. 
++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_ac_ct_STRIP="strip" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP ++if test -n "$ac_ct_STRIP"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5 ++printf "%s\n" "$ac_ct_STRIP" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ if test "x$ac_ct_STRIP" = x; then ++ STRIP=":" ++ else ++ case $cross_compiling:$ac_tool_warned in ++yes:) ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++ac_tool_warned=yes ;; ++esac ++ STRIP=$ac_ct_STRIP ++ fi ++else ++ STRIP="$ac_cv_prog_STRIP" ++fi ++ ++fi ++INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" ++ ++ ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for a race-free mkdir -p" >&5 ++printf %s "checking for a race-free mkdir -p... 
" >&6; } ++if test -z "$MKDIR_P"; then ++ if test ${ac_cv_path_mkdir+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH$PATH_SEPARATOR/opt/sfw/bin ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_prog in mkdir gmkdir; do ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ as_fn_executable_p "$as_dir$ac_prog$ac_exec_ext" || continue ++ case `"$as_dir$ac_prog$ac_exec_ext" --version 2>&1` in #( ++ 'mkdir ('*'coreutils) '* | \ ++ 'BusyBox '* | \ ++ 'mkdir (fileutils) '4.1*) ++ ac_cv_path_mkdir=$as_dir$ac_prog$ac_exec_ext ++ break 3;; ++ esac ++ done ++ done ++ done ++IFS=$as_save_IFS ++ ++fi ++ ++ test -d ./--version && rmdir ./--version ++ if test ${ac_cv_path_mkdir+y}; then ++ MKDIR_P="$ac_cv_path_mkdir -p" ++ else ++ # As a last resort, use the slow shell script. Don't cache a ++ # value for MKDIR_P within a source directory, because that will ++ # break other packages using the cache if that directory is ++ # removed, or if the value is a relative name. ++ MKDIR_P="$ac_install_sh -d" ++ fi ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $MKDIR_P" >&5 ++printf "%s\n" "$MKDIR_P" >&6; } ++ ++for ac_prog in gawk mawk nawk awk ++do ++ # Extract the first word of "$ac_prog", so it can be a program name with args. ++set dummy $ac_prog; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... " >&6; } ++if test ${ac_cv_prog_AWK+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$AWK"; then ++ ac_cv_prog_AWK="$AWK" # Let the user override the test. 
++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_AWK="$ac_prog" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++AWK=$ac_cv_prog_AWK ++if test -n "$AWK"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5 ++printf "%s\n" "$AWK" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ ++ test -n "$AWK" && break ++done ++ ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5 ++printf %s "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; } ++set x ${MAKE-make} ++ac_make=`printf "%s\n" "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'` ++if eval test \${ac_cv_prog_make_${ac_make}_set+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ cat >conftest.make <<\_ACEOF ++SHELL = /bin/sh ++all: ++ @echo '@@@%%%=$(MAKE)=@@@%%%' ++_ACEOF ++# GNU make sometimes prints "make[1]: Entering ...", which would confuse us. ++case `${MAKE-make} -f conftest.make 2>/dev/null` in ++ *@@@%%%=?*=@@@%%%*) ++ eval ac_cv_prog_make_${ac_make}_set=yes;; ++ *) ++ eval ac_cv_prog_make_${ac_make}_set=no;; ++esac ++rm -f conftest.make ++fi ++if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5 ++printf "%s\n" "yes" >&6; } ++ SET_MAKE= ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++ SET_MAKE="MAKE=${MAKE-make}" ++fi ++ ++rm -rf .tst 2>/dev/null ++mkdir .tst 2>/dev/null ++if test -d .tst; then ++ am__leading_dot=. 
++else ++ am__leading_dot=_ ++fi ++rmdir .tst 2>/dev/null ++ ++# Check whether --enable-silent-rules was given. ++if test ${enable_silent_rules+y} ++then : ++ enableval=$enable_silent_rules; ++fi ++ ++case $enable_silent_rules in # ((( ++ yes) AM_DEFAULT_VERBOSITY=0;; ++ no) AM_DEFAULT_VERBOSITY=1;; ++ *) AM_DEFAULT_VERBOSITY=1;; ++esac ++am_make=${MAKE-make} ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $am_make supports nested variables" >&5 ++printf %s "checking whether $am_make supports nested variables... " >&6; } ++if test ${am_cv_make_support_nested_variables+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if printf "%s\n" 'TRUE=$(BAR$(V)) ++BAR0=false ++BAR1=true ++V=1 ++am__doit: ++ @$(TRUE) ++.PHONY: am__doit' | $am_make -f - >/dev/null 2>&1; then ++ am_cv_make_support_nested_variables=yes ++else ++ am_cv_make_support_nested_variables=no ++fi ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $am_cv_make_support_nested_variables" >&5 ++printf "%s\n" "$am_cv_make_support_nested_variables" >&6; } ++if test $am_cv_make_support_nested_variables = yes; then ++ AM_V='$(V)' ++ AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)' ++else ++ AM_V=$AM_DEFAULT_VERBOSITY ++ AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY ++fi ++AM_BACKSLASH='\' ++ ++if test "`cd $srcdir && pwd`" != "`pwd`"; then ++ # Use -I$(srcdir) only when $(srcdir) != ., so that make's output ++ # is not polluted with repeated "-I." ++ am__isrc=' -I$(srcdir)' ++ # test to see if srcdir already configured ++ if test -f $srcdir/config.status; then ++ as_fn_error $? "source directory already configured; run \"make distclean\" there first" "$LINENO" 5 ++ fi ++fi ++ ++# test whether we have cygpath ++if test -z "$CYGPATH_W"; then ++ if (cygpath --version) >/dev/null 2>/dev/null; then ++ CYGPATH_W='cygpath -w' ++ else ++ CYGPATH_W=echo ++ fi ++fi ++ ++ ++# Define the identity of the package. 
++ PACKAGE='bolt-plugin' ++ VERSION='0.1' ++ ++ ++printf "%s\n" "#define PACKAGE \"$PACKAGE\"" >>confdefs.h ++ ++ ++printf "%s\n" "#define VERSION \"$VERSION\"" >>confdefs.h ++ ++# Some tools Automake needs. ++ ++ACLOCAL=${ACLOCAL-"${am_missing_run}aclocal-${am__api_version}"} ++ ++ ++AUTOCONF=${AUTOCONF-"${am_missing_run}autoconf"} ++ ++ ++AUTOMAKE=${AUTOMAKE-"${am_missing_run}automake-${am__api_version}"} ++ ++ ++AUTOHEADER=${AUTOHEADER-"${am_missing_run}autoheader"} ++ ++ ++MAKEINFO=${MAKEINFO-"${am_missing_run}makeinfo"} ++ ++# For better backward compatibility. To be removed once Automake 1.9.x ++# dies out for good. For more background, see: ++# ++# ++mkdir_p='$(MKDIR_P)' ++ ++# We need awk for the "check" target (and possibly the TAP driver). The ++# system "awk" is bad on some platforms. ++# Always define AMTAR for backward compatibility. Yes, it's still used ++# in the wild :-( We should find a proper way to deprecate it ... ++AMTAR='$${TAR-tar}' ++ ++ ++# We'll loop over all known methods to create a tar archive until one works. ++_am_tools='gnutar pax cpio none' ++ ++am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -' ++ ++ ++ ++ ++ ++# Variables for tags utilities; see am/tags.am ++if test -z "$CTAGS"; then ++ CTAGS=ctags ++fi ++ ++if test -z "$ETAGS"; then ++ ETAGS=etags ++fi ++ ++if test -z "$CSCOPE"; then ++ CSCOPE=cscope ++fi ++ ++ ++ ++# POSIX will say in a future version that running "rm -f" with no argument ++# is OK; and we want to be able to make that assumption in our Makefile ++# recipes. So use an aggressive probe to check that the usage we want is ++# actually supported "in the wild" to an acceptable degree. ++# See automake bug#10828. ++# To make any issue more visible, cause the running configure to be aborted ++# by default if the 'rm' program in use doesn't match our expectations; the ++# user can still override this though. ++if rm -f && rm -fr && rm -rf; then : OK; else ++ cat >&2 <<'END' ++Oops! 
++ ++Your 'rm' program seems unable to run without file operands specified ++on the command line, even when the '-f' option is present. This is contrary ++to the behaviour of most rm programs out there, and not conforming with ++the upcoming POSIX standard: ++ ++Please tell bug-automake@gnu.org about your system, including the value ++of your $PATH and any error possibly output before this message. This ++can help us improve future automake versions. ++ ++END ++ if test x"$ACCEPT_INFERIOR_RM_PROGRAM" = x"yes"; then ++ echo 'Configuration will proceed anyway, since you have set the' >&2 ++ echo 'ACCEPT_INFERIOR_RM_PROGRAM variable to "yes"' >&2 ++ echo >&2 ++ else ++ cat >&2 <<'END' ++Aborting the configuration process, to ensure you take notice of the issue. ++ ++You can download and install GNU coreutils to get an 'rm' implementation ++that behaves properly: . ++ ++If you want to complete the configuration process using your problematic ++'rm' anyway, export the environment variable ACCEPT_INFERIOR_RM_PROGRAM ++to "yes", and re-run configure. ++ ++END ++ as_fn_error $? "Your 'rm' program is bad, sorry." "$LINENO" 5 ++ fi ++fi ++ ++ ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether to enable maintainer-specific portions of Makefiles" >&5 ++printf %s "checking whether to enable maintainer-specific portions of Makefiles... " >&6; } ++ # Check whether --enable-maintainer-mode was given. ++if test ${enable_maintainer_mode+y} ++then : ++ enableval=$enable_maintainer_mode; USE_MAINTAINER_MODE=$enableval ++else $as_nop ++ USE_MAINTAINER_MODE=no ++fi ++ ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $USE_MAINTAINER_MODE" >&5 ++printf "%s\n" "$USE_MAINTAINER_MODE" >&6; } ++ if test $USE_MAINTAINER_MODE = yes; then ++ MAINTAINER_MODE_TRUE= ++ MAINTAINER_MODE_FALSE='#' ++else ++ MAINTAINER_MODE_TRUE='#' ++ MAINTAINER_MODE_FALSE= ++fi ++ ++ MAINT=$MAINTAINER_MODE_TRUE ++ ++ ++ ++# Check whether --with-libiberty was given. 
++if test ${with_libiberty+y} ++then : ++ withval=$with_libiberty; ++else $as_nop ++ with_libiberty=../libiberty ++fi ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++DEPDIR="${am__leading_dot}deps" ++ ++ac_config_commands="$ac_config_commands depfiles" ++ ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} supports the include directive" >&5 ++printf %s "checking whether ${MAKE-make} supports the include directive... " >&6; } ++cat > confinc.mk << 'END' ++am__doit: ++ @echo this is the am__doit target >confinc.out ++.PHONY: am__doit ++END ++am__include="#" ++am__quote= ++# BSD make does it like this. ++echo '.include "confinc.mk" # ignored' > confmf.BSD ++# Other make implementations (GNU, Solaris 10, AIX) do it like this. ++echo 'include confinc.mk # ignored' > confmf.GNU ++_am_result=no ++for s in GNU BSD; do ++ { echo "$as_me:$LINENO: ${MAKE-make} -f confmf.$s && cat confinc.out" >&5 ++ (${MAKE-make} -f confmf.$s && cat confinc.out) >&5 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); } ++ case $?:`cat confinc.out 2>/dev/null` in #( ++ '0:this is the am__doit target') : ++ case $s in #( ++ BSD) : ++ am__include='.include' am__quote='"' ;; #( ++ *) : ++ am__include='include' am__quote='' ;; ++esac ;; #( ++ *) : ++ ;; ++esac ++ if test "$am__include" != "#"; then ++ _am_result="yes ($s style)" ++ break ++ fi ++done ++rm -f confinc.* confmf.* ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: ${_am_result}" >&5 ++printf "%s\n" "${_am_result}" >&6; } ++ ++# Check whether --enable-dependency-tracking was given. 
++if test ${enable_dependency_tracking+y} ++then : ++ enableval=$enable_dependency_tracking; ++fi ++ ++if test "x$enable_dependency_tracking" != xno; then ++ am_depcomp="$ac_aux_dir/depcomp" ++ AMDEPBACKSLASH='\' ++ am__nodep='_no' ++fi ++ if test "x$enable_dependency_tracking" != xno; then ++ AMDEP_TRUE= ++ AMDEP_FALSE='#' ++else ++ AMDEP_TRUE='#' ++ AMDEP_FALSE= ++fi ++ ++ ++ac_ext=c ++ac_cpp='$CPP $CPPFLAGS' ++ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ++ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ++ac_compiler_gnu=$ac_cv_c_compiler_gnu ++if test -n "$ac_tool_prefix"; then ++ # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. ++set dummy ${ac_tool_prefix}gcc; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... " >&6; } ++if test ${ac_cv_prog_CC+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$CC"; then ++ ac_cv_prog_CC="$CC" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_CC="${ac_tool_prefix}gcc" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++CC=$ac_cv_prog_CC ++if test -n "$CC"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 ++printf "%s\n" "$CC" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ ++fi ++if test -z "$ac_cv_prog_CC"; then ++ ac_ct_CC=$CC ++ # Extract the first word of "gcc", so it can be a program name with args. 
++set dummy gcc; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... " >&6; } ++if test ${ac_cv_prog_ac_ct_CC+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$ac_ct_CC"; then ++ ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_ac_ct_CC="gcc" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++ac_ct_CC=$ac_cv_prog_ac_ct_CC ++if test -n "$ac_ct_CC"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 ++printf "%s\n" "$ac_ct_CC" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ if test "x$ac_ct_CC" = x; then ++ CC="" ++ else ++ case $cross_compiling:$ac_tool_warned in ++yes:) ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++ac_tool_warned=yes ;; ++esac ++ CC=$ac_ct_CC ++ fi ++else ++ CC="$ac_cv_prog_CC" ++fi ++ ++if test -z "$CC"; then ++ if test -n "$ac_tool_prefix"; then ++ # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. ++set dummy ${ac_tool_prefix}cc; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... " >&6; } ++if test ${ac_cv_prog_CC+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$CC"; then ++ ac_cv_prog_CC="$CC" # Let the user override the test. 
++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_CC="${ac_tool_prefix}cc" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++CC=$ac_cv_prog_CC ++if test -n "$CC"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 ++printf "%s\n" "$CC" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ ++ fi ++fi ++if test -z "$CC"; then ++ # Extract the first word of "cc", so it can be a program name with args. ++set dummy cc; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... " >&6; } ++if test ${ac_cv_prog_CC+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$CC"; then ++ ac_cv_prog_CC="$CC" # Let the user override the test. ++else ++ ac_prog_rejected=no ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ if test "$as_dir$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then ++ ac_prog_rejected=yes ++ continue ++ fi ++ ac_cv_prog_CC="cc" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++if test $ac_prog_rejected = yes; then ++ # We found a bogon in the path, so make sure we never use it. ++ set dummy $ac_cv_prog_CC ++ shift ++ if test $# != 0; then ++ # We chose a different compiler from the bogus one. 
++ # However, it has the same basename, so the bogon will be chosen ++ # first if we set CC to just the basename; use the full file name. ++ shift ++ ac_cv_prog_CC="$as_dir$ac_word${1+' '}$@" ++ fi ++fi ++fi ++fi ++CC=$ac_cv_prog_CC ++if test -n "$CC"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 ++printf "%s\n" "$CC" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ ++fi ++if test -z "$CC"; then ++ if test -n "$ac_tool_prefix"; then ++ for ac_prog in cl.exe ++ do ++ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. ++set dummy $ac_tool_prefix$ac_prog; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... " >&6; } ++if test ${ac_cv_prog_CC+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$CC"; then ++ ac_cv_prog_CC="$CC" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_CC="$ac_tool_prefix$ac_prog" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++CC=$ac_cv_prog_CC ++if test -n "$CC"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 ++printf "%s\n" "$CC" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ ++ test -n "$CC" && break ++ done ++fi ++if test -z "$CC"; then ++ ac_ct_CC=$CC ++ for ac_prog in cl.exe ++do ++ # Extract the first word of "$ac_prog", so it can be a program name with args. 
++set dummy $ac_prog; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... " >&6; } ++if test ${ac_cv_prog_ac_ct_CC+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$ac_ct_CC"; then ++ ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_ac_ct_CC="$ac_prog" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++ac_ct_CC=$ac_cv_prog_ac_ct_CC ++if test -n "$ac_ct_CC"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 ++printf "%s\n" "$ac_ct_CC" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ ++ test -n "$ac_ct_CC" && break ++done ++ ++ if test "x$ac_ct_CC" = x; then ++ CC="" ++ else ++ case $cross_compiling:$ac_tool_warned in ++yes:) ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++ac_tool_warned=yes ;; ++esac ++ CC=$ac_ct_CC ++ fi ++fi ++ ++fi ++if test -z "$CC"; then ++ if test -n "$ac_tool_prefix"; then ++ # Extract the first word of "${ac_tool_prefix}clang", so it can be a program name with args. ++set dummy ${ac_tool_prefix}clang; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... 
" >&6; } ++if test ${ac_cv_prog_CC+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$CC"; then ++ ac_cv_prog_CC="$CC" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_CC="${ac_tool_prefix}clang" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++CC=$ac_cv_prog_CC ++if test -n "$CC"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 ++printf "%s\n" "$CC" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ ++fi ++if test -z "$ac_cv_prog_CC"; then ++ ac_ct_CC=$CC ++ # Extract the first word of "clang", so it can be a program name with args. ++set dummy clang; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... " >&6; } ++if test ${ac_cv_prog_ac_ct_CC+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$ac_ct_CC"; then ++ ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. 
++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_ac_ct_CC="clang" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++ac_ct_CC=$ac_cv_prog_ac_ct_CC ++if test -n "$ac_ct_CC"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 ++printf "%s\n" "$ac_ct_CC" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ if test "x$ac_ct_CC" = x; then ++ CC="" ++ else ++ case $cross_compiling:$ac_tool_warned in ++yes:) ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++ac_tool_warned=yes ;; ++esac ++ CC=$ac_ct_CC ++ fi ++else ++ CC="$ac_cv_prog_CC" ++fi ++ ++fi ++ ++ ++test -z "$CC" && { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 ++printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} ++as_fn_error $? "no acceptable C compiler found in \$PATH ++See \`config.log' for more details" "$LINENO" 5; } ++ ++# Provide some information about the compiler. ++printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 ++set X $ac_compile ++ac_compiler=$2 ++for ac_option in --version -v -V -qversion -version; do ++ { { ac_try="$ac_compiler $ac_option >&5" ++case "(($ac_try" in ++ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; ++ *) ac_try_echo=$ac_try;; ++esac ++eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" ++printf "%s\n" "$ac_try_echo"; } >&5 ++ (eval "$ac_compiler $ac_option >&5") 2>conftest.err ++ ac_status=$? 
++ if test -s conftest.err; then ++ sed '10a\ ++... rest of stderr output deleted ... ++ 10q' conftest.err >conftest.er1 ++ cat conftest.er1 >&5 ++ fi ++ rm -f conftest.er1 conftest.err ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ test $ac_status = 0; } ++done ++ ++cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++ ++int ++main (void) ++{ ++ ++ ; ++ return 0; ++} ++_ACEOF ++ac_clean_files_save=$ac_clean_files ++ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" ++# Try to create an executable without -o first, disregard a.out. ++# It will help us diagnose broken compilers, and finding out an intuition ++# of exeext. ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5 ++printf %s "checking whether the C compiler works... " >&6; } ++ac_link_default=`printf "%s\n" "$ac_link" | sed 's/ -o *conftest[^ ]*//'` ++ ++# The possible output files: ++ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" ++ ++ac_rmfiles= ++for ac_file in $ac_files ++do ++ case $ac_file in ++ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; ++ * ) ac_rmfiles="$ac_rmfiles $ac_file";; ++ esac ++done ++rm -f $ac_rmfiles ++ ++if { { ac_try="$ac_link_default" ++case "(($ac_try" in ++ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; ++ *) ac_try_echo=$ac_try;; ++esac ++eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" ++printf "%s\n" "$ac_try_echo"; } >&5 ++ (eval "$ac_link_default") 2>&5 ++ ac_status=$? ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ test $ac_status = 0; } ++then : ++ # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. ++# So ignore a value of `no', otherwise this would lead to `EXEEXT = no' ++# in a Makefile. We should not override ac_cv_exeext if it was cached, ++# so that the user can short-circuit this test for compilers unknown to ++# Autoconf. 
++for ac_file in $ac_files '' ++do ++ test -f "$ac_file" || continue ++ case $ac_file in ++ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ++ ;; ++ [ab].out ) ++ # We found the default executable, but exeext='' is most ++ # certainly right. ++ break;; ++ *.* ) ++ if test ${ac_cv_exeext+y} && test "$ac_cv_exeext" != no; ++ then :; else ++ ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` ++ fi ++ # We set ac_cv_exeext here because the later test for it is not ++ # safe: cross compilers may not add the suffix if given an `-o' ++ # argument, so we may need to know it at that point already. ++ # Even if this section looks crufty: it has the advantage of ++ # actually working. ++ break;; ++ * ) ++ break;; ++ esac ++done ++test "$ac_cv_exeext" = no && ac_cv_exeext= ++ ++else $as_nop ++ ac_file='' ++fi ++if test -z "$ac_file" ++then : ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++printf "%s\n" "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++{ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 ++printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} ++as_fn_error 77 "C compiler cannot create executables ++See \`config.log' for more details" "$LINENO" 5; } ++else $as_nop ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5 ++printf "%s\n" "yes" >&6; } ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5 ++printf %s "checking for C compiler default output file name... " >&6; } ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 ++printf "%s\n" "$ac_file" >&6; } ++ac_exeext=$ac_cv_exeext ++ ++rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out ++ac_clean_files=$ac_clean_files_save ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 ++printf %s "checking for suffix of executables... 
" >&6; } ++if { { ac_try="$ac_link" ++case "(($ac_try" in ++ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; ++ *) ac_try_echo=$ac_try;; ++esac ++eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" ++printf "%s\n" "$ac_try_echo"; } >&5 ++ (eval "$ac_link") 2>&5 ++ ac_status=$? ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ test $ac_status = 0; } ++then : ++ # If both `conftest.exe' and `conftest' are `present' (well, observable) ++# catch `conftest.exe'. For instance with Cygwin, `ls conftest' will ++# work properly (i.e., refer to `conftest.exe'), while it won't with ++# `rm'. ++for ac_file in conftest.exe conftest conftest.*; do ++ test -f "$ac_file" || continue ++ case $ac_file in ++ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; ++ *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` ++ break;; ++ * ) break;; ++ esac ++done ++else $as_nop ++ { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 ++printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} ++as_fn_error $? "cannot compute suffix of executables: cannot compile and link ++See \`config.log' for more details" "$LINENO" 5; } ++fi ++rm -f conftest conftest$ac_cv_exeext ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 ++printf "%s\n" "$ac_cv_exeext" >&6; } ++ ++rm -f conftest.$ac_ext ++EXEEXT=$ac_cv_exeext ++ac_exeext=$EXEEXT ++cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++#include ++int ++main (void) ++{ ++FILE *f = fopen ("conftest.out", "w"); ++ return ferror (f) || fclose (f) != 0; ++ ++ ; ++ return 0; ++} ++_ACEOF ++ac_clean_files="$ac_clean_files conftest.out" ++# Check that the compiler produces executables we can run. If not, either ++# the compiler is broken, or we cross compile. ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 ++printf %s "checking whether we are cross compiling... 
" >&6; } ++if test "$cross_compiling" != yes; then ++ { { ac_try="$ac_link" ++case "(($ac_try" in ++ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; ++ *) ac_try_echo=$ac_try;; ++esac ++eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" ++printf "%s\n" "$ac_try_echo"; } >&5 ++ (eval "$ac_link") 2>&5 ++ ac_status=$? ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ test $ac_status = 0; } ++ if { ac_try='./conftest$ac_cv_exeext' ++ { { case "(($ac_try" in ++ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; ++ *) ac_try_echo=$ac_try;; ++esac ++eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" ++printf "%s\n" "$ac_try_echo"; } >&5 ++ (eval "$ac_try") 2>&5 ++ ac_status=$? ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ test $ac_status = 0; }; }; then ++ cross_compiling=no ++ else ++ if test "$cross_compiling" = maybe; then ++ cross_compiling=yes ++ else ++ { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 ++printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} ++as_fn_error 77 "cannot run C compiled programs. ++If you meant to cross compile, use \`--host'. ++See \`config.log' for more details" "$LINENO" 5; } ++ fi ++ fi ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 ++printf "%s\n" "$cross_compiling" >&6; } ++ ++rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out ++ac_clean_files=$ac_clean_files_save ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 ++printf %s "checking for suffix of object files... " >&6; } ++if test ${ac_cv_objext+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. 
*/ ++ ++int ++main (void) ++{ ++ ++ ; ++ return 0; ++} ++_ACEOF ++rm -f conftest.o conftest.obj ++if { { ac_try="$ac_compile" ++case "(($ac_try" in ++ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; ++ *) ac_try_echo=$ac_try;; ++esac ++eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" ++printf "%s\n" "$ac_try_echo"; } >&5 ++ (eval "$ac_compile") 2>&5 ++ ac_status=$? ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ test $ac_status = 0; } ++then : ++ for ac_file in conftest.o conftest.obj conftest.*; do ++ test -f "$ac_file" || continue; ++ case $ac_file in ++ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; ++ *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` ++ break;; ++ esac ++done ++else $as_nop ++ printf "%s\n" "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++{ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 ++printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} ++as_fn_error $? "cannot compute suffix of object files: cannot compile ++See \`config.log' for more details" "$LINENO" 5; } ++fi ++rm -f conftest.$ac_cv_objext conftest.$ac_ext ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 ++printf "%s\n" "$ac_cv_objext" >&6; } ++OBJEXT=$ac_cv_objext ++ac_objext=$OBJEXT ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the compiler supports GNU C" >&5 ++printf %s "checking whether the compiler supports GNU C... " >&6; } ++if test ${ac_cv_c_compiler_gnu+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. 
*/ ++ ++int ++main (void) ++{ ++#ifndef __GNUC__ ++ choke me ++#endif ++ ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_c_try_compile "$LINENO" ++then : ++ ac_compiler_gnu=yes ++else $as_nop ++ ac_compiler_gnu=no ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++ac_cv_c_compiler_gnu=$ac_compiler_gnu ++ ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 ++printf "%s\n" "$ac_cv_c_compiler_gnu" >&6; } ++ac_compiler_gnu=$ac_cv_c_compiler_gnu ++ ++if test $ac_compiler_gnu = yes; then ++ GCC=yes ++else ++ GCC= ++fi ++ac_test_CFLAGS=${CFLAGS+y} ++ac_save_CFLAGS=$CFLAGS ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 ++printf %s "checking whether $CC accepts -g... " >&6; } ++if test ${ac_cv_prog_cc_g+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ ac_save_c_werror_flag=$ac_c_werror_flag ++ ac_c_werror_flag=yes ++ ac_cv_prog_cc_g=no ++ CFLAGS="-g" ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++ ++int ++main (void) ++{ ++ ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_c_try_compile "$LINENO" ++then : ++ ac_cv_prog_cc_g=yes ++else $as_nop ++ CFLAGS="" ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++ ++int ++main (void) ++{ ++ ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_c_try_compile "$LINENO" ++then : ++ ++else $as_nop ++ ac_c_werror_flag=$ac_save_c_werror_flag ++ CFLAGS="-g" ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. 
*/ ++ ++int ++main (void) ++{ ++ ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_c_try_compile "$LINENO" ++then : ++ ac_cv_prog_cc_g=yes ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++ ac_c_werror_flag=$ac_save_c_werror_flag ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 ++printf "%s\n" "$ac_cv_prog_cc_g" >&6; } ++if test $ac_test_CFLAGS; then ++ CFLAGS=$ac_save_CFLAGS ++elif test $ac_cv_prog_cc_g = yes; then ++ if test "$GCC" = yes; then ++ CFLAGS="-g -O2" ++ else ++ CFLAGS="-g" ++ fi ++else ++ if test "$GCC" = yes; then ++ CFLAGS="-O2" ++ else ++ CFLAGS= ++ fi ++fi ++ac_prog_cc_stdc=no ++if test x$ac_prog_cc_stdc = xno ++then : ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CC option to enable C11 features" >&5 ++printf %s "checking for $CC option to enable C11 features... " >&6; } ++if test ${ac_cv_prog_cc_c11+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ ac_cv_prog_cc_c11=no ++ac_save_CC=$CC ++cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. 
*/ ++$ac_c_conftest_c11_program ++_ACEOF ++for ac_arg in '' -std=gnu11 ++do ++ CC="$ac_save_CC $ac_arg" ++ if ac_fn_c_try_compile "$LINENO" ++then : ++ ac_cv_prog_cc_c11=$ac_arg ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam ++ test "x$ac_cv_prog_cc_c11" != "xno" && break ++done ++rm -f conftest.$ac_ext ++CC=$ac_save_CC ++fi ++ ++if test "x$ac_cv_prog_cc_c11" = xno ++then : ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 ++printf "%s\n" "unsupported" >&6; } ++else $as_nop ++ if test "x$ac_cv_prog_cc_c11" = x ++then : ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 ++printf "%s\n" "none needed" >&6; } ++else $as_nop ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c11" >&5 ++printf "%s\n" "$ac_cv_prog_cc_c11" >&6; } ++ CC="$CC $ac_cv_prog_cc_c11" ++fi ++ ac_cv_prog_cc_stdc=$ac_cv_prog_cc_c11 ++ ac_prog_cc_stdc=c11 ++fi ++fi ++if test x$ac_prog_cc_stdc = xno ++then : ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CC option to enable C99 features" >&5 ++printf %s "checking for $CC option to enable C99 features... " >&6; } ++if test ${ac_cv_prog_cc_c99+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ ac_cv_prog_cc_c99=no ++ac_save_CC=$CC ++cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. 
*/ ++$ac_c_conftest_c99_program ++_ACEOF ++for ac_arg in '' -std=gnu99 -std=c99 -c99 -qlanglvl=extc1x -qlanglvl=extc99 -AC99 -D_STDC_C99= ++do ++ CC="$ac_save_CC $ac_arg" ++ if ac_fn_c_try_compile "$LINENO" ++then : ++ ac_cv_prog_cc_c99=$ac_arg ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam ++ test "x$ac_cv_prog_cc_c99" != "xno" && break ++done ++rm -f conftest.$ac_ext ++CC=$ac_save_CC ++fi ++ ++if test "x$ac_cv_prog_cc_c99" = xno ++then : ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 ++printf "%s\n" "unsupported" >&6; } ++else $as_nop ++ if test "x$ac_cv_prog_cc_c99" = x ++then : ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 ++printf "%s\n" "none needed" >&6; } ++else $as_nop ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c99" >&5 ++printf "%s\n" "$ac_cv_prog_cc_c99" >&6; } ++ CC="$CC $ac_cv_prog_cc_c99" ++fi ++ ac_cv_prog_cc_stdc=$ac_cv_prog_cc_c99 ++ ac_prog_cc_stdc=c99 ++fi ++fi ++if test x$ac_prog_cc_stdc = xno ++then : ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CC option to enable C89 features" >&5 ++printf %s "checking for $CC option to enable C89 features... " >&6; } ++if test ${ac_cv_prog_cc_c89+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ ac_cv_prog_cc_c89=no ++ac_save_CC=$CC ++cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. 
*/ ++$ac_c_conftest_c89_program ++_ACEOF ++for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" ++do ++ CC="$ac_save_CC $ac_arg" ++ if ac_fn_c_try_compile "$LINENO" ++then : ++ ac_cv_prog_cc_c89=$ac_arg ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam ++ test "x$ac_cv_prog_cc_c89" != "xno" && break ++done ++rm -f conftest.$ac_ext ++CC=$ac_save_CC ++fi ++ ++if test "x$ac_cv_prog_cc_c89" = xno ++then : ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 ++printf "%s\n" "unsupported" >&6; } ++else $as_nop ++ if test "x$ac_cv_prog_cc_c89" = x ++then : ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 ++printf "%s\n" "none needed" >&6; } ++else $as_nop ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 ++printf "%s\n" "$ac_cv_prog_cc_c89" >&6; } ++ CC="$CC $ac_cv_prog_cc_c89" ++fi ++ ac_cv_prog_cc_stdc=$ac_cv_prog_cc_c89 ++ ac_prog_cc_stdc=c89 ++fi ++fi ++ ++ac_ext=c ++ac_cpp='$CPP $CPPFLAGS' ++ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ++ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ++ac_compiler_gnu=$ac_cv_c_compiler_gnu ++ ++ ++ ac_ext=c ++ac_cpp='$CPP $CPPFLAGS' ++ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ++ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ++ac_compiler_gnu=$ac_cv_c_compiler_gnu ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $CC understands -c and -o together" >&5 ++printf %s "checking whether $CC understands -c and -o together... " >&6; } ++if test ${am_cv_prog_cc_c_o+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++ ++int ++main (void) ++{ ++ ++ ; ++ return 0; ++} ++_ACEOF ++ # Make sure it works both with $CC and with simple cc. 
++ # Following AC_PROG_CC_C_O, we do the test twice because some ++ # compilers refuse to overwrite an existing .o file with -o, ++ # though they will create one. ++ am_cv_prog_cc_c_o=yes ++ for am_i in 1 2; do ++ if { echo "$as_me:$LINENO: $CC -c conftest.$ac_ext -o conftest2.$ac_objext" >&5 ++ ($CC -c conftest.$ac_ext -o conftest2.$ac_objext) >&5 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); } \ ++ && test -f conftest2.$ac_objext; then ++ : OK ++ else ++ am_cv_prog_cc_c_o=no ++ break ++ fi ++ done ++ rm -f core conftest* ++ unset am_i ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $am_cv_prog_cc_c_o" >&5 ++printf "%s\n" "$am_cv_prog_cc_c_o" >&6; } ++if test "$am_cv_prog_cc_c_o" != yes; then ++ # Losing compiler, so override with the script. ++ # FIXME: It is wrong to rewrite CC. ++ # But if we don't then we get into trouble of one sort or another. ++ # A longer-term fix would be to have automake use am__CC in this case, ++ # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)" ++ CC="$am_aux_dir/compile $CC" ++fi ++ac_ext=c ++ac_cpp='$CPP $CPPFLAGS' ++ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ++ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ++ac_compiler_gnu=$ac_cv_c_compiler_gnu ++ ++ ++depcc="$CC" am_compiler_list= ++ ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 ++printf %s "checking dependency style of $depcc... " >&6; } ++if test ${am_cv_CC_dependencies_compiler_type+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then ++ # We make a subdir and do the tests there. Otherwise we can end up ++ # making bogus files that we don't know about and never remove. For ++ # instance it was reported that on HP-UX the gcc test will end up ++ # making a dummy file named 'D' -- because '-MD' means "put the output ++ # in D". 
++ rm -rf conftest.dir ++ mkdir conftest.dir ++ # Copy depcomp to subdir because otherwise we won't find it if we're ++ # using a relative directory. ++ cp "$am_depcomp" conftest.dir ++ cd conftest.dir ++ # We will build objects and dependencies in a subdirectory because ++ # it helps to detect inapplicable dependency modes. For instance ++ # both Tru64's cc and ICC support -MD to output dependencies as a ++ # side effect of compilation, but ICC will put the dependencies in ++ # the current directory while Tru64 will put them in the object ++ # directory. ++ mkdir sub ++ ++ am_cv_CC_dependencies_compiler_type=none ++ if test "$am_compiler_list" = ""; then ++ am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp` ++ fi ++ am__universal=false ++ case " $depcc " in #( ++ *\ -arch\ *\ -arch\ *) am__universal=true ;; ++ esac ++ ++ for depmode in $am_compiler_list; do ++ # Setup a source with many dependencies, because some compilers ++ # like to wrap large dependency lists on column 80 (with \), and ++ # we should not choose a depcomp mode which is confused by this. ++ # ++ # We need to recreate these files for each test, as the compiler may ++ # overwrite some of them when testing with obscure command lines. ++ # This happens at least with the AIX C compiler. ++ : > sub/conftest.c ++ for i in 1 2 3 4 5 6; do ++ echo '#include "conftst'$i'.h"' >> sub/conftest.c ++ # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with ++ # Solaris 10 /bin/sh. ++ echo '/* dummy */' > sub/conftst$i.h ++ done ++ echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf ++ ++ # We check with '-c' and '-o' for the sake of the "dashmstdout" ++ # mode. It turns out that the SunPro C++ compiler does not properly ++ # handle '-M -o', and we need to detect this. Also, some Intel ++ # versions had trouble with output in subdirs. 
++ am__obj=sub/conftest.${OBJEXT-o} ++ am__minus_obj="-o $am__obj" ++ case $depmode in ++ gcc) ++ # This depmode causes a compiler race in universal mode. ++ test "$am__universal" = false || continue ++ ;; ++ nosideeffect) ++ # After this tag, mechanisms are not by side-effect, so they'll ++ # only be used when explicitly requested. ++ if test "x$enable_dependency_tracking" = xyes; then ++ continue ++ else ++ break ++ fi ++ ;; ++ msvc7 | msvc7msys | msvisualcpp | msvcmsys) ++ # This compiler won't grok '-c -o', but also, the minuso test has ++ # not run yet. These depmodes are late enough in the game, and ++ # so weak that their functioning should not be impacted. ++ am__obj=conftest.${OBJEXT-o} ++ am__minus_obj= ++ ;; ++ none) break ;; ++ esac ++ if depmode=$depmode \ ++ source=sub/conftest.c object=$am__obj \ ++ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ ++ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ ++ >/dev/null 2>conftest.err && ++ grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && ++ grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && ++ grep $am__obj sub/conftest.Po > /dev/null 2>&1 && ++ ${MAKE-make} -s -f confmf > /dev/null 2>&1; then ++ # icc doesn't choke on unknown options, it will just issue warnings ++ # or remarks (even with -Werror). So we grep stderr for any message ++ # that says an option was ignored or not supported. ++ # When given -MP, icc 7.0 and 7.1 complain thusly: ++ # icc: Command line warning: ignoring option '-M'; no argument required ++ # The diagnosis changed in icc 8.0: ++ # icc: Command line remark: option '-MP' not supported ++ if (grep 'ignoring option' conftest.err || ++ grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else ++ am_cv_CC_dependencies_compiler_type=$depmode ++ break ++ fi ++ fi ++ done ++ ++ cd .. 
++ rm -rf conftest.dir ++else ++ am_cv_CC_dependencies_compiler_type=none ++fi ++ ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $am_cv_CC_dependencies_compiler_type" >&5 ++printf "%s\n" "$am_cv_CC_dependencies_compiler_type" >&6; } ++CCDEPMODE=depmode=$am_cv_CC_dependencies_compiler_type ++ ++ if ++ test "x$enable_dependency_tracking" != xno \ ++ && test "$am_cv_CC_dependencies_compiler_type" = gcc3; then ++ am__fastdepCC_TRUE= ++ am__fastdepCC_FALSE='#' ++else ++ am__fastdepCC_TRUE='#' ++ am__fastdepCC_FALSE= ++fi ++ ++ ++ ++ac_header= ac_cache= ++for ac_item in $ac_header_c_list ++do ++ if test $ac_cache; then ++ ac_fn_c_check_header_compile "$LINENO" $ac_header ac_cv_header_$ac_cache "$ac_includes_default" ++ if eval test \"x\$ac_cv_header_$ac_cache\" = xyes; then ++ printf "%s\n" "#define $ac_item 1" >> confdefs.h ++ fi ++ ac_header= ac_cache= ++ elif test $ac_header; then ++ ac_cache=$ac_item ++ else ++ ac_header=$ac_item ++ fi ++done ++ ++ ++ ++ ++ ++ ++ ++ ++if test $ac_cv_header_stdlib_h = yes && test $ac_cv_header_string_h = yes ++then : ++ ++printf "%s\n" "#define STDC_HEADERS 1" >>confdefs.h ++ ++fi ++ ++ ++ ++ ++ ++ ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether it is safe to define __EXTENSIONS__" >&5 ++printf %s "checking whether it is safe to define __EXTENSIONS__... " >&6; } ++if test ${ac_cv_safe_to_define___extensions__+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. 
*/ ++ ++# define __EXTENSIONS__ 1 ++ $ac_includes_default ++int ++main (void) ++{ ++ ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_c_try_compile "$LINENO" ++then : ++ ac_cv_safe_to_define___extensions__=yes ++else $as_nop ++ ac_cv_safe_to_define___extensions__=no ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_safe_to_define___extensions__" >&5 ++printf "%s\n" "$ac_cv_safe_to_define___extensions__" >&6; } ++ ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether _XOPEN_SOURCE should be defined" >&5 ++printf %s "checking whether _XOPEN_SOURCE should be defined... " >&6; } ++if test ${ac_cv_should_define__xopen_source+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ ac_cv_should_define__xopen_source=no ++ if test $ac_cv_header_wchar_h = yes ++then : ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++ ++ #include ++ mbstate_t x; ++int ++main (void) ++{ ++ ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_c_try_compile "$LINENO" ++then : ++ ++else $as_nop ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. 
*/ ++ ++ #define _XOPEN_SOURCE 500 ++ #include ++ mbstate_t x; ++int ++main (void) ++{ ++ ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_c_try_compile "$LINENO" ++then : ++ ac_cv_should_define__xopen_source=yes ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++fi ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_should_define__xopen_source" >&5 ++printf "%s\n" "$ac_cv_should_define__xopen_source" >&6; } ++ ++ printf "%s\n" "#define _ALL_SOURCE 1" >>confdefs.h ++ ++ printf "%s\n" "#define _DARWIN_C_SOURCE 1" >>confdefs.h ++ ++ printf "%s\n" "#define _GNU_SOURCE 1" >>confdefs.h ++ ++ printf "%s\n" "#define _HPUX_ALT_XOPEN_SOCKET_API 1" >>confdefs.h ++ ++ printf "%s\n" "#define _NETBSD_SOURCE 1" >>confdefs.h ++ ++ printf "%s\n" "#define _OPENBSD_SOURCE 1" >>confdefs.h ++ ++ printf "%s\n" "#define _POSIX_PTHREAD_SEMANTICS 1" >>confdefs.h ++ ++ printf "%s\n" "#define __STDC_WANT_IEC_60559_ATTRIBS_EXT__ 1" >>confdefs.h ++ ++ printf "%s\n" "#define __STDC_WANT_IEC_60559_BFP_EXT__ 1" >>confdefs.h ++ ++ printf "%s\n" "#define __STDC_WANT_IEC_60559_DFP_EXT__ 1" >>confdefs.h ++ ++ printf "%s\n" "#define __STDC_WANT_IEC_60559_FUNCS_EXT__ 1" >>confdefs.h ++ ++ printf "%s\n" "#define __STDC_WANT_IEC_60559_TYPES_EXT__ 1" >>confdefs.h ++ ++ printf "%s\n" "#define __STDC_WANT_LIB_EXT2__ 1" >>confdefs.h ++ ++ printf "%s\n" "#define __STDC_WANT_MATH_SPEC_FUNCS__ 1" >>confdefs.h ++ ++ printf "%s\n" "#define _TANDEM_SOURCE 1" >>confdefs.h ++ ++ if test $ac_cv_header_minix_config_h = yes ++then : ++ MINIX=yes ++ printf "%s\n" "#define _MINIX 1" >>confdefs.h ++ ++ printf "%s\n" "#define _POSIX_SOURCE 1" >>confdefs.h ++ ++ printf "%s\n" "#define _POSIX_1_SOURCE 2" >>confdefs.h ++ ++else $as_nop ++ MINIX= ++fi ++ if test $ac_cv_safe_to_define___extensions__ = yes ++then : ++ printf "%s\n" "#define __EXTENSIONS__ 1" >>confdefs.h ++ ++fi ++ if test 
$ac_cv_should_define__xopen_source = yes ++then : ++ printf "%s\n" "#define _XOPEN_SOURCE 500" >>confdefs.h ++ ++fi ++ ++ac_ext=c ++ac_cpp='$CPP $CPPFLAGS' ++ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ++ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ++ac_compiler_gnu=$ac_cv_c_compiler_gnu ++if test -n "$ac_tool_prefix"; then ++ # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. ++set dummy ${ac_tool_prefix}gcc; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... " >&6; } ++if test ${ac_cv_prog_CC+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$CC"; then ++ ac_cv_prog_CC="$CC" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_CC="${ac_tool_prefix}gcc" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++CC=$ac_cv_prog_CC ++if test -n "$CC"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 ++printf "%s\n" "$CC" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ ++fi ++if test -z "$ac_cv_prog_CC"; then ++ ac_ct_CC=$CC ++ # Extract the first word of "gcc", so it can be a program name with args. ++set dummy gcc; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... 
" >&6; } ++if test ${ac_cv_prog_ac_ct_CC+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$ac_ct_CC"; then ++ ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_ac_ct_CC="gcc" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++ac_ct_CC=$ac_cv_prog_ac_ct_CC ++if test -n "$ac_ct_CC"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 ++printf "%s\n" "$ac_ct_CC" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ if test "x$ac_ct_CC" = x; then ++ CC="" ++ else ++ case $cross_compiling:$ac_tool_warned in ++yes:) ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++ac_tool_warned=yes ;; ++esac ++ CC=$ac_ct_CC ++ fi ++else ++ CC="$ac_cv_prog_CC" ++fi ++ ++if test -z "$CC"; then ++ if test -n "$ac_tool_prefix"; then ++ # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. ++set dummy ${ac_tool_prefix}cc; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... " >&6; } ++if test ${ac_cv_prog_CC+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$CC"; then ++ ac_cv_prog_CC="$CC" # Let the user override the test. 
++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_CC="${ac_tool_prefix}cc" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++CC=$ac_cv_prog_CC ++if test -n "$CC"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 ++printf "%s\n" "$CC" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ ++ fi ++fi ++if test -z "$CC"; then ++ # Extract the first word of "cc", so it can be a program name with args. ++set dummy cc; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... " >&6; } ++if test ${ac_cv_prog_CC+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$CC"; then ++ ac_cv_prog_CC="$CC" # Let the user override the test. ++else ++ ac_prog_rejected=no ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ if test "$as_dir$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then ++ ac_prog_rejected=yes ++ continue ++ fi ++ ac_cv_prog_CC="cc" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++if test $ac_prog_rejected = yes; then ++ # We found a bogon in the path, so make sure we never use it. ++ set dummy $ac_cv_prog_CC ++ shift ++ if test $# != 0; then ++ # We chose a different compiler from the bogus one. 
++ # However, it has the same basename, so the bogon will be chosen ++ # first if we set CC to just the basename; use the full file name. ++ shift ++ ac_cv_prog_CC="$as_dir$ac_word${1+' '}$@" ++ fi ++fi ++fi ++fi ++CC=$ac_cv_prog_CC ++if test -n "$CC"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 ++printf "%s\n" "$CC" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ ++fi ++if test -z "$CC"; then ++ if test -n "$ac_tool_prefix"; then ++ for ac_prog in cl.exe ++ do ++ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. ++set dummy $ac_tool_prefix$ac_prog; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... " >&6; } ++if test ${ac_cv_prog_CC+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$CC"; then ++ ac_cv_prog_CC="$CC" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_CC="$ac_tool_prefix$ac_prog" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++CC=$ac_cv_prog_CC ++if test -n "$CC"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 ++printf "%s\n" "$CC" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ ++ test -n "$CC" && break ++ done ++fi ++if test -z "$CC"; then ++ ac_ct_CC=$CC ++ for ac_prog in cl.exe ++do ++ # Extract the first word of "$ac_prog", so it can be a program name with args. 
++set dummy $ac_prog; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... " >&6; } ++if test ${ac_cv_prog_ac_ct_CC+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$ac_ct_CC"; then ++ ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_ac_ct_CC="$ac_prog" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++ac_ct_CC=$ac_cv_prog_ac_ct_CC ++if test -n "$ac_ct_CC"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 ++printf "%s\n" "$ac_ct_CC" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ ++ test -n "$ac_ct_CC" && break ++done ++ ++ if test "x$ac_ct_CC" = x; then ++ CC="" ++ else ++ case $cross_compiling:$ac_tool_warned in ++yes:) ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++ac_tool_warned=yes ;; ++esac ++ CC=$ac_ct_CC ++ fi ++fi ++ ++fi ++if test -z "$CC"; then ++ if test -n "$ac_tool_prefix"; then ++ # Extract the first word of "${ac_tool_prefix}clang", so it can be a program name with args. ++set dummy ${ac_tool_prefix}clang; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... 
" >&6; } ++if test ${ac_cv_prog_CC+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$CC"; then ++ ac_cv_prog_CC="$CC" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_CC="${ac_tool_prefix}clang" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++CC=$ac_cv_prog_CC ++if test -n "$CC"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 ++printf "%s\n" "$CC" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ ++fi ++if test -z "$ac_cv_prog_CC"; then ++ ac_ct_CC=$CC ++ # Extract the first word of "clang", so it can be a program name with args. ++set dummy clang; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... " >&6; } ++if test ${ac_cv_prog_ac_ct_CC+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$ac_ct_CC"; then ++ ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. 
++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_ac_ct_CC="clang" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++ac_ct_CC=$ac_cv_prog_ac_ct_CC ++if test -n "$ac_ct_CC"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 ++printf "%s\n" "$ac_ct_CC" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ if test "x$ac_ct_CC" = x; then ++ CC="" ++ else ++ case $cross_compiling:$ac_tool_warned in ++yes:) ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++ac_tool_warned=yes ;; ++esac ++ CC=$ac_ct_CC ++ fi ++else ++ CC="$ac_cv_prog_CC" ++fi ++ ++fi ++ ++ ++test -z "$CC" && { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 ++printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} ++as_fn_error $? "no acceptable C compiler found in \$PATH ++See \`config.log' for more details" "$LINENO" 5; } ++ ++# Provide some information about the compiler. ++printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 ++set X $ac_compile ++ac_compiler=$2 ++for ac_option in --version -v -V -qversion -version; do ++ { { ac_try="$ac_compiler $ac_option >&5" ++case "(($ac_try" in ++ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; ++ *) ac_try_echo=$ac_try;; ++esac ++eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" ++printf "%s\n" "$ac_try_echo"; } >&5 ++ (eval "$ac_compiler $ac_option >&5") 2>conftest.err ++ ac_status=$? 
++ if test -s conftest.err; then ++ sed '10a\ ++... rest of stderr output deleted ... ++ 10q' conftest.err >conftest.er1 ++ cat conftest.er1 >&5 ++ fi ++ rm -f conftest.er1 conftest.err ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ test $ac_status = 0; } ++done ++ ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the compiler supports GNU C" >&5 ++printf %s "checking whether the compiler supports GNU C... " >&6; } ++if test ${ac_cv_c_compiler_gnu+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++ ++int ++main (void) ++{ ++#ifndef __GNUC__ ++ choke me ++#endif ++ ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_c_try_compile "$LINENO" ++then : ++ ac_compiler_gnu=yes ++else $as_nop ++ ac_compiler_gnu=no ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++ac_cv_c_compiler_gnu=$ac_compiler_gnu ++ ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 ++printf "%s\n" "$ac_cv_c_compiler_gnu" >&6; } ++ac_compiler_gnu=$ac_cv_c_compiler_gnu ++ ++if test $ac_compiler_gnu = yes; then ++ GCC=yes ++else ++ GCC= ++fi ++ac_test_CFLAGS=${CFLAGS+y} ++ac_save_CFLAGS=$CFLAGS ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 ++printf %s "checking whether $CC accepts -g... " >&6; } ++if test ${ac_cv_prog_cc_g+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ ac_save_c_werror_flag=$ac_c_werror_flag ++ ac_c_werror_flag=yes ++ ac_cv_prog_cc_g=no ++ CFLAGS="-g" ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++ ++int ++main (void) ++{ ++ ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_c_try_compile "$LINENO" ++then : ++ ac_cv_prog_cc_g=yes ++else $as_nop ++ CFLAGS="" ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. 
*/ ++ ++int ++main (void) ++{ ++ ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_c_try_compile "$LINENO" ++then : ++ ++else $as_nop ++ ac_c_werror_flag=$ac_save_c_werror_flag ++ CFLAGS="-g" ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++ ++int ++main (void) ++{ ++ ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_c_try_compile "$LINENO" ++then : ++ ac_cv_prog_cc_g=yes ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++ ac_c_werror_flag=$ac_save_c_werror_flag ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 ++printf "%s\n" "$ac_cv_prog_cc_g" >&6; } ++if test $ac_test_CFLAGS; then ++ CFLAGS=$ac_save_CFLAGS ++elif test $ac_cv_prog_cc_g = yes; then ++ if test "$GCC" = yes; then ++ CFLAGS="-g -O2" ++ else ++ CFLAGS="-g" ++ fi ++else ++ if test "$GCC" = yes; then ++ CFLAGS="-O2" ++ else ++ CFLAGS= ++ fi ++fi ++ac_prog_cc_stdc=no ++if test x$ac_prog_cc_stdc = xno ++then : ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CC option to enable C11 features" >&5 ++printf %s "checking for $CC option to enable C11 features... " >&6; } ++if test ${ac_cv_prog_cc_c11+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ ac_cv_prog_cc_c11=no ++ac_save_CC=$CC ++cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. 
*/ ++$ac_c_conftest_c11_program ++_ACEOF ++for ac_arg in '' -std=gnu11 ++do ++ CC="$ac_save_CC $ac_arg" ++ if ac_fn_c_try_compile "$LINENO" ++then : ++ ac_cv_prog_cc_c11=$ac_arg ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam ++ test "x$ac_cv_prog_cc_c11" != "xno" && break ++done ++rm -f conftest.$ac_ext ++CC=$ac_save_CC ++fi ++ ++if test "x$ac_cv_prog_cc_c11" = xno ++then : ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 ++printf "%s\n" "unsupported" >&6; } ++else $as_nop ++ if test "x$ac_cv_prog_cc_c11" = x ++then : ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 ++printf "%s\n" "none needed" >&6; } ++else $as_nop ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c11" >&5 ++printf "%s\n" "$ac_cv_prog_cc_c11" >&6; } ++ CC="$CC $ac_cv_prog_cc_c11" ++fi ++ ac_cv_prog_cc_stdc=$ac_cv_prog_cc_c11 ++ ac_prog_cc_stdc=c11 ++fi ++fi ++if test x$ac_prog_cc_stdc = xno ++then : ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CC option to enable C99 features" >&5 ++printf %s "checking for $CC option to enable C99 features... " >&6; } ++if test ${ac_cv_prog_cc_c99+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ ac_cv_prog_cc_c99=no ++ac_save_CC=$CC ++cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. 
*/ ++$ac_c_conftest_c99_program ++_ACEOF ++for ac_arg in '' -std=gnu99 -std=c99 -c99 -qlanglvl=extc1x -qlanglvl=extc99 -AC99 -D_STDC_C99= ++do ++ CC="$ac_save_CC $ac_arg" ++ if ac_fn_c_try_compile "$LINENO" ++then : ++ ac_cv_prog_cc_c99=$ac_arg ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam ++ test "x$ac_cv_prog_cc_c99" != "xno" && break ++done ++rm -f conftest.$ac_ext ++CC=$ac_save_CC ++fi ++ ++if test "x$ac_cv_prog_cc_c99" = xno ++then : ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 ++printf "%s\n" "unsupported" >&6; } ++else $as_nop ++ if test "x$ac_cv_prog_cc_c99" = x ++then : ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 ++printf "%s\n" "none needed" >&6; } ++else $as_nop ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c99" >&5 ++printf "%s\n" "$ac_cv_prog_cc_c99" >&6; } ++ CC="$CC $ac_cv_prog_cc_c99" ++fi ++ ac_cv_prog_cc_stdc=$ac_cv_prog_cc_c99 ++ ac_prog_cc_stdc=c99 ++fi ++fi ++if test x$ac_prog_cc_stdc = xno ++then : ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CC option to enable C89 features" >&5 ++printf %s "checking for $CC option to enable C89 features... " >&6; } ++if test ${ac_cv_prog_cc_c89+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ ac_cv_prog_cc_c89=no ++ac_save_CC=$CC ++cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. 
*/ ++$ac_c_conftest_c89_program ++_ACEOF ++for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" ++do ++ CC="$ac_save_CC $ac_arg" ++ if ac_fn_c_try_compile "$LINENO" ++then : ++ ac_cv_prog_cc_c89=$ac_arg ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam ++ test "x$ac_cv_prog_cc_c89" != "xno" && break ++done ++rm -f conftest.$ac_ext ++CC=$ac_save_CC ++fi ++ ++if test "x$ac_cv_prog_cc_c89" = xno ++then : ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 ++printf "%s\n" "unsupported" >&6; } ++else $as_nop ++ if test "x$ac_cv_prog_cc_c89" = x ++then : ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 ++printf "%s\n" "none needed" >&6; } ++else $as_nop ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 ++printf "%s\n" "$ac_cv_prog_cc_c89" >&6; } ++ CC="$CC $ac_cv_prog_cc_c89" ++fi ++ ac_cv_prog_cc_stdc=$ac_cv_prog_cc_c89 ++ ac_prog_cc_stdc=c89 ++fi ++fi ++ ++ac_ext=c ++ac_cpp='$CPP $CPPFLAGS' ++ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ++ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ++ac_compiler_gnu=$ac_cv_c_compiler_gnu ++ ++ ++ ac_ext=c ++ac_cpp='$CPP $CPPFLAGS' ++ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ++ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ++ac_compiler_gnu=$ac_cv_c_compiler_gnu ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $CC understands -c and -o together" >&5 ++printf %s "checking whether $CC understands -c and -o together... " >&6; } ++if test ${am_cv_prog_cc_c_o+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++ ++int ++main (void) ++{ ++ ++ ; ++ return 0; ++} ++_ACEOF ++ # Make sure it works both with $CC and with simple cc. 
++ # Following AC_PROG_CC_C_O, we do the test twice because some ++ # compilers refuse to overwrite an existing .o file with -o, ++ # though they will create one. ++ am_cv_prog_cc_c_o=yes ++ for am_i in 1 2; do ++ if { echo "$as_me:$LINENO: $CC -c conftest.$ac_ext -o conftest2.$ac_objext" >&5 ++ ($CC -c conftest.$ac_ext -o conftest2.$ac_objext) >&5 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); } \ ++ && test -f conftest2.$ac_objext; then ++ : OK ++ else ++ am_cv_prog_cc_c_o=no ++ break ++ fi ++ done ++ rm -f core conftest* ++ unset am_i ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $am_cv_prog_cc_c_o" >&5 ++printf "%s\n" "$am_cv_prog_cc_c_o" >&6; } ++if test "$am_cv_prog_cc_c_o" != yes; then ++ # Losing compiler, so override with the script. ++ # FIXME: It is wrong to rewrite CC. ++ # But if we don't then we get into trouble of one sort or another. ++ # A longer-term fix would be to have automake use am__CC in this case, ++ # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)" ++ CC="$am_aux_dir/compile $CC" ++fi ++ac_ext=c ++ac_cpp='$CPP $CPPFLAGS' ++ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ++ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ++ac_compiler_gnu=$ac_cv_c_compiler_gnu ++ ++ ++depcc="$CC" am_compiler_list= ++ ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 ++printf %s "checking dependency style of $depcc... " >&6; } ++if test ${am_cv_CC_dependencies_compiler_type+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then ++ # We make a subdir and do the tests there. Otherwise we can end up ++ # making bogus files that we don't know about and never remove. For ++ # instance it was reported that on HP-UX the gcc test will end up ++ # making a dummy file named 'D' -- because '-MD' means "put the output ++ # in D". 
++ rm -rf conftest.dir ++ mkdir conftest.dir ++ # Copy depcomp to subdir because otherwise we won't find it if we're ++ # using a relative directory. ++ cp "$am_depcomp" conftest.dir ++ cd conftest.dir ++ # We will build objects and dependencies in a subdirectory because ++ # it helps to detect inapplicable dependency modes. For instance ++ # both Tru64's cc and ICC support -MD to output dependencies as a ++ # side effect of compilation, but ICC will put the dependencies in ++ # the current directory while Tru64 will put them in the object ++ # directory. ++ mkdir sub ++ ++ am_cv_CC_dependencies_compiler_type=none ++ if test "$am_compiler_list" = ""; then ++ am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp` ++ fi ++ am__universal=false ++ case " $depcc " in #( ++ *\ -arch\ *\ -arch\ *) am__universal=true ;; ++ esac ++ ++ for depmode in $am_compiler_list; do ++ # Setup a source with many dependencies, because some compilers ++ # like to wrap large dependency lists on column 80 (with \), and ++ # we should not choose a depcomp mode which is confused by this. ++ # ++ # We need to recreate these files for each test, as the compiler may ++ # overwrite some of them when testing with obscure command lines. ++ # This happens at least with the AIX C compiler. ++ : > sub/conftest.c ++ for i in 1 2 3 4 5 6; do ++ echo '#include "conftst'$i'.h"' >> sub/conftest.c ++ # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with ++ # Solaris 10 /bin/sh. ++ echo '/* dummy */' > sub/conftst$i.h ++ done ++ echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf ++ ++ # We check with '-c' and '-o' for the sake of the "dashmstdout" ++ # mode. It turns out that the SunPro C++ compiler does not properly ++ # handle '-M -o', and we need to detect this. Also, some Intel ++ # versions had trouble with output in subdirs. 
++ am__obj=sub/conftest.${OBJEXT-o} ++ am__minus_obj="-o $am__obj" ++ case $depmode in ++ gcc) ++ # This depmode causes a compiler race in universal mode. ++ test "$am__universal" = false || continue ++ ;; ++ nosideeffect) ++ # After this tag, mechanisms are not by side-effect, so they'll ++ # only be used when explicitly requested. ++ if test "x$enable_dependency_tracking" = xyes; then ++ continue ++ else ++ break ++ fi ++ ;; ++ msvc7 | msvc7msys | msvisualcpp | msvcmsys) ++ # This compiler won't grok '-c -o', but also, the minuso test has ++ # not run yet. These depmodes are late enough in the game, and ++ # so weak that their functioning should not be impacted. ++ am__obj=conftest.${OBJEXT-o} ++ am__minus_obj= ++ ;; ++ none) break ;; ++ esac ++ if depmode=$depmode \ ++ source=sub/conftest.c object=$am__obj \ ++ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ ++ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ ++ >/dev/null 2>conftest.err && ++ grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && ++ grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && ++ grep $am__obj sub/conftest.Po > /dev/null 2>&1 && ++ ${MAKE-make} -s -f confmf > /dev/null 2>&1; then ++ # icc doesn't choke on unknown options, it will just issue warnings ++ # or remarks (even with -Werror). So we grep stderr for any message ++ # that says an option was ignored or not supported. ++ # When given -MP, icc 7.0 and 7.1 complain thusly: ++ # icc: Command line warning: ignoring option '-M'; no argument required ++ # The diagnosis changed in icc 8.0: ++ # icc: Command line remark: option '-MP' not supported ++ if (grep 'ignoring option' conftest.err || ++ grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else ++ am_cv_CC_dependencies_compiler_type=$depmode ++ break ++ fi ++ fi ++ done ++ ++ cd .. 
++ rm -rf conftest.dir ++else ++ am_cv_CC_dependencies_compiler_type=none ++fi ++ ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $am_cv_CC_dependencies_compiler_type" >&5 ++printf "%s\n" "$am_cv_CC_dependencies_compiler_type" >&6; } ++CCDEPMODE=depmode=$am_cv_CC_dependencies_compiler_type ++ ++ if ++ test "x$enable_dependency_tracking" != xno \ ++ && test "$am_cv_CC_dependencies_compiler_type" = gcc3; then ++ am__fastdepCC_TRUE= ++ am__fastdepCC_FALSE='#' ++else ++ am__fastdepCC_TRUE='#' ++ am__fastdepCC_FALSE= ++fi ++ ++ ++ ++ ++ ++ ++ ++ ++ac_ext=cpp ++ac_cpp='$CXXCPP $CPPFLAGS' ++ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ++ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ++ac_compiler_gnu=$ac_cv_cxx_compiler_gnu ++if test -z "$CXX"; then ++ if test -n "$CCC"; then ++ CXX=$CCC ++ else ++ if test -n "$ac_tool_prefix"; then ++ for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC clang++ ++ do ++ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. ++set dummy $ac_tool_prefix$ac_prog; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... " >&6; } ++if test ${ac_cv_prog_CXX+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$CXX"; then ++ ac_cv_prog_CXX="$CXX" # Let the user override the test. 
++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_CXX="$ac_tool_prefix$ac_prog" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++CXX=$ac_cv_prog_CXX ++if test -n "$CXX"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CXX" >&5 ++printf "%s\n" "$CXX" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ ++ test -n "$CXX" && break ++ done ++fi ++if test -z "$CXX"; then ++ ac_ct_CXX=$CXX ++ for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC clang++ ++do ++ # Extract the first word of "$ac_prog", so it can be a program name with args. ++set dummy $ac_prog; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... " >&6; } ++if test ${ac_cv_prog_ac_ct_CXX+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$ac_ct_CXX"; then ++ ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test. 
++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_ac_ct_CXX="$ac_prog" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++ac_ct_CXX=$ac_cv_prog_ac_ct_CXX ++if test -n "$ac_ct_CXX"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CXX" >&5 ++printf "%s\n" "$ac_ct_CXX" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ ++ test -n "$ac_ct_CXX" && break ++done ++ ++ if test "x$ac_ct_CXX" = x; then ++ CXX="g++" ++ else ++ case $cross_compiling:$ac_tool_warned in ++yes:) ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++ac_tool_warned=yes ;; ++esac ++ CXX=$ac_ct_CXX ++ fi ++fi ++ ++ fi ++fi ++# Provide some information about the compiler. ++printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for C++ compiler version" >&5 ++set X $ac_compile ++ac_compiler=$2 ++for ac_option in --version -v -V -qversion; do ++ { { ac_try="$ac_compiler $ac_option >&5" ++case "(($ac_try" in ++ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; ++ *) ac_try_echo=$ac_try;; ++esac ++eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" ++printf "%s\n" "$ac_try_echo"; } >&5 ++ (eval "$ac_compiler $ac_option >&5") 2>conftest.err ++ ac_status=$? ++ if test -s conftest.err; then ++ sed '10a\ ++... rest of stderr output deleted ... ++ 10q' conftest.err >conftest.er1 ++ cat conftest.er1 >&5 ++ fi ++ rm -f conftest.er1 conftest.err ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 ++ test $ac_status = 0; } ++done ++ ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the compiler supports GNU C++" >&5 ++printf %s "checking whether the compiler supports GNU C++... " >&6; } ++if test ${ac_cv_cxx_compiler_gnu+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++ ++int ++main (void) ++{ ++#ifndef __GNUC__ ++ choke me ++#endif ++ ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_cxx_try_compile "$LINENO" ++then : ++ ac_compiler_gnu=yes ++else $as_nop ++ ac_compiler_gnu=no ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++ac_cv_cxx_compiler_gnu=$ac_compiler_gnu ++ ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_cxx_compiler_gnu" >&5 ++printf "%s\n" "$ac_cv_cxx_compiler_gnu" >&6; } ++ac_compiler_gnu=$ac_cv_cxx_compiler_gnu ++ ++if test $ac_compiler_gnu = yes; then ++ GXX=yes ++else ++ GXX= ++fi ++ac_test_CXXFLAGS=${CXXFLAGS+y} ++ac_save_CXXFLAGS=$CXXFLAGS ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $CXX accepts -g" >&5 ++printf %s "checking whether $CXX accepts -g... " >&6; } ++if test ${ac_cv_prog_cxx_g+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ ac_save_cxx_werror_flag=$ac_cxx_werror_flag ++ ac_cxx_werror_flag=yes ++ ac_cv_prog_cxx_g=no ++ CXXFLAGS="-g" ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++ ++int ++main (void) ++{ ++ ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_cxx_try_compile "$LINENO" ++then : ++ ac_cv_prog_cxx_g=yes ++else $as_nop ++ CXXFLAGS="" ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++ ++int ++main (void) ++{ ++ ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_cxx_try_compile "$LINENO" ++then : ++ ++else $as_nop ++ ac_cxx_werror_flag=$ac_save_cxx_werror_flag ++ CXXFLAGS="-g" ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. 
*/ ++ ++int ++main (void) ++{ ++ ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_cxx_try_compile "$LINENO" ++then : ++ ac_cv_prog_cxx_g=yes ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++ ac_cxx_werror_flag=$ac_save_cxx_werror_flag ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_g" >&5 ++printf "%s\n" "$ac_cv_prog_cxx_g" >&6; } ++if test $ac_test_CXXFLAGS; then ++ CXXFLAGS=$ac_save_CXXFLAGS ++elif test $ac_cv_prog_cxx_g = yes; then ++ if test "$GXX" = yes; then ++ CXXFLAGS="-g -O2" ++ else ++ CXXFLAGS="-g" ++ fi ++else ++ if test "$GXX" = yes; then ++ CXXFLAGS="-O2" ++ else ++ CXXFLAGS= ++ fi ++fi ++ac_prog_cxx_stdcxx=no ++if test x$ac_prog_cxx_stdcxx = xno ++then : ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CXX option to enable C++11 features" >&5 ++printf %s "checking for $CXX option to enable C++11 features... " >&6; } ++if test ${ac_cv_prog_cxx_cxx11+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ ac_cv_prog_cxx_cxx11=no ++ac_save_CXX=$CXX ++cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. 
*/ ++$ac_cxx_conftest_cxx11_program ++_ACEOF ++for ac_arg in '' -std=gnu++11 -std=gnu++0x -std=c++11 -std=c++0x -qlanglvl=extended0x -AA ++do ++ CXX="$ac_save_CXX $ac_arg" ++ if ac_fn_cxx_try_compile "$LINENO" ++then : ++ ac_cv_prog_cxx_cxx11=$ac_arg ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam ++ test "x$ac_cv_prog_cxx_cxx11" != "xno" && break ++done ++rm -f conftest.$ac_ext ++CXX=$ac_save_CXX ++fi ++ ++if test "x$ac_cv_prog_cxx_cxx11" = xno ++then : ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 ++printf "%s\n" "unsupported" >&6; } ++else $as_nop ++ if test "x$ac_cv_prog_cxx_cxx11" = x ++then : ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 ++printf "%s\n" "none needed" >&6; } ++else $as_nop ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_cxx11" >&5 ++printf "%s\n" "$ac_cv_prog_cxx_cxx11" >&6; } ++ CXX="$CXX $ac_cv_prog_cxx_cxx11" ++fi ++ ac_cv_prog_cxx_stdcxx=$ac_cv_prog_cxx_cxx11 ++ ac_prog_cxx_stdcxx=cxx11 ++fi ++fi ++if test x$ac_prog_cxx_stdcxx = xno ++then : ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CXX option to enable C++98 features" >&5 ++printf %s "checking for $CXX option to enable C++98 features... " >&6; } ++if test ${ac_cv_prog_cxx_cxx98+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ ac_cv_prog_cxx_cxx98=no ++ac_save_CXX=$CXX ++cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. 
*/ ++$ac_cxx_conftest_cxx98_program ++_ACEOF ++for ac_arg in '' -std=gnu++98 -std=c++98 -qlanglvl=extended -AA ++do ++ CXX="$ac_save_CXX $ac_arg" ++ if ac_fn_cxx_try_compile "$LINENO" ++then : ++ ac_cv_prog_cxx_cxx98=$ac_arg ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam ++ test "x$ac_cv_prog_cxx_cxx98" != "xno" && break ++done ++rm -f conftest.$ac_ext ++CXX=$ac_save_CXX ++fi ++ ++if test "x$ac_cv_prog_cxx_cxx98" = xno ++then : ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 ++printf "%s\n" "unsupported" >&6; } ++else $as_nop ++ if test "x$ac_cv_prog_cxx_cxx98" = x ++then : ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 ++printf "%s\n" "none needed" >&6; } ++else $as_nop ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_cxx98" >&5 ++printf "%s\n" "$ac_cv_prog_cxx_cxx98" >&6; } ++ CXX="$CXX $ac_cv_prog_cxx_cxx98" ++fi ++ ac_cv_prog_cxx_stdcxx=$ac_cv_prog_cxx_cxx98 ++ ac_prog_cxx_stdcxx=cxx98 ++fi ++fi ++ ++ac_ext=c ++ac_cpp='$CPP $CPPFLAGS' ++ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ++ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ++ac_compiler_gnu=$ac_cv_c_compiler_gnu ++ ++depcc="$CXX" am_compiler_list= ++ ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 ++printf %s "checking dependency style of $depcc... " >&6; } ++if test ${am_cv_CXX_dependencies_compiler_type+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then ++ # We make a subdir and do the tests there. Otherwise we can end up ++ # making bogus files that we don't know about and never remove. For ++ # instance it was reported that on HP-UX the gcc test will end up ++ # making a dummy file named 'D' -- because '-MD' means "put the output ++ # in D". 
++ rm -rf conftest.dir ++ mkdir conftest.dir ++ # Copy depcomp to subdir because otherwise we won't find it if we're ++ # using a relative directory. ++ cp "$am_depcomp" conftest.dir ++ cd conftest.dir ++ # We will build objects and dependencies in a subdirectory because ++ # it helps to detect inapplicable dependency modes. For instance ++ # both Tru64's cc and ICC support -MD to output dependencies as a ++ # side effect of compilation, but ICC will put the dependencies in ++ # the current directory while Tru64 will put them in the object ++ # directory. ++ mkdir sub ++ ++ am_cv_CXX_dependencies_compiler_type=none ++ if test "$am_compiler_list" = ""; then ++ am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp` ++ fi ++ am__universal=false ++ case " $depcc " in #( ++ *\ -arch\ *\ -arch\ *) am__universal=true ;; ++ esac ++ ++ for depmode in $am_compiler_list; do ++ # Setup a source with many dependencies, because some compilers ++ # like to wrap large dependency lists on column 80 (with \), and ++ # we should not choose a depcomp mode which is confused by this. ++ # ++ # We need to recreate these files for each test, as the compiler may ++ # overwrite some of them when testing with obscure command lines. ++ # This happens at least with the AIX C compiler. ++ : > sub/conftest.c ++ for i in 1 2 3 4 5 6; do ++ echo '#include "conftst'$i'.h"' >> sub/conftest.c ++ # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with ++ # Solaris 10 /bin/sh. ++ echo '/* dummy */' > sub/conftst$i.h ++ done ++ echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf ++ ++ # We check with '-c' and '-o' for the sake of the "dashmstdout" ++ # mode. It turns out that the SunPro C++ compiler does not properly ++ # handle '-M -o', and we need to detect this. Also, some Intel ++ # versions had trouble with output in subdirs. 
++ am__obj=sub/conftest.${OBJEXT-o} ++ am__minus_obj="-o $am__obj" ++ case $depmode in ++ gcc) ++ # This depmode causes a compiler race in universal mode. ++ test "$am__universal" = false || continue ++ ;; ++ nosideeffect) ++ # After this tag, mechanisms are not by side-effect, so they'll ++ # only be used when explicitly requested. ++ if test "x$enable_dependency_tracking" = xyes; then ++ continue ++ else ++ break ++ fi ++ ;; ++ msvc7 | msvc7msys | msvisualcpp | msvcmsys) ++ # This compiler won't grok '-c -o', but also, the minuso test has ++ # not run yet. These depmodes are late enough in the game, and ++ # so weak that their functioning should not be impacted. ++ am__obj=conftest.${OBJEXT-o} ++ am__minus_obj= ++ ;; ++ none) break ;; ++ esac ++ if depmode=$depmode \ ++ source=sub/conftest.c object=$am__obj \ ++ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ ++ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ ++ >/dev/null 2>conftest.err && ++ grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && ++ grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && ++ grep $am__obj sub/conftest.Po > /dev/null 2>&1 && ++ ${MAKE-make} -s -f confmf > /dev/null 2>&1; then ++ # icc doesn't choke on unknown options, it will just issue warnings ++ # or remarks (even with -Werror). So we grep stderr for any message ++ # that says an option was ignored or not supported. ++ # When given -MP, icc 7.0 and 7.1 complain thusly: ++ # icc: Command line warning: ignoring option '-M'; no argument required ++ # The diagnosis changed in icc 8.0: ++ # icc: Command line remark: option '-MP' not supported ++ if (grep 'ignoring option' conftest.err || ++ grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else ++ am_cv_CXX_dependencies_compiler_type=$depmode ++ break ++ fi ++ fi ++ done ++ ++ cd .. 
++ rm -rf conftest.dir ++else ++ am_cv_CXX_dependencies_compiler_type=none ++fi ++ ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $am_cv_CXX_dependencies_compiler_type" >&5 ++printf "%s\n" "$am_cv_CXX_dependencies_compiler_type" >&6; } ++CXXDEPMODE=depmode=$am_cv_CXX_dependencies_compiler_type ++ ++ if ++ test "x$enable_dependency_tracking" != xno \ ++ && test "$am_cv_CXX_dependencies_compiler_type" = gcc3; then ++ am__fastdepCXX_TRUE= ++ am__fastdepCXX_FALSE='#' ++else ++ am__fastdepCXX_TRUE='#' ++ am__fastdepCXX_FALSE= ++fi ++ ++ ++# Check whether --enable-largefile was given. ++if test ${enable_largefile+y} ++then : ++ enableval=$enable_largefile; ++fi ++ ++if test "$enable_largefile" != no; then ++ ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for special C compiler options needed for large files" >&5 ++printf %s "checking for special C compiler options needed for large files... " >&6; } ++if test ${ac_cv_sys_largefile_CC+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ ac_cv_sys_largefile_CC=no ++ if test "$GCC" != yes; then ++ ac_save_CC=$CC ++ while :; do ++ # IRIX 6.2 and later do not support large files by default, ++ # so use the C compiler's -n32 option if that helps. ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++#include ++ /* Check that off_t can represent 2**63 - 1 correctly. ++ We can't simply define LARGE_OFF_T to be 9223372036854775807, ++ since some C++ compilers masquerading as C compilers ++ incorrectly reject 9223372036854775807. */ ++#define LARGE_OFF_T (((off_t) 1 << 31 << 31) - 1 + ((off_t) 1 << 31 << 31)) ++ int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 ++ && LARGE_OFF_T % 2147483647 == 1) ++ ? 
1 : -1]; ++int ++main (void) ++{ ++ ++ ; ++ return 0; ++} ++_ACEOF ++ if ac_fn_c_try_compile "$LINENO" ++then : ++ break ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam ++ CC="$CC -n32" ++ if ac_fn_c_try_compile "$LINENO" ++then : ++ ac_cv_sys_largefile_CC=' -n32'; break ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam ++ break ++ done ++ CC=$ac_save_CC ++ rm -f conftest.$ac_ext ++ fi ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_largefile_CC" >&5 ++printf "%s\n" "$ac_cv_sys_largefile_CC" >&6; } ++ if test "$ac_cv_sys_largefile_CC" != no; then ++ CC=$CC$ac_cv_sys_largefile_CC ++ fi ++ ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for _FILE_OFFSET_BITS value needed for large files" >&5 ++printf %s "checking for _FILE_OFFSET_BITS value needed for large files... " >&6; } ++if test ${ac_cv_sys_file_offset_bits+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ while :; do ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++#include ++ /* Check that off_t can represent 2**63 - 1 correctly. ++ We can't simply define LARGE_OFF_T to be 9223372036854775807, ++ since some C++ compilers masquerading as C compilers ++ incorrectly reject 9223372036854775807. */ ++#define LARGE_OFF_T (((off_t) 1 << 31 << 31) - 1 + ((off_t) 1 << 31 << 31)) ++ int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 ++ && LARGE_OFF_T % 2147483647 == 1) ++ ? 1 : -1]; ++int ++main (void) ++{ ++ ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_c_try_compile "$LINENO" ++then : ++ ac_cv_sys_file_offset_bits=no; break ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++#define _FILE_OFFSET_BITS 64 ++#include ++ /* Check that off_t can represent 2**63 - 1 correctly. 
++ We can't simply define LARGE_OFF_T to be 9223372036854775807, ++ since some C++ compilers masquerading as C compilers ++ incorrectly reject 9223372036854775807. */ ++#define LARGE_OFF_T (((off_t) 1 << 31 << 31) - 1 + ((off_t) 1 << 31 << 31)) ++ int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 ++ && LARGE_OFF_T % 2147483647 == 1) ++ ? 1 : -1]; ++int ++main (void) ++{ ++ ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_c_try_compile "$LINENO" ++then : ++ ac_cv_sys_file_offset_bits=64; break ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++ ac_cv_sys_file_offset_bits=unknown ++ break ++done ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_file_offset_bits" >&5 ++printf "%s\n" "$ac_cv_sys_file_offset_bits" >&6; } ++case $ac_cv_sys_file_offset_bits in #( ++ no | unknown) ;; ++ *) ++printf "%s\n" "#define _FILE_OFFSET_BITS $ac_cv_sys_file_offset_bits" >>confdefs.h ++;; ++esac ++rm -rf conftest* ++ if test $ac_cv_sys_file_offset_bits = unknown; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for _LARGE_FILES value needed for large files" >&5 ++printf %s "checking for _LARGE_FILES value needed for large files... " >&6; } ++if test ${ac_cv_sys_large_files+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ while :; do ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++#include ++ /* Check that off_t can represent 2**63 - 1 correctly. ++ We can't simply define LARGE_OFF_T to be 9223372036854775807, ++ since some C++ compilers masquerading as C compilers ++ incorrectly reject 9223372036854775807. */ ++#define LARGE_OFF_T (((off_t) 1 << 31 << 31) - 1 + ((off_t) 1 << 31 << 31)) ++ int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 ++ && LARGE_OFF_T % 2147483647 == 1) ++ ? 
1 : -1]; ++int ++main (void) ++{ ++ ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_c_try_compile "$LINENO" ++then : ++ ac_cv_sys_large_files=no; break ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++#define _LARGE_FILES 1 ++#include ++ /* Check that off_t can represent 2**63 - 1 correctly. ++ We can't simply define LARGE_OFF_T to be 9223372036854775807, ++ since some C++ compilers masquerading as C compilers ++ incorrectly reject 9223372036854775807. */ ++#define LARGE_OFF_T (((off_t) 1 << 31 << 31) - 1 + ((off_t) 1 << 31 << 31)) ++ int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 ++ && LARGE_OFF_T % 2147483647 == 1) ++ ? 1 : -1]; ++int ++main (void) ++{ ++ ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_c_try_compile "$LINENO" ++then : ++ ac_cv_sys_large_files=1; break ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++ ac_cv_sys_large_files=unknown ++ break ++done ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_large_files" >&5 ++printf "%s\n" "$ac_cv_sys_large_files" >&6; } ++case $ac_cv_sys_large_files in #( ++ no | unknown) ;; ++ *) ++printf "%s\n" "#define _LARGE_FILES $ac_cv_sys_large_files" >>confdefs.h ++;; ++esac ++rm -rf conftest* ++ fi ++fi ++ ++ ++# Check whether -static-libgcc is supported. ++saved_LDFLAGS="$LDFLAGS" ++LDFLAGS="$LDFLAGS -static-libgcc" ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for -static-libgcc" >&5 ++printf %s "checking for -static-libgcc... " >&6; } ++cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. 
*/ ++ ++ int main() {} ++_ACEOF ++if ac_fn_c_try_link "$LINENO" ++then : ++ have_static_libgcc=yes ++else $as_nop ++ have_static_libgcc=no ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam \ ++ conftest$ac_exeext conftest.$ac_ext ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $have_static_libgcc" >&5 ++printf "%s\n" "$have_static_libgcc" >&6; }; ++LDFLAGS="$saved_LDFLAGS" ++# Need -Wc to get it through libtool. ++if test "x$have_static_libgcc" = xyes; then ++ ac_bolt_plugin_ldflags="-Wc,-static-libgcc" ++fi ++ ++ ++if test x"$host_subdir" = x.; then ++ gcc_build_dir=../gcc ++else ++ gcc_build_dir=../../$host_subdir/gcc ++fi ++ ++ ++# Used for constructing correct paths for offload compilers. ++accel_dir_suffix= ++real_target_noncanonical=${target_noncanonical} ++if test x"$enable_as_accelerator_for" != x; then ++ accel_dir_suffix=/accel/${target_noncanonical} ++ real_target_noncanonical=${enable_as_accelerator_for} ++fi ++ ++ ++ ++# Determine what GCC version number to use in filesystem paths. ++GCC_BASE_VER ++ ++case `pwd` in ++ *\ * | *\ *) ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&5 ++printf "%s\n" "$as_me: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&2;} ;; ++esac ++ ++ ++ ++macro_version='2.4.7' ++macro_revision='2.4.7' ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ltmain=$ac_aux_dir/ltmain.sh ++ ++# Backslashify metacharacters that are still active within ++# double-quoted strings. ++sed_quote_subst='s/\(["`$\\]\)/\\\1/g' ++ ++# Same as above, but do not quote variable references. ++double_quote_subst='s/\(["`\\]\)/\\\1/g' ++ ++# Sed substitution to delay expansion of an escaped shell variable in a ++# double_quote_subst'ed string. ++delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' ++ ++# Sed substitution to delay expansion of an escaped single quote. 
++delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g' ++ ++# Sed substitution to avoid accidental globbing in evaled expressions ++no_glob_subst='s/\*/\\\*/g' ++ ++ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' ++ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO ++ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO ++ ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to print strings" >&5 ++printf %s "checking how to print strings... " >&6; } ++# Test print first, because it will be a builtin if present. ++if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ ++ test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then ++ ECHO='print -r --' ++elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then ++ ECHO='printf %s\n' ++else ++ # Use this function as a fallback that always works. ++ func_fallback_echo () ++ { ++ eval 'cat <<_LTECHO_EOF ++$1 ++_LTECHO_EOF' ++ } ++ ECHO='func_fallback_echo' ++fi ++ ++# func_echo_all arg... ++# Invoke $ECHO with all args, space-separated. ++func_echo_all () ++{ ++ $ECHO "" ++} ++ ++case $ECHO in ++ printf*) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: printf" >&5 ++printf "%s\n" "printf" >&6; } ;; ++ print*) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: print -r" >&5 ++printf "%s\n" "print -r" >&6; } ;; ++ *) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: cat" >&5 ++printf "%s\n" "cat" >&6; } ;; ++esac ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for a sed that does not truncate output" >&5 ++printf %s "checking for a sed that does not truncate output... 
" >&6; } ++if test ${ac_cv_path_SED+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ ac_script=s/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/ ++ for ac_i in 1 2 3 4 5 6 7; do ++ ac_script="$ac_script$as_nl$ac_script" ++ done ++ echo "$ac_script" 2>/dev/null | sed 99q >conftest.sed ++ { ac_script=; unset ac_script;} ++ if test -z "$SED"; then ++ ac_path_SED_found=false ++ # Loop through the user's path and test for each of PROGNAME-LIST ++ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_prog in sed gsed ++ do ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ ac_path_SED="$as_dir$ac_prog$ac_exec_ext" ++ as_fn_executable_p "$ac_path_SED" || continue ++# Check for GNU ac_path_SED and select it if it is found. ++ # Check for GNU $ac_path_SED ++case `"$ac_path_SED" --version 2>&1` in ++*GNU*) ++ ac_cv_path_SED="$ac_path_SED" ac_path_SED_found=:;; ++*) ++ ac_count=0 ++ printf %s 0123456789 >"conftest.in" ++ while : ++ do ++ cat "conftest.in" "conftest.in" >"conftest.tmp" ++ mv "conftest.tmp" "conftest.in" ++ cp "conftest.in" "conftest.nl" ++ printf "%s\n" '' >> "conftest.nl" ++ "$ac_path_SED" -f conftest.sed < "conftest.nl" >"conftest.out" 2>/dev/null || break ++ diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break ++ as_fn_arith $ac_count + 1 && ac_count=$as_val ++ if test $ac_count -gt ${ac_path_SED_max-0}; then ++ # Best one so far, save it but keep looking for a better one ++ ac_cv_path_SED="$ac_path_SED" ++ ac_path_SED_max=$ac_count ++ fi ++ # 10*(2^10) chars as input seems more than enough ++ test $ac_count -gt 10 && break ++ done ++ rm -f conftest.in conftest.tmp conftest.nl conftest.out;; ++esac ++ ++ $ac_path_SED_found && break 3 ++ done ++ done ++ done ++IFS=$as_save_IFS ++ if test -z "$ac_cv_path_SED"; then ++ as_fn_error $? 
"no acceptable sed could be found in \$PATH" "$LINENO" 5 ++ fi ++else ++ ac_cv_path_SED=$SED ++fi ++ ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_SED" >&5 ++printf "%s\n" "$ac_cv_path_SED" >&6; } ++ SED="$ac_cv_path_SED" ++ rm -f conftest.sed ++ ++test -z "$SED" && SED=sed ++Xsed="$SED -e 1s/^X//" ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5 ++printf %s "checking for grep that handles long lines and -e... " >&6; } ++if test ${ac_cv_path_GREP+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -z "$GREP"; then ++ ac_path_GREP_found=false ++ # Loop through the user's path and test for each of PROGNAME-LIST ++ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_prog in grep ggrep ++ do ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ ac_path_GREP="$as_dir$ac_prog$ac_exec_ext" ++ as_fn_executable_p "$ac_path_GREP" || continue ++# Check for GNU ac_path_GREP and select it if it is found. 
++ # Check for GNU $ac_path_GREP ++case `"$ac_path_GREP" --version 2>&1` in ++*GNU*) ++ ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; ++*) ++ ac_count=0 ++ printf %s 0123456789 >"conftest.in" ++ while : ++ do ++ cat "conftest.in" "conftest.in" >"conftest.tmp" ++ mv "conftest.tmp" "conftest.in" ++ cp "conftest.in" "conftest.nl" ++ printf "%s\n" 'GREP' >> "conftest.nl" ++ "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break ++ diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break ++ as_fn_arith $ac_count + 1 && ac_count=$as_val ++ if test $ac_count -gt ${ac_path_GREP_max-0}; then ++ # Best one so far, save it but keep looking for a better one ++ ac_cv_path_GREP="$ac_path_GREP" ++ ac_path_GREP_max=$ac_count ++ fi ++ # 10*(2^10) chars as input seems more than enough ++ test $ac_count -gt 10 && break ++ done ++ rm -f conftest.in conftest.tmp conftest.nl conftest.out;; ++esac ++ ++ $ac_path_GREP_found && break 3 ++ done ++ done ++ done ++IFS=$as_save_IFS ++ if test -z "$ac_cv_path_GREP"; then ++ as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 ++ fi ++else ++ ac_cv_path_GREP=$GREP ++fi ++ ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5 ++printf "%s\n" "$ac_cv_path_GREP" >&6; } ++ GREP="$ac_cv_path_GREP" ++ ++ ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 ++printf %s "checking for egrep... 
" >&6; } ++if test ${ac_cv_path_EGREP+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 ++ then ac_cv_path_EGREP="$GREP -E" ++ else ++ if test -z "$EGREP"; then ++ ac_path_EGREP_found=false ++ # Loop through the user's path and test for each of PROGNAME-LIST ++ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_prog in egrep ++ do ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ ac_path_EGREP="$as_dir$ac_prog$ac_exec_ext" ++ as_fn_executable_p "$ac_path_EGREP" || continue ++# Check for GNU ac_path_EGREP and select it if it is found. ++ # Check for GNU $ac_path_EGREP ++case `"$ac_path_EGREP" --version 2>&1` in ++*GNU*) ++ ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; ++*) ++ ac_count=0 ++ printf %s 0123456789 >"conftest.in" ++ while : ++ do ++ cat "conftest.in" "conftest.in" >"conftest.tmp" ++ mv "conftest.tmp" "conftest.in" ++ cp "conftest.in" "conftest.nl" ++ printf "%s\n" 'EGREP' >> "conftest.nl" ++ "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break ++ diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break ++ as_fn_arith $ac_count + 1 && ac_count=$as_val ++ if test $ac_count -gt ${ac_path_EGREP_max-0}; then ++ # Best one so far, save it but keep looking for a better one ++ ac_cv_path_EGREP="$ac_path_EGREP" ++ ac_path_EGREP_max=$ac_count ++ fi ++ # 10*(2^10) chars as input seems more than enough ++ test $ac_count -gt 10 && break ++ done ++ rm -f conftest.in conftest.tmp conftest.nl conftest.out;; ++esac ++ ++ $ac_path_EGREP_found && break 3 ++ done ++ done ++ done ++IFS=$as_save_IFS ++ if test -z "$ac_cv_path_EGREP"; then ++ as_fn_error $? 
"no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 ++ fi ++else ++ ac_cv_path_EGREP=$EGREP ++fi ++ ++ fi ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5 ++printf "%s\n" "$ac_cv_path_EGREP" >&6; } ++ EGREP="$ac_cv_path_EGREP" ++ ++ ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for fgrep" >&5 ++printf %s "checking for fgrep... " >&6; } ++if test ${ac_cv_path_FGREP+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if echo 'ab*c' | $GREP -F 'ab*c' >/dev/null 2>&1 ++ then ac_cv_path_FGREP="$GREP -F" ++ else ++ if test -z "$FGREP"; then ++ ac_path_FGREP_found=false ++ # Loop through the user's path and test for each of PROGNAME-LIST ++ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_prog in fgrep ++ do ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ ac_path_FGREP="$as_dir$ac_prog$ac_exec_ext" ++ as_fn_executable_p "$ac_path_FGREP" || continue ++# Check for GNU ac_path_FGREP and select it if it is found. 
++ # Check for GNU $ac_path_FGREP ++case `"$ac_path_FGREP" --version 2>&1` in ++*GNU*) ++ ac_cv_path_FGREP="$ac_path_FGREP" ac_path_FGREP_found=:;; ++*) ++ ac_count=0 ++ printf %s 0123456789 >"conftest.in" ++ while : ++ do ++ cat "conftest.in" "conftest.in" >"conftest.tmp" ++ mv "conftest.tmp" "conftest.in" ++ cp "conftest.in" "conftest.nl" ++ printf "%s\n" 'FGREP' >> "conftest.nl" ++ "$ac_path_FGREP" FGREP < "conftest.nl" >"conftest.out" 2>/dev/null || break ++ diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break ++ as_fn_arith $ac_count + 1 && ac_count=$as_val ++ if test $ac_count -gt ${ac_path_FGREP_max-0}; then ++ # Best one so far, save it but keep looking for a better one ++ ac_cv_path_FGREP="$ac_path_FGREP" ++ ac_path_FGREP_max=$ac_count ++ fi ++ # 10*(2^10) chars as input seems more than enough ++ test $ac_count -gt 10 && break ++ done ++ rm -f conftest.in conftest.tmp conftest.nl conftest.out;; ++esac ++ ++ $ac_path_FGREP_found && break 3 ++ done ++ done ++ done ++IFS=$as_save_IFS ++ if test -z "$ac_cv_path_FGREP"; then ++ as_fn_error $? "no acceptable fgrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 ++ fi ++else ++ ac_cv_path_FGREP=$FGREP ++fi ++ ++ fi ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_FGREP" >&5 ++printf "%s\n" "$ac_cv_path_FGREP" >&6; } ++ FGREP="$ac_cv_path_FGREP" ++ ++ ++test -z "$GREP" && GREP=grep ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++# Check whether --with-gnu-ld was given. ++if test ${with_gnu_ld+y} ++then : ++ withval=$with_gnu_ld; test no = "$withval" || with_gnu_ld=yes ++else $as_nop ++ with_gnu_ld=no ++fi ++ ++ac_prog=ld ++if test yes = "$GCC"; then ++ # Check if gcc -print-prog-name=ld gives a path. ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5 ++printf %s "checking for ld used by $CC... 
" >&6; } ++ case $host in ++ *-*-mingw*) ++ # gcc leaves a trailing carriage return, which upsets mingw ++ ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; ++ *) ++ ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; ++ esac ++ case $ac_prog in ++ # Accept absolute paths. ++ [\\/]* | ?:[\\/]*) ++ re_direlt='/[^/][^/]*/\.\./' ++ # Canonicalize the pathname of ld ++ ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` ++ while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do ++ ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` ++ done ++ test -z "$LD" && LD=$ac_prog ++ ;; ++ "") ++ # If it fails, then pretend we aren't using GCC. ++ ac_prog=ld ++ ;; ++ *) ++ # If it is relative, then search for the first ld in PATH. ++ with_gnu_ld=unknown ++ ;; ++ esac ++elif test yes = "$with_gnu_ld"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5 ++printf %s "checking for GNU ld... " >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5 ++printf %s "checking for non-GNU ld... " >&6; } ++fi ++if test ${lt_cv_path_LD+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -z "$LD"; then ++ lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR ++ for ac_dir in $PATH; do ++ IFS=$lt_save_ifs ++ test -z "$ac_dir" && ac_dir=. ++ if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then ++ lt_cv_path_LD=$ac_dir/$ac_prog ++ # Check to see if the program is GNU ld. I'd rather use --version, ++ # but apparently some variants of GNU ld only accept -v. ++ # Break only if it was the GNU/non-GNU ld that we prefer. ++ case `"$lt_cv_path_LD" -v 2>&1 &5 ++printf "%s\n" "$LD" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5 ++printf %s "checking if the linker ($LD) is GNU ld... 
" >&6; } ++if test ${lt_cv_prog_gnu_ld+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ # I'd rather use --version here, but apparently some GNU lds only accept -v. ++case `$LD -v 2>&1 &5 ++printf "%s\n" "$lt_cv_prog_gnu_ld" >&6; } ++with_gnu_ld=$lt_cv_prog_gnu_ld ++ ++ ++ ++ ++ ++ ++ ++ ++ ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for BSD- or MS-compatible name lister (nm)" >&5 ++printf %s "checking for BSD- or MS-compatible name lister (nm)... " >&6; } ++if test ${lt_cv_path_NM+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$NM"; then ++ # Let the user override the test. ++ lt_cv_path_NM=$NM ++else ++ lt_nm_to_check=${ac_tool_prefix}nm ++ if test -n "$ac_tool_prefix" && test "$build" = "$host"; then ++ lt_nm_to_check="$lt_nm_to_check nm" ++ fi ++ for lt_tmp_nm in $lt_nm_to_check; do ++ lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR ++ for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do ++ IFS=$lt_save_ifs ++ test -z "$ac_dir" && ac_dir=. ++ tmp_nm=$ac_dir/$lt_tmp_nm ++ if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext"; then ++ # Check to see if the nm accepts a BSD-compat flag. 
++ # Adding the 'sed 1q' prevents false positives on HP-UX, which says: ++ # nm: unknown option "B" ignored ++ # Tru64's nm complains that /dev/null is an invalid object file ++ # MSYS converts /dev/null to NUL, MinGW nm treats NUL as empty ++ case $build_os in ++ mingw*) lt_bad_file=conftest.nm/nofile ;; ++ *) lt_bad_file=/dev/null ;; ++ esac ++ case `"$tmp_nm" -B $lt_bad_file 2>&1 | $SED '1q'` in ++ *$lt_bad_file* | *'Invalid file or object type'*) ++ lt_cv_path_NM="$tmp_nm -B" ++ break 2 ++ ;; ++ *) ++ case `"$tmp_nm" -p /dev/null 2>&1 | $SED '1q'` in ++ */dev/null*) ++ lt_cv_path_NM="$tmp_nm -p" ++ break 2 ++ ;; ++ *) ++ lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but ++ continue # so that we can try to find one that supports BSD flags ++ ;; ++ esac ++ ;; ++ esac ++ fi ++ done ++ IFS=$lt_save_ifs ++ done ++ : ${lt_cv_path_NM=no} ++fi ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_NM" >&5 ++printf "%s\n" "$lt_cv_path_NM" >&6; } ++if test no != "$lt_cv_path_NM"; then ++ NM=$lt_cv_path_NM ++else ++ # Didn't find any BSD compatible name lister, look for dumpbin. ++ if test -n "$DUMPBIN"; then : ++ # Let the user override the test. ++ else ++ if test -n "$ac_tool_prefix"; then ++ for ac_prog in dumpbin "link -dump" ++ do ++ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. ++set dummy $ac_tool_prefix$ac_prog; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... " >&6; } ++if test ${ac_cv_prog_DUMPBIN+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$DUMPBIN"; then ++ ac_cv_prog_DUMPBIN="$DUMPBIN" # Let the user override the test. 
++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_DUMPBIN="$ac_tool_prefix$ac_prog" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++DUMPBIN=$ac_cv_prog_DUMPBIN ++if test -n "$DUMPBIN"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $DUMPBIN" >&5 ++printf "%s\n" "$DUMPBIN" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ ++ test -n "$DUMPBIN" && break ++ done ++fi ++if test -z "$DUMPBIN"; then ++ ac_ct_DUMPBIN=$DUMPBIN ++ for ac_prog in dumpbin "link -dump" ++do ++ # Extract the first word of "$ac_prog", so it can be a program name with args. ++set dummy $ac_prog; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... " >&6; } ++if test ${ac_cv_prog_ac_ct_DUMPBIN+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$ac_ct_DUMPBIN"; then ++ ac_cv_prog_ac_ct_DUMPBIN="$ac_ct_DUMPBIN" # Let the user override the test. 
++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_ac_ct_DUMPBIN="$ac_prog" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++ac_ct_DUMPBIN=$ac_cv_prog_ac_ct_DUMPBIN ++if test -n "$ac_ct_DUMPBIN"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DUMPBIN" >&5 ++printf "%s\n" "$ac_ct_DUMPBIN" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ ++ test -n "$ac_ct_DUMPBIN" && break ++done ++ ++ if test "x$ac_ct_DUMPBIN" = x; then ++ DUMPBIN=":" ++ else ++ case $cross_compiling:$ac_tool_warned in ++yes:) ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++ac_tool_warned=yes ;; ++esac ++ DUMPBIN=$ac_ct_DUMPBIN ++ fi ++fi ++ ++ case `$DUMPBIN -symbols -headers /dev/null 2>&1 | $SED '1q'` in ++ *COFF*) ++ DUMPBIN="$DUMPBIN -symbols -headers" ++ ;; ++ *) ++ DUMPBIN=: ++ ;; ++ esac ++ fi ++ ++ if test : != "$DUMPBIN"; then ++ NM=$DUMPBIN ++ fi ++fi ++test -z "$NM" && NM=nm ++ ++ ++ ++ ++ ++ ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking the name lister ($NM) interface" >&5 ++printf %s "checking the name lister ($NM) interface... 
" >&6; } ++if test ${lt_cv_nm_interface+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ lt_cv_nm_interface="BSD nm" ++ echo "int some_variable = 0;" > conftest.$ac_ext ++ (eval echo "\"\$as_me:$LINENO: $ac_compile\"" >&5) ++ (eval "$ac_compile" 2>conftest.err) ++ cat conftest.err >&5 ++ (eval echo "\"\$as_me:$LINENO: $NM \\\"conftest.$ac_objext\\\"\"" >&5) ++ (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out) ++ cat conftest.err >&5 ++ (eval echo "\"\$as_me:$LINENO: output\"" >&5) ++ cat conftest.out >&5 ++ if $GREP 'External.*some_variable' conftest.out > /dev/null; then ++ lt_cv_nm_interface="MS dumpbin" ++ fi ++ rm -f conftest* ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_nm_interface" >&5 ++printf "%s\n" "$lt_cv_nm_interface" >&6; } ++ ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether ln -s works" >&5 ++printf %s "checking whether ln -s works... " >&6; } ++LN_S=$as_ln_s ++if test "$LN_S" = "ln -s"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5 ++printf "%s\n" "yes" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no, using $LN_S" >&5 ++printf "%s\n" "no, using $LN_S" >&6; } ++fi ++ ++# find the maximum length of command line arguments ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking the maximum length of command line arguments" >&5 ++printf %s "checking the maximum length of command line arguments... " >&6; } ++if test ${lt_cv_sys_max_cmd_len+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ i=0 ++ teststring=ABCD ++ ++ case $build_os in ++ msdosdjgpp*) ++ # On DJGPP, this test can blow up pretty badly due to problems in libc ++ # (any single argument exceeding 2000 bytes causes a buffer overrun ++ # during glob expansion). Even if it were fixed, the result of this ++ # check would be larger than it should be. 
++ lt_cv_sys_max_cmd_len=12288; # 12K is about right ++ ;; ++ ++ gnu*) ++ # Under GNU Hurd, this test is not required because there is ++ # no limit to the length of command line arguments. ++ # Libtool will interpret -1 as no limit whatsoever ++ lt_cv_sys_max_cmd_len=-1; ++ ;; ++ ++ cygwin* | mingw* | cegcc*) ++ # On Win9x/ME, this test blows up -- it succeeds, but takes ++ # about 5 minutes as the teststring grows exponentially. ++ # Worse, since 9x/ME are not pre-emptively multitasking, ++ # you end up with a "frozen" computer, even though with patience ++ # the test eventually succeeds (with a max line length of 256k). ++ # Instead, let's just punt: use the minimum linelength reported by ++ # all of the supported platforms: 8192 (on NT/2K/XP). ++ lt_cv_sys_max_cmd_len=8192; ++ ;; ++ ++ mint*) ++ # On MiNT this can take a long time and run out of memory. ++ lt_cv_sys_max_cmd_len=8192; ++ ;; ++ ++ amigaos*) ++ # On AmigaOS with pdksh, this test takes hours, literally. ++ # So we just punt and use a minimum line length of 8192. ++ lt_cv_sys_max_cmd_len=8192; ++ ;; ++ ++ bitrig* | darwin* | dragonfly* | freebsd* | midnightbsd* | netbsd* | openbsd*) ++ # This has been around since 386BSD, at least. Likely further. ++ if test -x /sbin/sysctl; then ++ lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` ++ elif test -x /usr/sbin/sysctl; then ++ lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` ++ else ++ lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs ++ fi ++ # And add a safety zone ++ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` ++ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` ++ ;; ++ ++ interix*) ++ # We know the value 262144 and hardcode it with a safety zone (like BSD) ++ lt_cv_sys_max_cmd_len=196608 ++ ;; ++ ++ os2*) ++ # The test takes a long time on OS/2. ++ lt_cv_sys_max_cmd_len=8192 ++ ;; ++ ++ osf*) ++ # Dr. 
Hans Ekkehard Plesser reports seeing a kernel panic running configure ++ # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not ++ # nice to cause kernel panics so lets avoid the loop below. ++ # First set a reasonable default. ++ lt_cv_sys_max_cmd_len=16384 ++ # ++ if test -x /sbin/sysconfig; then ++ case `/sbin/sysconfig -q proc exec_disable_arg_limit` in ++ *1*) lt_cv_sys_max_cmd_len=-1 ;; ++ esac ++ fi ++ ;; ++ sco3.2v5*) ++ lt_cv_sys_max_cmd_len=102400 ++ ;; ++ sysv5* | sco5v6* | sysv4.2uw2*) ++ kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null` ++ if test -n "$kargmax"; then ++ lt_cv_sys_max_cmd_len=`echo $kargmax | $SED 's/.*[ ]//'` ++ else ++ lt_cv_sys_max_cmd_len=32768 ++ fi ++ ;; ++ *) ++ lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null` ++ if test -n "$lt_cv_sys_max_cmd_len" && \ ++ test undefined != "$lt_cv_sys_max_cmd_len"; then ++ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` ++ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` ++ else ++ # Make teststring a little bigger before we do anything with it. ++ # a 1K string should be a reasonable start. ++ for i in 1 2 3 4 5 6 7 8; do ++ teststring=$teststring$teststring ++ done ++ SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} ++ # If test is not a shell built-in, we'll probably end up computing a ++ # maximum length that is only half of the actual maximum length, but ++ # we can't tell. ++ while { test X`env echo "$teststring$teststring" 2>/dev/null` \ ++ = "X$teststring$teststring"; } >/dev/null 2>&1 && ++ test 17 != "$i" # 1/2 MB should be enough ++ do ++ i=`expr $i + 1` ++ teststring=$teststring$teststring ++ done ++ # Only check the string length outside the loop. ++ lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1` ++ teststring= ++ # Add a significant safety factor because C++ compilers can tack on ++ # massive amounts of additional arguments before passing them to the ++ # linker. It appears as though 1/2 is a usable value. 
++ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2` ++ fi ++ ;; ++ esac ++ ++fi ++ ++if test -n "$lt_cv_sys_max_cmd_len"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sys_max_cmd_len" >&5 ++printf "%s\n" "$lt_cv_sys_max_cmd_len" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none" >&5 ++printf "%s\n" "none" >&6; } ++fi ++max_cmd_len=$lt_cv_sys_max_cmd_len ++ ++ ++ ++ ++ ++ ++: ${CP="cp -f"} ++: ${MV="mv -f"} ++: ${RM="rm -f"} ++ ++if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then ++ lt_unset=unset ++else ++ lt_unset=false ++fi ++ ++ ++ ++ ++ ++# test EBCDIC or ASCII ++case `echo X|tr X '\101'` in ++ A) # ASCII based system ++ # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr ++ lt_SP2NL='tr \040 \012' ++ lt_NL2SP='tr \015\012 \040\040' ++ ;; ++ *) # EBCDIC based system ++ lt_SP2NL='tr \100 \n' ++ lt_NL2SP='tr \r\n \100\100' ++ ;; ++esac ++ ++ ++ ++ ++ ++ ++ ++ ++ ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to $host format" >&5 ++printf %s "checking how to convert $build file names to $host format... 
" >&6; } ++if test ${lt_cv_to_host_file_cmd+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ case $host in ++ *-*-mingw* ) ++ case $build in ++ *-*-mingw* ) # actually msys ++ lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 ++ ;; ++ *-*-cygwin* ) ++ lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 ++ ;; ++ * ) # otherwise, assume *nix ++ lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 ++ ;; ++ esac ++ ;; ++ *-*-cygwin* ) ++ case $build in ++ *-*-mingw* ) # actually msys ++ lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin ++ ;; ++ *-*-cygwin* ) ++ lt_cv_to_host_file_cmd=func_convert_file_noop ++ ;; ++ * ) # otherwise, assume *nix ++ lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin ++ ;; ++ esac ++ ;; ++ * ) # unhandled hosts (and "normal" native builds) ++ lt_cv_to_host_file_cmd=func_convert_file_noop ++ ;; ++esac ++ ++fi ++ ++to_host_file_cmd=$lt_cv_to_host_file_cmd ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_host_file_cmd" >&5 ++printf "%s\n" "$lt_cv_to_host_file_cmd" >&6; } ++ ++ ++ ++ ++ ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to toolchain format" >&5 ++printf %s "checking how to convert $build file names to toolchain format... " >&6; } ++if test ${lt_cv_to_tool_file_cmd+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ #assume ordinary cross tools, or native build. ++lt_cv_to_tool_file_cmd=func_convert_file_noop ++case $host in ++ *-*-mingw* ) ++ case $build in ++ *-*-mingw* ) # actually msys ++ lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 ++ ;; ++ esac ++ ;; ++esac ++ ++fi ++ ++to_tool_file_cmd=$lt_cv_to_tool_file_cmd ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_tool_file_cmd" >&5 ++printf "%s\n" "$lt_cv_to_tool_file_cmd" >&6; } ++ ++ ++ ++ ++ ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $LD option to reload object files" >&5 ++printf %s "checking for $LD option to reload object files... 
" >&6; } ++if test ${lt_cv_ld_reload_flag+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ lt_cv_ld_reload_flag='-r' ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_reload_flag" >&5 ++printf "%s\n" "$lt_cv_ld_reload_flag" >&6; } ++reload_flag=$lt_cv_ld_reload_flag ++case $reload_flag in ++"" | " "*) ;; ++*) reload_flag=" $reload_flag" ;; ++esac ++reload_cmds='$LD$reload_flag -o $output$reload_objs' ++case $host_os in ++ cygwin* | mingw* | pw32* | cegcc*) ++ if test yes != "$GCC"; then ++ reload_cmds=false ++ fi ++ ;; ++ darwin*) ++ if test yes = "$GCC"; then ++ reload_cmds='$LTCC $LTCFLAGS -nostdlib $wl-r -o $output$reload_objs' ++ else ++ reload_cmds='$LD$reload_flag -o $output$reload_objs' ++ fi ++ ;; ++esac ++ ++ ++ ++ ++ ++ ++ ++ ++ ++if test -n "$ac_tool_prefix"; then ++ # Extract the first word of "${ac_tool_prefix}file", so it can be a program name with args. ++set dummy ${ac_tool_prefix}file; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... " >&6; } ++if test ${ac_cv_prog_FILECMD+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$FILECMD"; then ++ ac_cv_prog_FILECMD="$FILECMD" # Let the user override the test. 
++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_FILECMD="${ac_tool_prefix}file" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++FILECMD=$ac_cv_prog_FILECMD ++if test -n "$FILECMD"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $FILECMD" >&5 ++printf "%s\n" "$FILECMD" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ ++fi ++if test -z "$ac_cv_prog_FILECMD"; then ++ ac_ct_FILECMD=$FILECMD ++ # Extract the first word of "file", so it can be a program name with args. ++set dummy file; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... " >&6; } ++if test ${ac_cv_prog_ac_ct_FILECMD+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$ac_ct_FILECMD"; then ++ ac_cv_prog_ac_ct_FILECMD="$ac_ct_FILECMD" # Let the user override the test. 
++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_ac_ct_FILECMD="file" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++ac_ct_FILECMD=$ac_cv_prog_ac_ct_FILECMD ++if test -n "$ac_ct_FILECMD"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_FILECMD" >&5 ++printf "%s\n" "$ac_ct_FILECMD" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ if test "x$ac_ct_FILECMD" = x; then ++ FILECMD=":" ++ else ++ case $cross_compiling:$ac_tool_warned in ++yes:) ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++ac_tool_warned=yes ;; ++esac ++ FILECMD=$ac_ct_FILECMD ++ fi ++else ++ FILECMD="$ac_cv_prog_FILECMD" ++fi ++ ++ ++ ++ ++ ++ ++ ++if test -n "$ac_tool_prefix"; then ++ # Extract the first word of "${ac_tool_prefix}objdump", so it can be a program name with args. ++set dummy ${ac_tool_prefix}objdump; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... " >&6; } ++if test ${ac_cv_prog_OBJDUMP+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$OBJDUMP"; then ++ ac_cv_prog_OBJDUMP="$OBJDUMP" # Let the user override the test. 
++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_OBJDUMP="${ac_tool_prefix}objdump" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++OBJDUMP=$ac_cv_prog_OBJDUMP ++if test -n "$OBJDUMP"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $OBJDUMP" >&5 ++printf "%s\n" "$OBJDUMP" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ ++fi ++if test -z "$ac_cv_prog_OBJDUMP"; then ++ ac_ct_OBJDUMP=$OBJDUMP ++ # Extract the first word of "objdump", so it can be a program name with args. ++set dummy objdump; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... " >&6; } ++if test ${ac_cv_prog_ac_ct_OBJDUMP+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$ac_ct_OBJDUMP"; then ++ ac_cv_prog_ac_ct_OBJDUMP="$ac_ct_OBJDUMP" # Let the user override the test. 
++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_ac_ct_OBJDUMP="objdump" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++ac_ct_OBJDUMP=$ac_cv_prog_ac_ct_OBJDUMP ++if test -n "$ac_ct_OBJDUMP"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OBJDUMP" >&5 ++printf "%s\n" "$ac_ct_OBJDUMP" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ if test "x$ac_ct_OBJDUMP" = x; then ++ OBJDUMP="false" ++ else ++ case $cross_compiling:$ac_tool_warned in ++yes:) ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++ac_tool_warned=yes ;; ++esac ++ OBJDUMP=$ac_ct_OBJDUMP ++ fi ++else ++ OBJDUMP="$ac_cv_prog_OBJDUMP" ++fi ++ ++test -z "$OBJDUMP" && OBJDUMP=objdump ++ ++ ++ ++ ++ ++ ++ ++ ++ ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to recognize dependent libraries" >&5 ++printf %s "checking how to recognize dependent libraries... " >&6; } ++if test ${lt_cv_deplibs_check_method+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ lt_cv_file_magic_cmd='$MAGIC_CMD' ++lt_cv_file_magic_test_file= ++lt_cv_deplibs_check_method='unknown' ++# Need to set the preceding variable on all platforms that support ++# interlibrary dependencies. ++# 'none' -- dependencies not supported. ++# 'unknown' -- same as none, but documents that we really don't know. ++# 'pass_all' -- all dependencies passed with no checks. ++# 'test_compile' -- check by making test program. 
++# 'file_magic [[regex]]' -- check by looking for files in library path ++# that responds to the $file_magic_cmd with a given extended regex. ++# If you have 'file' or equivalent on your system and you're not sure ++# whether 'pass_all' will *always* work, you probably want this one. ++ ++case $host_os in ++aix[4-9]*) ++ lt_cv_deplibs_check_method=pass_all ++ ;; ++ ++beos*) ++ lt_cv_deplibs_check_method=pass_all ++ ;; ++ ++bsdi[45]*) ++ lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib)' ++ lt_cv_file_magic_cmd='$FILECMD -L' ++ lt_cv_file_magic_test_file=/shlib/libc.so ++ ;; ++ ++cygwin*) ++ # func_win32_libid is a shell function defined in ltmain.sh ++ lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' ++ lt_cv_file_magic_cmd='func_win32_libid' ++ ;; ++ ++mingw* | pw32*) ++ # Base MSYS/MinGW do not provide the 'file' command needed by ++ # func_win32_libid shell function, so use a weaker test based on 'objdump', ++ # unless we find 'file', for example because we are cross-compiling. ++ if ( file / ) >/dev/null 2>&1; then ++ lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' ++ lt_cv_file_magic_cmd='func_win32_libid' ++ else ++ # Keep this pattern in sync with the one in func_win32_libid. ++ lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' ++ lt_cv_file_magic_cmd='$OBJDUMP -f' ++ fi ++ ;; ++ ++cegcc*) ++ # use the weaker test based on 'objdump'. See mingw*. ++ lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?' ++ lt_cv_file_magic_cmd='$OBJDUMP -f' ++ ;; ++ ++darwin* | rhapsody*) ++ lt_cv_deplibs_check_method=pass_all ++ ;; ++ ++freebsd* | dragonfly* | midnightbsd*) ++ if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then ++ case $host_cpu in ++ i*86 ) ++ # Not sure whether the presence of OpenBSD here was a mistake. ++ # Let's accept both of them until this is cleared up. 
++ lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[3-9]86 (compact )?demand paged shared library' ++ lt_cv_file_magic_cmd=$FILECMD ++ lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` ++ ;; ++ esac ++ else ++ lt_cv_deplibs_check_method=pass_all ++ fi ++ ;; ++ ++haiku*) ++ lt_cv_deplibs_check_method=pass_all ++ ;; ++ ++hpux10.20* | hpux11*) ++ lt_cv_file_magic_cmd=$FILECMD ++ case $host_cpu in ++ ia64*) ++ lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - IA64' ++ lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so ++ ;; ++ hppa*64*) ++ lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF[ -][0-9][0-9])(-bit)?( [LM]SB)? shared object( file)?[, -]* PA-RISC [0-9]\.[0-9]' ++ lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl ++ ;; ++ *) ++ lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|PA-RISC[0-9]\.[0-9]) shared library' ++ lt_cv_file_magic_test_file=/usr/lib/libc.sl ++ ;; ++ esac ++ ;; ++ ++interix[3-9]*) ++ # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here ++ lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|\.a)$' ++ ;; ++ ++irix5* | irix6* | nonstopux*) ++ case $LD in ++ *-32|*"-32 ") libmagic=32-bit;; ++ *-n32|*"-n32 ") libmagic=N32;; ++ *-64|*"-64 ") libmagic=64-bit;; ++ *) libmagic=never-match;; ++ esac ++ lt_cv_deplibs_check_method=pass_all ++ ;; ++ ++# This must be glibc/ELF. 
++linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) ++ lt_cv_deplibs_check_method=pass_all ++ ;; ++ ++netbsd*) ++ if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then ++ lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' ++ else ++ lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|_pic\.a)$' ++ fi ++ ;; ++ ++newos6*) ++ lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (executable|dynamic lib)' ++ lt_cv_file_magic_cmd=$FILECMD ++ lt_cv_file_magic_test_file=/usr/lib/libnls.so ++ ;; ++ ++*nto* | *qnx*) ++ lt_cv_deplibs_check_method=pass_all ++ ;; ++ ++openbsd* | bitrig*) ++ if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then ++ lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|\.so|_pic\.a)$' ++ else ++ lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' ++ fi ++ ;; ++ ++osf3* | osf4* | osf5*) ++ lt_cv_deplibs_check_method=pass_all ++ ;; ++ ++rdos*) ++ lt_cv_deplibs_check_method=pass_all ++ ;; ++ ++solaris*) ++ lt_cv_deplibs_check_method=pass_all ++ ;; ++ ++sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) ++ lt_cv_deplibs_check_method=pass_all ++ ;; ++ ++sysv4 | sysv4.3*) ++ case $host_vendor in ++ motorola) ++ lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib) M[0-9][0-9]* Version [0-9]' ++ lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` ++ ;; ++ ncr) ++ lt_cv_deplibs_check_method=pass_all ++ ;; ++ sequent) ++ lt_cv_file_magic_cmd='/bin/file' ++ lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [LM]SB (shared object|dynamic lib )' ++ ;; ++ sni) ++ lt_cv_file_magic_cmd='/bin/file' ++ lt_cv_deplibs_check_method="file_magic ELF [0-9][0-9]*-bit [LM]SB dynamic lib" ++ lt_cv_file_magic_test_file=/lib/libc.so ++ ;; ++ siemens) ++ lt_cv_deplibs_check_method=pass_all ++ ;; ++ pc) ++ lt_cv_deplibs_check_method=pass_all ++ ;; ++ esac ++ ;; ++ ++tpf*) ++ 
lt_cv_deplibs_check_method=pass_all ++ ;; ++os2*) ++ lt_cv_deplibs_check_method=pass_all ++ ;; ++esac ++ ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_deplibs_check_method" >&5 ++printf "%s\n" "$lt_cv_deplibs_check_method" >&6; } ++ ++file_magic_glob= ++want_nocaseglob=no ++if test "$build" = "$host"; then ++ case $host_os in ++ mingw* | pw32*) ++ if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then ++ want_nocaseglob=yes ++ else ++ file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[\1]\/[\1]\/g;/g"` ++ fi ++ ;; ++ esac ++fi ++ ++file_magic_cmd=$lt_cv_file_magic_cmd ++deplibs_check_method=$lt_cv_deplibs_check_method ++test -z "$deplibs_check_method" && deplibs_check_method=unknown ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++if test -n "$ac_tool_prefix"; then ++ # Extract the first word of "${ac_tool_prefix}dlltool", so it can be a program name with args. ++set dummy ${ac_tool_prefix}dlltool; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... " >&6; } ++if test ${ac_cv_prog_DLLTOOL+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$DLLTOOL"; then ++ ac_cv_prog_DLLTOOL="$DLLTOOL" # Let the user override the test. 
++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_DLLTOOL="${ac_tool_prefix}dlltool" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++DLLTOOL=$ac_cv_prog_DLLTOOL ++if test -n "$DLLTOOL"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $DLLTOOL" >&5 ++printf "%s\n" "$DLLTOOL" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ ++fi ++if test -z "$ac_cv_prog_DLLTOOL"; then ++ ac_ct_DLLTOOL=$DLLTOOL ++ # Extract the first word of "dlltool", so it can be a program name with args. ++set dummy dlltool; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... " >&6; } ++if test ${ac_cv_prog_ac_ct_DLLTOOL+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$ac_ct_DLLTOOL"; then ++ ac_cv_prog_ac_ct_DLLTOOL="$ac_ct_DLLTOOL" # Let the user override the test. 
++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_ac_ct_DLLTOOL="dlltool" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++ac_ct_DLLTOOL=$ac_cv_prog_ac_ct_DLLTOOL ++if test -n "$ac_ct_DLLTOOL"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DLLTOOL" >&5 ++printf "%s\n" "$ac_ct_DLLTOOL" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ if test "x$ac_ct_DLLTOOL" = x; then ++ DLLTOOL="false" ++ else ++ case $cross_compiling:$ac_tool_warned in ++yes:) ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++ac_tool_warned=yes ;; ++esac ++ DLLTOOL=$ac_ct_DLLTOOL ++ fi ++else ++ DLLTOOL="$ac_cv_prog_DLLTOOL" ++fi ++ ++test -z "$DLLTOOL" && DLLTOOL=dlltool ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to associate runtime and link libraries" >&5 ++printf %s "checking how to associate runtime and link libraries... 
" >&6; } ++if test ${lt_cv_sharedlib_from_linklib_cmd+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ lt_cv_sharedlib_from_linklib_cmd='unknown' ++ ++case $host_os in ++cygwin* | mingw* | pw32* | cegcc*) ++ # two different shell functions defined in ltmain.sh; ++ # decide which one to use based on capabilities of $DLLTOOL ++ case `$DLLTOOL --help 2>&1` in ++ *--identify-strict*) ++ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib ++ ;; ++ *) ++ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback ++ ;; ++ esac ++ ;; ++*) ++ # fallback: assume linklib IS sharedlib ++ lt_cv_sharedlib_from_linklib_cmd=$ECHO ++ ;; ++esac ++ ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sharedlib_from_linklib_cmd" >&5 ++printf "%s\n" "$lt_cv_sharedlib_from_linklib_cmd" >&6; } ++sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd ++test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO ++ ++ ++ ++ ++ ++ ++ ++if test -n "$ac_tool_prefix"; then ++ for ac_prog in ar ++ do ++ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. ++set dummy $ac_tool_prefix$ac_prog; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... " >&6; } ++if test ${ac_cv_prog_AR+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$AR"; then ++ ac_cv_prog_AR="$AR" # Let the user override the test. 
++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_AR="$ac_tool_prefix$ac_prog" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++AR=$ac_cv_prog_AR ++if test -n "$AR"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $AR" >&5 ++printf "%s\n" "$AR" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ ++ test -n "$AR" && break ++ done ++fi ++if test -z "$AR"; then ++ ac_ct_AR=$AR ++ for ac_prog in ar ++do ++ # Extract the first word of "$ac_prog", so it can be a program name with args. ++set dummy $ac_prog; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... " >&6; } ++if test ${ac_cv_prog_ac_ct_AR+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$ac_ct_AR"; then ++ ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test. 
++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_ac_ct_AR="$ac_prog" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++ac_ct_AR=$ac_cv_prog_ac_ct_AR ++if test -n "$ac_ct_AR"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_AR" >&5 ++printf "%s\n" "$ac_ct_AR" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ ++ test -n "$ac_ct_AR" && break ++done ++ ++ if test "x$ac_ct_AR" = x; then ++ AR="false" ++ else ++ case $cross_compiling:$ac_tool_warned in ++yes:) ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++ac_tool_warned=yes ;; ++esac ++ AR=$ac_ct_AR ++ fi ++fi ++ ++: ${AR=ar} ++ ++ ++ ++ ++ ++ ++# Use ARFLAGS variable as AR's operation code to sync the variable naming with ++# Automake. If both AR_FLAGS and ARFLAGS are specified, AR_FLAGS should have ++# higher priority because thats what people were doing historically (setting ++# ARFLAGS for automake and AR_FLAGS for libtool). FIXME: Make the AR_FLAGS ++# variable obsoleted/removed. ++ ++test ${AR_FLAGS+y} || AR_FLAGS=${ARFLAGS-cr} ++lt_ar_flags=$AR_FLAGS ++ ++ ++ ++ ++ ++ ++# Make AR_FLAGS overridable by 'make ARFLAGS='. Don't try to run-time override ++# by AR_FLAGS because that was never working and AR_FLAGS is about to die. ++ ++ ++ ++ ++ ++ ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for archiver @FILE support" >&5 ++printf %s "checking for archiver @FILE support... 
" >&6; } ++if test ${lt_cv_ar_at_file+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ lt_cv_ar_at_file=no ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++ ++int ++main (void) ++{ ++ ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_c_try_compile "$LINENO" ++then : ++ echo conftest.$ac_objext > conftest.lst ++ lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&5' ++ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 ++ (eval $lt_ar_try) 2>&5 ++ ac_status=$? ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ test $ac_status = 0; } ++ if test 0 -eq "$ac_status"; then ++ # Ensure the archiver fails upon bogus file names. ++ rm -f conftest.$ac_objext libconftest.a ++ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 ++ (eval $lt_ar_try) 2>&5 ++ ac_status=$? ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ test $ac_status = 0; } ++ if test 0 -ne "$ac_status"; then ++ lt_cv_ar_at_file=@ ++ fi ++ fi ++ rm -f conftest.* libconftest.a ++ ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++ ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ar_at_file" >&5 ++printf "%s\n" "$lt_cv_ar_at_file" >&6; } ++ ++if test no = "$lt_cv_ar_at_file"; then ++ archiver_list_spec= ++else ++ archiver_list_spec=$lt_cv_ar_at_file ++fi ++ ++ ++ ++ ++ ++ ++ ++if test -n "$ac_tool_prefix"; then ++ # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. ++set dummy ${ac_tool_prefix}strip; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... " >&6; } ++if test ${ac_cv_prog_STRIP+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$STRIP"; then ++ ac_cv_prog_STRIP="$STRIP" # Let the user override the test. 
++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_STRIP="${ac_tool_prefix}strip" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++STRIP=$ac_cv_prog_STRIP ++if test -n "$STRIP"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5 ++printf "%s\n" "$STRIP" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ ++fi ++if test -z "$ac_cv_prog_STRIP"; then ++ ac_ct_STRIP=$STRIP ++ # Extract the first word of "strip", so it can be a program name with args. ++set dummy strip; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... " >&6; } ++if test ${ac_cv_prog_ac_ct_STRIP+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$ac_ct_STRIP"; then ++ ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. 
++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_ac_ct_STRIP="strip" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP ++if test -n "$ac_ct_STRIP"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5 ++printf "%s\n" "$ac_ct_STRIP" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ if test "x$ac_ct_STRIP" = x; then ++ STRIP=":" ++ else ++ case $cross_compiling:$ac_tool_warned in ++yes:) ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++ac_tool_warned=yes ;; ++esac ++ STRIP=$ac_ct_STRIP ++ fi ++else ++ STRIP="$ac_cv_prog_STRIP" ++fi ++ ++test -z "$STRIP" && STRIP=: ++ ++ ++ ++ ++ ++ ++if test -n "$ac_tool_prefix"; then ++ # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args. ++set dummy ${ac_tool_prefix}ranlib; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... " >&6; } ++if test ${ac_cv_prog_RANLIB+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$RANLIB"; then ++ ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test. 
++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++RANLIB=$ac_cv_prog_RANLIB ++if test -n "$RANLIB"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5 ++printf "%s\n" "$RANLIB" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ ++fi ++if test -z "$ac_cv_prog_RANLIB"; then ++ ac_ct_RANLIB=$RANLIB ++ # Extract the first word of "ranlib", so it can be a program name with args. ++set dummy ranlib; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... " >&6; } ++if test ${ac_cv_prog_ac_ct_RANLIB+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$ac_ct_RANLIB"; then ++ ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test. 
++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_ac_ct_RANLIB="ranlib" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB ++if test -n "$ac_ct_RANLIB"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5 ++printf "%s\n" "$ac_ct_RANLIB" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ if test "x$ac_ct_RANLIB" = x; then ++ RANLIB=":" ++ else ++ case $cross_compiling:$ac_tool_warned in ++yes:) ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++ac_tool_warned=yes ;; ++esac ++ RANLIB=$ac_ct_RANLIB ++ fi ++else ++ RANLIB="$ac_cv_prog_RANLIB" ++fi ++ ++test -z "$RANLIB" && RANLIB=: ++ ++ ++ ++ ++ ++ ++# Determine commands to create old-style static archives. 
++old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs' ++old_postinstall_cmds='chmod 644 $oldlib' ++old_postuninstall_cmds= ++ ++if test -n "$RANLIB"; then ++ case $host_os in ++ bitrig* | openbsd*) ++ old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$tool_oldlib" ++ ;; ++ *) ++ old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$tool_oldlib" ++ ;; ++ esac ++ old_archive_cmds="$old_archive_cmds~\$RANLIB \$tool_oldlib" ++fi ++ ++case $host_os in ++ darwin*) ++ lock_old_archive_extraction=yes ;; ++ *) ++ lock_old_archive_extraction=no ;; ++esac ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++# If no C compiler was specified, use CC. ++LTCC=${LTCC-"$CC"} ++ ++# If no C compiler flags were specified, use CFLAGS. ++LTCFLAGS=${LTCFLAGS-"$CFLAGS"} ++ ++# Allow CC to be a program name with arguments. ++compiler=$CC ++ ++ ++# Check for command to grab the raw symbol name followed by C symbol from nm. ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking command to parse $NM output from $compiler object" >&5 ++printf %s "checking command to parse $NM output from $compiler object... " >&6; } ++if test ${lt_cv_sys_global_symbol_pipe+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ ++# These are sane defaults that work on at least a few old systems. ++# [They come from Ultrix. What could be older than Ultrix?!! ;)] ++ ++# Character class describing NM global symbol codes. ++symcode='[BCDEGRST]' ++ ++# Regexp to match symbols that can be accessed directly from C. ++sympat='\([_A-Za-z][_A-Za-z0-9]*\)' ++ ++# Define system-specific variables. 
++case $host_os in ++aix*) ++ symcode='[BCDT]' ++ ;; ++cygwin* | mingw* | pw32* | cegcc*) ++ symcode='[ABCDGISTW]' ++ ;; ++hpux*) ++ if test ia64 = "$host_cpu"; then ++ symcode='[ABCDEGRST]' ++ fi ++ ;; ++irix* | nonstopux*) ++ symcode='[BCDEGRST]' ++ ;; ++osf*) ++ symcode='[BCDEGQRST]' ++ ;; ++solaris*) ++ symcode='[BDRT]' ++ ;; ++sco3.2v5*) ++ symcode='[DT]' ++ ;; ++sysv4.2uw2*) ++ symcode='[DT]' ++ ;; ++sysv5* | sco5v6* | unixware* | OpenUNIX*) ++ symcode='[ABDT]' ++ ;; ++sysv4) ++ symcode='[DFNSTU]' ++ ;; ++esac ++ ++# If we're using GNU nm, then use its standard symbol codes. ++case `$NM -V 2>&1` in ++*GNU* | *'with BFD'*) ++ symcode='[ABCDGIRSTW]' ;; ++esac ++ ++if test "$lt_cv_nm_interface" = "MS dumpbin"; then ++ # Gets list of data symbols to import. ++ lt_cv_sys_global_symbol_to_import="$SED -n -e 's/^I .* \(.*\)$/\1/p'" ++ # Adjust the below global symbol transforms to fixup imported variables. ++ lt_cdecl_hook=" -e 's/^I .* \(.*\)$/extern __declspec(dllimport) char \1;/p'" ++ lt_c_name_hook=" -e 's/^I .* \(.*\)$/ {\"\1\", (void *) 0},/p'" ++ lt_c_name_lib_hook="\ ++ -e 's/^I .* \(lib.*\)$/ {\"\1\", (void *) 0},/p'\ ++ -e 's/^I .* \(.*\)$/ {\"lib\1\", (void *) 0},/p'" ++else ++ # Disable hooks by default. ++ lt_cv_sys_global_symbol_to_import= ++ lt_cdecl_hook= ++ lt_c_name_hook= ++ lt_c_name_lib_hook= ++fi ++ ++# Transform an extracted symbol line into a proper C declaration. ++# Some systems (esp. on ia64) link data and code symbols differently, ++# so use this general approach. 
++lt_cv_sys_global_symbol_to_cdecl="$SED -n"\ ++$lt_cdecl_hook\ ++" -e 's/^T .* \(.*\)$/extern int \1();/p'"\ ++" -e 's/^$symcode$symcode* .* \(.*\)$/extern char \1;/p'" ++ ++# Transform an extracted symbol line into symbol name and symbol address ++lt_cv_sys_global_symbol_to_c_name_address="$SED -n"\ ++$lt_c_name_hook\ ++" -e 's/^: \(.*\) .*$/ {\"\1\", (void *) 0},/p'"\ ++" -e 's/^$symcode$symcode* .* \(.*\)$/ {\"\1\", (void *) \&\1},/p'" ++ ++# Transform an extracted symbol line into symbol name with lib prefix and ++# symbol address. ++lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="$SED -n"\ ++$lt_c_name_lib_hook\ ++" -e 's/^: \(.*\) .*$/ {\"\1\", (void *) 0},/p'"\ ++" -e 's/^$symcode$symcode* .* \(lib.*\)$/ {\"\1\", (void *) \&\1},/p'"\ ++" -e 's/^$symcode$symcode* .* \(.*\)$/ {\"lib\1\", (void *) \&\1},/p'" ++ ++# Handle CRLF in mingw tool chain ++opt_cr= ++case $build_os in ++mingw*) ++ opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp ++ ;; ++esac ++ ++# Try without a prefix underscore, then with it. ++for ac_symprfx in "" "_"; do ++ ++ # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. ++ symxfrm="\\1 $ac_symprfx\\2 \\2" ++ ++ # Write the raw and C identifiers. ++ if test "$lt_cv_nm_interface" = "MS dumpbin"; then ++ # Fake it for dumpbin and say T for any non-static function, ++ # D for any global variable and I for any imported variable. ++ # Also find C++ and __fastcall symbols from MSVC++ or ICC, ++ # which start with @ or ?. 
++ lt_cv_sys_global_symbol_pipe="$AWK '"\ ++" {last_section=section; section=\$ 3};"\ ++" /^COFF SYMBOL TABLE/{for(i in hide) delete hide[i]};"\ ++" /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\ ++" /^ *Symbol name *: /{split(\$ 0,sn,\":\"); si=substr(sn[2],2)};"\ ++" /^ *Type *: code/{print \"T\",si,substr(si,length(prfx))};"\ ++" /^ *Type *: data/{print \"I\",si,substr(si,length(prfx))};"\ ++" \$ 0!~/External *\|/{next};"\ ++" / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\ ++" {if(hide[section]) next};"\ ++" {f=\"D\"}; \$ 0~/\(\).*\|/{f=\"T\"};"\ ++" {split(\$ 0,a,/\||\r/); split(a[2],s)};"\ ++" s[1]~/^[@?]/{print f,s[1],s[1]; next};"\ ++" s[1]~prfx {split(s[1],t,\"@\"); print f,t[1],substr(t[1],length(prfx))}"\ ++" ' prfx=^$ac_symprfx" ++ else ++ lt_cv_sys_global_symbol_pipe="$SED -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" ++ fi ++ lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | $SED '/ __gnu_lto/d'" ++ ++ # Check to see that the pipe works correctly. ++ pipe_works=no ++ ++ rm -f conftest* ++ cat > conftest.$ac_ext <<_LT_EOF ++#ifdef __cplusplus ++extern "C" { ++#endif ++char nm_test_var; ++void nm_test_func(void); ++void nm_test_func(void){} ++#ifdef __cplusplus ++} ++#endif ++int main(){nm_test_var='a';nm_test_func();return(0);} ++_LT_EOF ++ ++ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 ++ (eval $ac_compile) 2>&5 ++ ac_status=$? ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ test $ac_status = 0; }; then ++ # Now try to grab the symbols. ++ nlist=conftest.nm ++ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist\""; } >&5 ++ (eval $NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist) 2>&5 ++ ac_status=$? ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 ++ test $ac_status = 0; } && test -s "$nlist"; then ++ # Try sorting and uniquifying the output. ++ if sort "$nlist" | uniq > "$nlist"T; then ++ mv -f "$nlist"T "$nlist" ++ else ++ rm -f "$nlist"T ++ fi ++ ++ # Make sure that we snagged all the symbols we need. ++ if $GREP ' nm_test_var$' "$nlist" >/dev/null; then ++ if $GREP ' nm_test_func$' "$nlist" >/dev/null; then ++ cat <<_LT_EOF > conftest.$ac_ext ++/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ ++#if defined _WIN32 || defined __CYGWIN__ || defined _WIN32_WCE ++/* DATA imports from DLLs on WIN32 can't be const, because runtime ++ relocations are performed -- see ld's documentation on pseudo-relocs. */ ++# define LT_DLSYM_CONST ++#elif defined __osf__ ++/* This system does not cope well with relocations in const data. */ ++# define LT_DLSYM_CONST ++#else ++# define LT_DLSYM_CONST const ++#endif ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++_LT_EOF ++ # Now generate the symbol file. ++ eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext' ++ ++ cat <<_LT_EOF >> conftest.$ac_ext ++ ++/* The mapping between symbol names and symbols. */ ++LT_DLSYM_CONST struct { ++ const char *name; ++ void *address; ++} ++lt__PROGRAM__LTX_preloaded_symbols[] = ++{ ++ { "@PROGRAM@", (void *) 0 }, ++_LT_EOF ++ $SED "s/^$symcode$symcode* .* \(.*\)$/ {\"\1\", (void *) \&\1},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext ++ cat <<\_LT_EOF >> conftest.$ac_ext ++ {0, (void *) 0} ++}; ++ ++/* This works around a problem in FreeBSD linker */ ++#ifdef FREEBSD_WORKAROUND ++static const void *lt_preloaded_setup() { ++ return lt__PROGRAM__LTX_preloaded_symbols; ++} ++#endif ++ ++#ifdef __cplusplus ++} ++#endif ++_LT_EOF ++ # Now try linking the two files. 
++ mv conftest.$ac_objext conftstm.$ac_objext ++ lt_globsym_save_LIBS=$LIBS ++ lt_globsym_save_CFLAGS=$CFLAGS ++ LIBS=conftstm.$ac_objext ++ CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag" ++ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 ++ (eval $ac_link) 2>&5 ++ ac_status=$? ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ test $ac_status = 0; } && test -s conftest$ac_exeext; then ++ pipe_works=yes ++ fi ++ LIBS=$lt_globsym_save_LIBS ++ CFLAGS=$lt_globsym_save_CFLAGS ++ else ++ echo "cannot find nm_test_func in $nlist" >&5 ++ fi ++ else ++ echo "cannot find nm_test_var in $nlist" >&5 ++ fi ++ else ++ echo "cannot run $lt_cv_sys_global_symbol_pipe" >&5 ++ fi ++ else ++ echo "$progname: failed program was:" >&5 ++ cat conftest.$ac_ext >&5 ++ fi ++ rm -rf conftest* conftst* ++ ++ # Do not use the global_symbol_pipe unless it works. ++ if test yes = "$pipe_works"; then ++ break ++ else ++ lt_cv_sys_global_symbol_pipe= ++ fi ++done ++ ++fi ++ ++if test -z "$lt_cv_sys_global_symbol_pipe"; then ++ lt_cv_sys_global_symbol_to_cdecl= ++fi ++if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: failed" >&5 ++printf "%s\n" "failed" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: ok" >&5 ++printf "%s\n" "ok" >&6; } ++fi ++ ++# Response file support. ++if test "$lt_cv_nm_interface" = "MS dumpbin"; then ++ nm_file_list_spec='@' ++elif $NM --help 2>/dev/null | grep '[@]FILE' >/dev/null; then ++ nm_file_list_spec='@' ++fi ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for sysroot" >&5 ++printf %s "checking for sysroot... " >&6; } ++ ++# Check whether --with-sysroot was given. 
++if test ${with_sysroot+y} ++then : ++ withval=$with_sysroot; ++else $as_nop ++ with_sysroot=no ++fi ++ ++ ++lt_sysroot= ++case $with_sysroot in #( ++ yes) ++ if test yes = "$GCC"; then ++ lt_sysroot=`$CC --print-sysroot 2>/dev/null` ++ fi ++ ;; #( ++ /*) ++ lt_sysroot=`echo "$with_sysroot" | $SED -e "$sed_quote_subst"` ++ ;; #( ++ no|'') ++ ;; #( ++ *) ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $with_sysroot" >&5 ++printf "%s\n" "$with_sysroot" >&6; } ++ as_fn_error $? "The sysroot must be an absolute path." "$LINENO" 5 ++ ;; ++esac ++ ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: ${lt_sysroot:-no}" >&5 ++printf "%s\n" "${lt_sysroot:-no}" >&6; } ++ ++ ++ ++ ++ ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for a working dd" >&5 ++printf %s "checking for a working dd... " >&6; } ++if test ${ac_cv_path_lt_DD+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ printf 0123456789abcdef0123456789abcdef >conftest.i ++cat conftest.i conftest.i >conftest2.i ++: ${lt_DD:=$DD} ++if test -z "$lt_DD"; then ++ ac_path_lt_DD_found=false ++ # Loop through the user's path and test for each of PROGNAME-LIST ++ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_prog in dd ++ do ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ ac_path_lt_DD="$as_dir$ac_prog$ac_exec_ext" ++ as_fn_executable_p "$ac_path_lt_DD" || continue ++if "$ac_path_lt_DD" bs=32 count=1 conftest.out 2>/dev/null; then ++ cmp -s conftest.i conftest.out \ ++ && ac_cv_path_lt_DD="$ac_path_lt_DD" ac_path_lt_DD_found=: ++fi ++ $ac_path_lt_DD_found && break 3 ++ done ++ done ++ done ++IFS=$as_save_IFS ++ if test -z "$ac_cv_path_lt_DD"; then ++ : ++ fi ++else ++ ac_cv_path_lt_DD=$lt_DD ++fi ++ ++rm -f conftest.i conftest2.i conftest.out ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_lt_DD" >&5 ++printf "%s\n" 
"$ac_cv_path_lt_DD" >&6; } ++ ++ ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to truncate binary pipes" >&5 ++printf %s "checking how to truncate binary pipes... " >&6; } ++if test ${lt_cv_truncate_bin+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ printf 0123456789abcdef0123456789abcdef >conftest.i ++cat conftest.i conftest.i >conftest2.i ++lt_cv_truncate_bin= ++if "$ac_cv_path_lt_DD" bs=32 count=1 conftest.out 2>/dev/null; then ++ cmp -s conftest.i conftest.out \ ++ && lt_cv_truncate_bin="$ac_cv_path_lt_DD bs=4096 count=1" ++fi ++rm -f conftest.i conftest2.i conftest.out ++test -z "$lt_cv_truncate_bin" && lt_cv_truncate_bin="$SED -e 4q" ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_truncate_bin" >&5 ++printf "%s\n" "$lt_cv_truncate_bin" >&6; } ++ ++ ++ ++ ++ ++ ++ ++# Calculate cc_basename. Skip known compiler wrappers and cross-prefix. ++func_cc_basename () ++{ ++ for cc_temp in $*""; do ++ case $cc_temp in ++ compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; ++ distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; ++ \-*) ;; ++ *) break;; ++ esac ++ done ++ func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` ++} ++ ++# Check whether --enable-libtool-lock was given. ++if test ${enable_libtool_lock+y} ++then : ++ enableval=$enable_libtool_lock; ++fi ++ ++test no = "$enable_libtool_lock" || enable_libtool_lock=yes ++ ++# Some flags need to be propagated to the compiler or linker for good ++# libtool support. ++case $host in ++ia64-*-hpux*) ++ # Find out what ABI is being produced by ac_compile, and set mode ++ # options accordingly. ++ echo 'int i;' > conftest.$ac_ext ++ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 ++ (eval $ac_compile) 2>&5 ++ ac_status=$? ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 ++ test $ac_status = 0; }; then ++ case `$FILECMD conftest.$ac_objext` in ++ *ELF-32*) ++ HPUX_IA64_MODE=32 ++ ;; ++ *ELF-64*) ++ HPUX_IA64_MODE=64 ++ ;; ++ esac ++ fi ++ rm -rf conftest* ++ ;; ++*-*-irix6*) ++ # Find out what ABI is being produced by ac_compile, and set linker ++ # options accordingly. ++ echo '#line '$LINENO' "configure"' > conftest.$ac_ext ++ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 ++ (eval $ac_compile) 2>&5 ++ ac_status=$? ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ test $ac_status = 0; }; then ++ if test yes = "$lt_cv_prog_gnu_ld"; then ++ case `$FILECMD conftest.$ac_objext` in ++ *32-bit*) ++ LD="${LD-ld} -melf32bsmip" ++ ;; ++ *N32*) ++ LD="${LD-ld} -melf32bmipn32" ++ ;; ++ *64-bit*) ++ LD="${LD-ld} -melf64bmip" ++ ;; ++ esac ++ else ++ case `$FILECMD conftest.$ac_objext` in ++ *32-bit*) ++ LD="${LD-ld} -32" ++ ;; ++ *N32*) ++ LD="${LD-ld} -n32" ++ ;; ++ *64-bit*) ++ LD="${LD-ld} -64" ++ ;; ++ esac ++ fi ++ fi ++ rm -rf conftest* ++ ;; ++ ++mips64*-*linux*) ++ # Find out what ABI is being produced by ac_compile, and set linker ++ # options accordingly. ++ echo '#line '$LINENO' "configure"' > conftest.$ac_ext ++ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 ++ (eval $ac_compile) 2>&5 ++ ac_status=$? ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 ++ test $ac_status = 0; }; then ++ emul=elf ++ case `$FILECMD conftest.$ac_objext` in ++ *32-bit*) ++ emul="${emul}32" ++ ;; ++ *64-bit*) ++ emul="${emul}64" ++ ;; ++ esac ++ case `$FILECMD conftest.$ac_objext` in ++ *MSB*) ++ emul="${emul}btsmip" ++ ;; ++ *LSB*) ++ emul="${emul}ltsmip" ++ ;; ++ esac ++ case `$FILECMD conftest.$ac_objext` in ++ *N32*) ++ emul="${emul}n32" ++ ;; ++ esac ++ LD="${LD-ld} -m $emul" ++ fi ++ rm -rf conftest* ++ ;; ++ ++x86_64-*kfreebsd*-gnu|x86_64-*linux*|powerpc*-*linux*| \ ++s390*-*linux*|s390*-*tpf*|sparc*-*linux*) ++ # Find out what ABI is being produced by ac_compile, and set linker ++ # options accordingly. Note that the listed cases only cover the ++ # situations where additional linker options are needed (such as when ++ # doing 32-bit compilation for a host where ld defaults to 64-bit, or ++ # vice versa); the common cases where no linker options are needed do ++ # not appear in the list. ++ echo 'int i;' > conftest.$ac_ext ++ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 ++ (eval $ac_compile) 2>&5 ++ ac_status=$? ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 ++ test $ac_status = 0; }; then ++ case `$FILECMD conftest.o` in ++ *32-bit*) ++ case $host in ++ x86_64-*kfreebsd*-gnu) ++ LD="${LD-ld} -m elf_i386_fbsd" ++ ;; ++ x86_64-*linux*) ++ case `$FILECMD conftest.o` in ++ *x86-64*) ++ LD="${LD-ld} -m elf32_x86_64" ++ ;; ++ *) ++ LD="${LD-ld} -m elf_i386" ++ ;; ++ esac ++ ;; ++ powerpc64le-*linux*) ++ LD="${LD-ld} -m elf32lppclinux" ++ ;; ++ powerpc64-*linux*) ++ LD="${LD-ld} -m elf32ppclinux" ++ ;; ++ s390x-*linux*) ++ LD="${LD-ld} -m elf_s390" ++ ;; ++ sparc64-*linux*) ++ LD="${LD-ld} -m elf32_sparc" ++ ;; ++ esac ++ ;; ++ *64-bit*) ++ case $host in ++ x86_64-*kfreebsd*-gnu) ++ LD="${LD-ld} -m elf_x86_64_fbsd" ++ ;; ++ x86_64-*linux*) ++ LD="${LD-ld} -m elf_x86_64" ++ ;; ++ powerpcle-*linux*) ++ LD="${LD-ld} -m elf64lppc" ++ ;; ++ powerpc-*linux*) ++ LD="${LD-ld} -m elf64ppc" ++ ;; ++ s390*-*linux*|s390*-*tpf*) ++ LD="${LD-ld} -m elf64_s390" ++ ;; ++ sparc*-*linux*) ++ LD="${LD-ld} -m elf64_sparc" ++ ;; ++ esac ++ ;; ++ esac ++ fi ++ rm -rf conftest* ++ ;; ++ ++*-*-sco3.2v5*) ++ # On SCO OpenServer 5, we need -belf to get full-featured binaries. ++ SAVE_CFLAGS=$CFLAGS ++ CFLAGS="$CFLAGS -belf" ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the C compiler needs -belf" >&5 ++printf %s "checking whether the C compiler needs -belf... " >&6; } ++if test ${lt_cv_cc_needs_belf+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ ac_ext=c ++ac_cpp='$CPP $CPPFLAGS' ++ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ++ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ++ac_compiler_gnu=$ac_cv_c_compiler_gnu ++ ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. 
*/ ++ ++int ++main (void) ++{ ++ ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_c_try_link "$LINENO" ++then : ++ lt_cv_cc_needs_belf=yes ++else $as_nop ++ lt_cv_cc_needs_belf=no ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam \ ++ conftest$ac_exeext conftest.$ac_ext ++ ac_ext=c ++ac_cpp='$CPP $CPPFLAGS' ++ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ++ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ++ac_compiler_gnu=$ac_cv_c_compiler_gnu ++ ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_cc_needs_belf" >&5 ++printf "%s\n" "$lt_cv_cc_needs_belf" >&6; } ++ if test yes != "$lt_cv_cc_needs_belf"; then ++ # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf ++ CFLAGS=$SAVE_CFLAGS ++ fi ++ ;; ++*-*solaris*) ++ # Find out what ABI is being produced by ac_compile, and set linker ++ # options accordingly. ++ echo 'int i;' > conftest.$ac_ext ++ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 ++ (eval $ac_compile) 2>&5 ++ ac_status=$? ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ test $ac_status = 0; }; then ++ case `$FILECMD conftest.o` in ++ *64-bit*) ++ case $lt_cv_prog_gnu_ld in ++ yes*) ++ case $host in ++ i?86-*-solaris*|x86_64-*-solaris*) ++ LD="${LD-ld} -m elf_x86_64" ++ ;; ++ sparc*-*-solaris*) ++ LD="${LD-ld} -m elf64_sparc" ++ ;; ++ esac ++ # GNU ld 2.21 introduced _sol2 emulations. Use them if available. ++ if ${LD-ld} -V | grep _sol2 >/dev/null 2>&1; then ++ LD=${LD-ld}_sol2 ++ fi ++ ;; ++ *) ++ if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then ++ LD="${LD-ld} -64" ++ fi ++ ;; ++ esac ++ ;; ++ esac ++ fi ++ rm -rf conftest* ++ ;; ++esac ++ ++need_locks=$enable_libtool_lock ++ ++if test -n "$ac_tool_prefix"; then ++ # Extract the first word of "${ac_tool_prefix}mt", so it can be a program name with args. 
++set dummy ${ac_tool_prefix}mt; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... " >&6; } ++if test ${ac_cv_prog_MANIFEST_TOOL+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$MANIFEST_TOOL"; then ++ ac_cv_prog_MANIFEST_TOOL="$MANIFEST_TOOL" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_MANIFEST_TOOL="${ac_tool_prefix}mt" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++MANIFEST_TOOL=$ac_cv_prog_MANIFEST_TOOL ++if test -n "$MANIFEST_TOOL"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $MANIFEST_TOOL" >&5 ++printf "%s\n" "$MANIFEST_TOOL" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ ++fi ++if test -z "$ac_cv_prog_MANIFEST_TOOL"; then ++ ac_ct_MANIFEST_TOOL=$MANIFEST_TOOL ++ # Extract the first word of "mt", so it can be a program name with args. ++set dummy mt; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... " >&6; } ++if test ${ac_cv_prog_ac_ct_MANIFEST_TOOL+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$ac_ct_MANIFEST_TOOL"; then ++ ac_cv_prog_ac_ct_MANIFEST_TOOL="$ac_ct_MANIFEST_TOOL" # Let the user override the test. 
++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_ac_ct_MANIFEST_TOOL="mt" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++ac_ct_MANIFEST_TOOL=$ac_cv_prog_ac_ct_MANIFEST_TOOL ++if test -n "$ac_ct_MANIFEST_TOOL"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_MANIFEST_TOOL" >&5 ++printf "%s\n" "$ac_ct_MANIFEST_TOOL" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ if test "x$ac_ct_MANIFEST_TOOL" = x; then ++ MANIFEST_TOOL=":" ++ else ++ case $cross_compiling:$ac_tool_warned in ++yes:) ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++ac_tool_warned=yes ;; ++esac ++ MANIFEST_TOOL=$ac_ct_MANIFEST_TOOL ++ fi ++else ++ MANIFEST_TOOL="$ac_cv_prog_MANIFEST_TOOL" ++fi ++ ++test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $MANIFEST_TOOL is a manifest tool" >&5 ++printf %s "checking if $MANIFEST_TOOL is a manifest tool... " >&6; } ++if test ${lt_cv_path_mainfest_tool+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ lt_cv_path_mainfest_tool=no ++ echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&5 ++ $MANIFEST_TOOL '-?' 
2>conftest.err > conftest.out ++ cat conftest.err >&5 ++ if $GREP 'Manifest Tool' conftest.out > /dev/null; then ++ lt_cv_path_mainfest_tool=yes ++ fi ++ rm -f conftest* ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_mainfest_tool" >&5 ++printf "%s\n" "$lt_cv_path_mainfest_tool" >&6; } ++if test yes != "$lt_cv_path_mainfest_tool"; then ++ MANIFEST_TOOL=: ++fi ++ ++ ++ ++ ++ ++ ++ case $host_os in ++ rhapsody* | darwin*) ++ if test -n "$ac_tool_prefix"; then ++ # Extract the first word of "${ac_tool_prefix}dsymutil", so it can be a program name with args. ++set dummy ${ac_tool_prefix}dsymutil; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... " >&6; } ++if test ${ac_cv_prog_DSYMUTIL+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$DSYMUTIL"; then ++ ac_cv_prog_DSYMUTIL="$DSYMUTIL" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_DSYMUTIL="${ac_tool_prefix}dsymutil" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++DSYMUTIL=$ac_cv_prog_DSYMUTIL ++if test -n "$DSYMUTIL"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $DSYMUTIL" >&5 ++printf "%s\n" "$DSYMUTIL" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ ++fi ++if test -z "$ac_cv_prog_DSYMUTIL"; then ++ ac_ct_DSYMUTIL=$DSYMUTIL ++ # Extract the first word of "dsymutil", so it can be a program name with args. 
++set dummy dsymutil; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... " >&6; } ++if test ${ac_cv_prog_ac_ct_DSYMUTIL+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$ac_ct_DSYMUTIL"; then ++ ac_cv_prog_ac_ct_DSYMUTIL="$ac_ct_DSYMUTIL" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_ac_ct_DSYMUTIL="dsymutil" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++ac_ct_DSYMUTIL=$ac_cv_prog_ac_ct_DSYMUTIL ++if test -n "$ac_ct_DSYMUTIL"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DSYMUTIL" >&5 ++printf "%s\n" "$ac_ct_DSYMUTIL" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ if test "x$ac_ct_DSYMUTIL" = x; then ++ DSYMUTIL=":" ++ else ++ case $cross_compiling:$ac_tool_warned in ++yes:) ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++ac_tool_warned=yes ;; ++esac ++ DSYMUTIL=$ac_ct_DSYMUTIL ++ fi ++else ++ DSYMUTIL="$ac_cv_prog_DSYMUTIL" ++fi ++ ++ if test -n "$ac_tool_prefix"; then ++ # Extract the first word of "${ac_tool_prefix}nmedit", so it can be a program name with args. ++set dummy ${ac_tool_prefix}nmedit; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... 
" >&6; } ++if test ${ac_cv_prog_NMEDIT+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$NMEDIT"; then ++ ac_cv_prog_NMEDIT="$NMEDIT" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_NMEDIT="${ac_tool_prefix}nmedit" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++NMEDIT=$ac_cv_prog_NMEDIT ++if test -n "$NMEDIT"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $NMEDIT" >&5 ++printf "%s\n" "$NMEDIT" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ ++fi ++if test -z "$ac_cv_prog_NMEDIT"; then ++ ac_ct_NMEDIT=$NMEDIT ++ # Extract the first word of "nmedit", so it can be a program name with args. ++set dummy nmedit; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... " >&6; } ++if test ${ac_cv_prog_ac_ct_NMEDIT+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$ac_ct_NMEDIT"; then ++ ac_cv_prog_ac_ct_NMEDIT="$ac_ct_NMEDIT" # Let the user override the test. 
++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_ac_ct_NMEDIT="nmedit" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++ac_ct_NMEDIT=$ac_cv_prog_ac_ct_NMEDIT ++if test -n "$ac_ct_NMEDIT"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_NMEDIT" >&5 ++printf "%s\n" "$ac_ct_NMEDIT" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ if test "x$ac_ct_NMEDIT" = x; then ++ NMEDIT=":" ++ else ++ case $cross_compiling:$ac_tool_warned in ++yes:) ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++ac_tool_warned=yes ;; ++esac ++ NMEDIT=$ac_ct_NMEDIT ++ fi ++else ++ NMEDIT="$ac_cv_prog_NMEDIT" ++fi ++ ++ if test -n "$ac_tool_prefix"; then ++ # Extract the first word of "${ac_tool_prefix}lipo", so it can be a program name with args. ++set dummy ${ac_tool_prefix}lipo; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... " >&6; } ++if test ${ac_cv_prog_LIPO+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$LIPO"; then ++ ac_cv_prog_LIPO="$LIPO" # Let the user override the test. 
++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_LIPO="${ac_tool_prefix}lipo" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++LIPO=$ac_cv_prog_LIPO ++if test -n "$LIPO"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $LIPO" >&5 ++printf "%s\n" "$LIPO" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ ++fi ++if test -z "$ac_cv_prog_LIPO"; then ++ ac_ct_LIPO=$LIPO ++ # Extract the first word of "lipo", so it can be a program name with args. ++set dummy lipo; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... " >&6; } ++if test ${ac_cv_prog_ac_ct_LIPO+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$ac_ct_LIPO"; then ++ ac_cv_prog_ac_ct_LIPO="$ac_ct_LIPO" # Let the user override the test. 
++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_ac_ct_LIPO="lipo" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++ac_ct_LIPO=$ac_cv_prog_ac_ct_LIPO ++if test -n "$ac_ct_LIPO"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_LIPO" >&5 ++printf "%s\n" "$ac_ct_LIPO" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ if test "x$ac_ct_LIPO" = x; then ++ LIPO=":" ++ else ++ case $cross_compiling:$ac_tool_warned in ++yes:) ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++ac_tool_warned=yes ;; ++esac ++ LIPO=$ac_ct_LIPO ++ fi ++else ++ LIPO="$ac_cv_prog_LIPO" ++fi ++ ++ if test -n "$ac_tool_prefix"; then ++ # Extract the first word of "${ac_tool_prefix}otool", so it can be a program name with args. ++set dummy ${ac_tool_prefix}otool; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... " >&6; } ++if test ${ac_cv_prog_OTOOL+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$OTOOL"; then ++ ac_cv_prog_OTOOL="$OTOOL" # Let the user override the test. 
++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_OTOOL="${ac_tool_prefix}otool" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++OTOOL=$ac_cv_prog_OTOOL ++if test -n "$OTOOL"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $OTOOL" >&5 ++printf "%s\n" "$OTOOL" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ ++fi ++if test -z "$ac_cv_prog_OTOOL"; then ++ ac_ct_OTOOL=$OTOOL ++ # Extract the first word of "otool", so it can be a program name with args. ++set dummy otool; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... " >&6; } ++if test ${ac_cv_prog_ac_ct_OTOOL+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$ac_ct_OTOOL"; then ++ ac_cv_prog_ac_ct_OTOOL="$ac_ct_OTOOL" # Let the user override the test. 
++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_ac_ct_OTOOL="otool" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++ac_ct_OTOOL=$ac_cv_prog_ac_ct_OTOOL ++if test -n "$ac_ct_OTOOL"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL" >&5 ++printf "%s\n" "$ac_ct_OTOOL" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ if test "x$ac_ct_OTOOL" = x; then ++ OTOOL=":" ++ else ++ case $cross_compiling:$ac_tool_warned in ++yes:) ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++ac_tool_warned=yes ;; ++esac ++ OTOOL=$ac_ct_OTOOL ++ fi ++else ++ OTOOL="$ac_cv_prog_OTOOL" ++fi ++ ++ if test -n "$ac_tool_prefix"; then ++ # Extract the first word of "${ac_tool_prefix}otool64", so it can be a program name with args. ++set dummy ${ac_tool_prefix}otool64; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... " >&6; } ++if test ${ac_cv_prog_OTOOL64+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$OTOOL64"; then ++ ac_cv_prog_OTOOL64="$OTOOL64" # Let the user override the test. 
++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_OTOOL64="${ac_tool_prefix}otool64" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++OTOOL64=$ac_cv_prog_OTOOL64 ++if test -n "$OTOOL64"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $OTOOL64" >&5 ++printf "%s\n" "$OTOOL64" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ ++fi ++if test -z "$ac_cv_prog_OTOOL64"; then ++ ac_ct_OTOOL64=$OTOOL64 ++ # Extract the first word of "otool64", so it can be a program name with args. ++set dummy otool64; ac_word=$2 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++printf %s "checking for $ac_word... " >&6; } ++if test ${ac_cv_prog_ac_ct_OTOOL64+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -n "$ac_ct_OTOOL64"; then ++ ac_cv_prog_ac_ct_OTOOL64="$ac_ct_OTOOL64" # Let the user override the test. 
++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ ac_cv_prog_ac_ct_OTOOL64="otool64" ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++ac_ct_OTOOL64=$ac_cv_prog_ac_ct_OTOOL64 ++if test -n "$ac_ct_OTOOL64"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL64" >&5 ++printf "%s\n" "$ac_ct_OTOOL64" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ if test "x$ac_ct_OTOOL64" = x; then ++ OTOOL64=":" ++ else ++ case $cross_compiling:$ac_tool_warned in ++yes:) ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++ac_tool_warned=yes ;; ++esac ++ OTOOL64=$ac_ct_OTOOL64 ++ fi ++else ++ OTOOL64="$ac_cv_prog_OTOOL64" ++fi ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for -single_module linker flag" >&5 ++printf %s "checking for -single_module linker flag... " >&6; } ++if test ${lt_cv_apple_cc_single_mod+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ lt_cv_apple_cc_single_mod=no ++ if test -z "$LT_MULTI_MODULE"; then ++ # By default we will add the -single_module flag. You can override ++ # by either setting the environment variable LT_MULTI_MODULE ++ # non-empty at configure time, or by adding -multi_module to the ++ # link flags. 
++ rm -rf libconftest.dylib* ++ echo "int foo(void){return 1;}" > conftest.c ++ echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ ++-dynamiclib -Wl,-single_module conftest.c" >&5 ++ $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ ++ -dynamiclib -Wl,-single_module conftest.c 2>conftest.err ++ _lt_result=$? ++ # If there is a non-empty error log, and "single_module" ++ # appears in it, assume the flag caused a linker warning ++ if test -s conftest.err && $GREP single_module conftest.err; then ++ cat conftest.err >&5 ++ # Otherwise, if the output was created with a 0 exit code from ++ # the compiler, it worked. ++ elif test -f libconftest.dylib && test 0 = "$_lt_result"; then ++ lt_cv_apple_cc_single_mod=yes ++ else ++ cat conftest.err >&5 ++ fi ++ rm -rf libconftest.dylib* ++ rm -f conftest.* ++ fi ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_apple_cc_single_mod" >&5 ++printf "%s\n" "$lt_cv_apple_cc_single_mod" >&6; } ++ ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for -exported_symbols_list linker flag" >&5 ++printf %s "checking for -exported_symbols_list linker flag... " >&6; } ++if test ${lt_cv_ld_exported_symbols_list+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ lt_cv_ld_exported_symbols_list=no ++ save_LDFLAGS=$LDFLAGS ++ echo "_main" > conftest.sym ++ LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym" ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. 
*/ ++ ++int ++main (void) ++{ ++ ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_c_try_link "$LINENO" ++then : ++ lt_cv_ld_exported_symbols_list=yes ++else $as_nop ++ lt_cv_ld_exported_symbols_list=no ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam \ ++ conftest$ac_exeext conftest.$ac_ext ++ LDFLAGS=$save_LDFLAGS ++ ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_exported_symbols_list" >&5 ++printf "%s\n" "$lt_cv_ld_exported_symbols_list" >&6; } ++ ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for -force_load linker flag" >&5 ++printf %s "checking for -force_load linker flag... " >&6; } ++if test ${lt_cv_ld_force_load+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ lt_cv_ld_force_load=no ++ cat > conftest.c << _LT_EOF ++int forced_loaded() { return 2;} ++_LT_EOF ++ echo "$LTCC $LTCFLAGS -c -o conftest.o conftest.c" >&5 ++ $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&5 ++ echo "$AR $AR_FLAGS libconftest.a conftest.o" >&5 ++ $AR $AR_FLAGS libconftest.a conftest.o 2>&5 ++ echo "$RANLIB libconftest.a" >&5 ++ $RANLIB libconftest.a 2>&5 ++ cat > conftest.c << _LT_EOF ++int main() { return 0;} ++_LT_EOF ++ echo "$LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a" >&5 ++ $LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a 2>conftest.err ++ _lt_result=$? 
++ if test -s conftest.err && $GREP force_load conftest.err; then ++ cat conftest.err >&5 ++ elif test -f conftest && test 0 = "$_lt_result" && $GREP forced_load conftest >/dev/null 2>&1; then ++ lt_cv_ld_force_load=yes ++ else ++ cat conftest.err >&5 ++ fi ++ rm -f conftest.err libconftest.a conftest conftest.c ++ rm -rf conftest.dSYM ++ ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_force_load" >&5 ++printf "%s\n" "$lt_cv_ld_force_load" >&6; } ++ case $host_os in ++ rhapsody* | darwin1.[012]) ++ _lt_dar_allow_undefined='$wl-undefined ${wl}suppress' ;; ++ darwin1.*) ++ _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;; ++ darwin*) ++ case $MACOSX_DEPLOYMENT_TARGET,$host in ++ 10.[012],*|,*powerpc*-darwin[5-8]*) ++ _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;; ++ *) ++ _lt_dar_allow_undefined='$wl-undefined ${wl}dynamic_lookup' ;; ++ esac ++ ;; ++ esac ++ if test yes = "$lt_cv_apple_cc_single_mod"; then ++ _lt_dar_single_mod='$single_module' ++ fi ++ if test yes = "$lt_cv_ld_exported_symbols_list"; then ++ _lt_dar_export_syms=' $wl-exported_symbols_list,$output_objdir/$libname-symbols.expsym' ++ else ++ _lt_dar_export_syms='~$NMEDIT -s $output_objdir/$libname-symbols.expsym $lib' ++ fi ++ if test : != "$DSYMUTIL" && test no = "$lt_cv_ld_force_load"; then ++ _lt_dsymutil='~$DSYMUTIL $lib || :' ++ else ++ _lt_dsymutil= ++ fi ++ ;; ++ esac ++ ++# func_munge_path_list VARIABLE PATH ++# ----------------------------------- ++# VARIABLE is name of variable containing _space_ separated list of ++# directories to be munged by the contents of PATH, which is string ++# having a format: ++# "DIR[:DIR]:" ++# string "DIR[ DIR]" will be prepended to VARIABLE ++# ":DIR[:DIR]" ++# string "DIR[ DIR]" will be appended to VARIABLE ++# "DIRP[:DIRP]::[DIRA:]DIRA" ++# string "DIRP[ DIRP]" will be prepended to VARIABLE and string ++# "DIRA[ DIRA]" will be appended to VARIABLE ++# "DIR[:DIR]" ++# VARIABLE will 
be replaced by "DIR[ DIR]" ++func_munge_path_list () ++{ ++ case x$2 in ++ x) ++ ;; ++ *:) ++ eval $1=\"`$ECHO $2 | $SED 's/:/ /g'` \$$1\" ++ ;; ++ x:*) ++ eval $1=\"\$$1 `$ECHO $2 | $SED 's/:/ /g'`\" ++ ;; ++ *::*) ++ eval $1=\"\$$1\ `$ECHO $2 | $SED -e 's/.*:://' -e 's/:/ /g'`\" ++ eval $1=\"`$ECHO $2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \$$1\" ++ ;; ++ *) ++ eval $1=\"`$ECHO $2 | $SED 's/:/ /g'`\" ++ ;; ++ esac ++} ++ ++ac_fn_c_check_header_compile "$LINENO" "dlfcn.h" "ac_cv_header_dlfcn_h" "$ac_includes_default ++" ++if test "x$ac_cv_header_dlfcn_h" = xyes ++then : ++ printf "%s\n" "#define HAVE_DLFCN_H 1" >>confdefs.h ++ ++fi ++ ++ ++ ++ ++func_stripname_cnf () ++{ ++ case $2 in ++ .*) func_stripname_result=`$ECHO "$3" | $SED "s%^$1%%; s%\\\\$2\$%%"`;; ++ *) func_stripname_result=`$ECHO "$3" | $SED "s%^$1%%; s%$2\$%%"`;; ++ esac ++} # func_stripname_cnf ++ ++ ++ ++ ++ ++# Set options ++ ++ ++ ++ enable_dlopen=no ++ ++ ++ enable_win32_dll=no ++ ++ ++ # Check whether --enable-shared was given. ++if test ${enable_shared+y} ++then : ++ enableval=$enable_shared; p=${PACKAGE-default} ++ case $enableval in ++ yes) enable_shared=yes ;; ++ no) enable_shared=no ;; ++ *) ++ enable_shared=no ++ # Look at the argument we got. We use all the common list separators. ++ lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, ++ for pkg in $enableval; do ++ IFS=$lt_save_ifs ++ if test "X$pkg" = "X$p"; then ++ enable_shared=yes ++ fi ++ done ++ IFS=$lt_save_ifs ++ ;; ++ esac ++else $as_nop ++ enable_shared=yes ++fi ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ # Check whether --enable-static was given. ++if test ${enable_static+y} ++then : ++ enableval=$enable_static; p=${PACKAGE-default} ++ case $enableval in ++ yes) enable_static=yes ;; ++ no) enable_static=no ;; ++ *) ++ enable_static=no ++ # Look at the argument we got. We use all the common list separators. 
++ lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, ++ for pkg in $enableval; do ++ IFS=$lt_save_ifs ++ if test "X$pkg" = "X$p"; then ++ enable_static=yes ++ fi ++ done ++ IFS=$lt_save_ifs ++ ;; ++ esac ++else $as_nop ++ enable_static=yes ++fi ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++# Check whether --with-pic was given. ++if test ${with_pic+y} ++then : ++ withval=$with_pic; lt_p=${PACKAGE-default} ++ case $withval in ++ yes|no) pic_mode=$withval ;; ++ *) ++ pic_mode=default ++ # Look at the argument we got. We use all the common list separators. ++ lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, ++ for lt_pkg in $withval; do ++ IFS=$lt_save_ifs ++ if test "X$lt_pkg" = "X$lt_p"; then ++ pic_mode=yes ++ fi ++ done ++ IFS=$lt_save_ifs ++ ;; ++ esac ++else $as_nop ++ pic_mode=default ++fi ++ ++ ++ ++ ++ ++ ++ ++ ++ # Check whether --enable-fast-install was given. ++if test ${enable_fast_install+y} ++then : ++ enableval=$enable_fast_install; p=${PACKAGE-default} ++ case $enableval in ++ yes) enable_fast_install=yes ;; ++ no) enable_fast_install=no ;; ++ *) ++ enable_fast_install=no ++ # Look at the argument we got. We use all the common list separators. ++ lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, ++ for pkg in $enableval; do ++ IFS=$lt_save_ifs ++ if test "X$pkg" = "X$p"; then ++ enable_fast_install=yes ++ fi ++ done ++ IFS=$lt_save_ifs ++ ;; ++ esac ++else $as_nop ++ enable_fast_install=yes ++fi ++ ++ ++ ++ ++ ++ ++ ++ ++ shared_archive_member_spec= ++case $host,$enable_shared in ++power*-*-aix[5-9]*,yes) ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking which variant of shared library versioning to provide" >&5 ++printf %s "checking which variant of shared library versioning to provide... " >&6; } ++ ++# Check whether --with-aix-soname was given. ++if test ${with_aix_soname+y} ++then : ++ withval=$with_aix_soname; case $withval in ++ aix|svr4|both) ++ ;; ++ *) ++ as_fn_error $? 
"Unknown argument to --with-aix-soname" "$LINENO" 5 ++ ;; ++ esac ++ lt_cv_with_aix_soname=$with_aix_soname ++else $as_nop ++ if test ${lt_cv_with_aix_soname+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ lt_cv_with_aix_soname=aix ++fi ++ ++ with_aix_soname=$lt_cv_with_aix_soname ++fi ++ ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $with_aix_soname" >&5 ++printf "%s\n" "$with_aix_soname" >&6; } ++ if test aix != "$with_aix_soname"; then ++ # For the AIX way of multilib, we name the shared archive member ++ # based on the bitwidth used, traditionally 'shr.o' or 'shr_64.o', ++ # and 'shr.imp' or 'shr_64.imp', respectively, for the Import File. ++ # Even when GNU compilers ignore OBJECT_MODE but need '-maix64' flag, ++ # the AIX toolchain works better with OBJECT_MODE set (default 32). ++ if test 64 = "${OBJECT_MODE-32}"; then ++ shared_archive_member_spec=shr_64 ++ else ++ shared_archive_member_spec=shr ++ fi ++ fi ++ ;; ++*) ++ with_aix_soname=aix ++ ;; ++esac ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++# This can be used to rebuild libtool when needed ++LIBTOOL_DEPS=$ltmain ++ ++# Always use our own libtool. ++LIBTOOL='$(SHELL) $(top_builddir)/libtool' ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++test -z "$LN_S" && LN_S="ln -s" ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++if test -n "${ZSH_VERSION+set}"; then ++ setopt NO_GLOB_SUBST ++fi ++ ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for objdir" >&5 ++printf %s "checking for objdir... " >&6; } ++if test ${lt_cv_objdir+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ rm -f .libs 2>/dev/null ++mkdir .libs 2>/dev/null ++if test -d .libs; then ++ lt_cv_objdir=.libs ++else ++ # MS-DOS does not allow filenames that begin with a dot. 
++ lt_cv_objdir=_libs ++fi ++rmdir .libs 2>/dev/null ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_objdir" >&5 ++printf "%s\n" "$lt_cv_objdir" >&6; } ++objdir=$lt_cv_objdir ++ ++ ++ ++ ++ ++printf "%s\n" "#define LT_OBJDIR \"$lt_cv_objdir/\"" >>confdefs.h ++ ++ ++ ++ ++case $host_os in ++aix3*) ++ # AIX sometimes has problems with the GCC collect2 program. For some ++ # reason, if we set the COLLECT_NAMES environment variable, the problems ++ # vanish in a puff of smoke. ++ if test set != "${COLLECT_NAMES+set}"; then ++ COLLECT_NAMES= ++ export COLLECT_NAMES ++ fi ++ ;; ++esac ++ ++# Global variables: ++ofile=libtool ++can_build_shared=yes ++ ++# All known linkers require a '.a' archive for static linking (except MSVC and ++# ICC, which need '.lib'). ++libext=a ++ ++with_gnu_ld=$lt_cv_prog_gnu_ld ++ ++old_CC=$CC ++old_CFLAGS=$CFLAGS ++ ++# Set sane defaults for various variables ++test -z "$CC" && CC=cc ++test -z "$LTCC" && LTCC=$CC ++test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS ++test -z "$LD" && LD=ld ++test -z "$ac_objext" && ac_objext=o ++ ++func_cc_basename $compiler ++cc_basename=$func_cc_basename_result ++ ++ ++# Only perform the check for file, if the check method requires it ++test -z "$MAGIC_CMD" && MAGIC_CMD=file ++case $deplibs_check_method in ++file_magic*) ++ if test "$file_magic_cmd" = '$MAGIC_CMD'; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for ${ac_tool_prefix}file" >&5 ++printf %s "checking for ${ac_tool_prefix}file... " >&6; } ++if test ${lt_cv_path_MAGIC_CMD+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ case $MAGIC_CMD in ++[\\/*] | ?:[\\/]*) ++ lt_cv_path_MAGIC_CMD=$MAGIC_CMD # Let the user override the test with a path. ++ ;; ++*) ++ lt_save_MAGIC_CMD=$MAGIC_CMD ++ lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR ++ ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" ++ for ac_dir in $ac_dummy; do ++ IFS=$lt_save_ifs ++ test -z "$ac_dir" && ac_dir=. 
++ if test -f "$ac_dir/${ac_tool_prefix}file"; then ++ lt_cv_path_MAGIC_CMD=$ac_dir/"${ac_tool_prefix}file" ++ if test -n "$file_magic_test_file"; then ++ case $deplibs_check_method in ++ "file_magic "*) ++ file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` ++ MAGIC_CMD=$lt_cv_path_MAGIC_CMD ++ if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | ++ $EGREP "$file_magic_regex" > /dev/null; then ++ : ++ else ++ cat <<_LT_EOF 1>&2 ++ ++*** Warning: the command libtool uses to detect shared libraries, ++*** $file_magic_cmd, produces output that libtool cannot recognize. ++*** The result is that libtool may fail to recognize shared libraries ++*** as such. This will affect the creation of libtool libraries that ++*** depend on shared libraries, but programs linked with such libtool ++*** libraries will work regardless of this problem. Nevertheless, you ++*** may want to report the problem to your system manager and/or to ++*** bug-libtool@gnu.org ++ ++_LT_EOF ++ fi ;; ++ esac ++ fi ++ break ++ fi ++ done ++ IFS=$lt_save_ifs ++ MAGIC_CMD=$lt_save_MAGIC_CMD ++ ;; ++esac ++fi ++ ++MAGIC_CMD=$lt_cv_path_MAGIC_CMD ++if test -n "$MAGIC_CMD"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5 ++printf "%s\n" "$MAGIC_CMD" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ ++ ++ ++ ++if test -z "$lt_cv_path_MAGIC_CMD"; then ++ if test -n "$ac_tool_prefix"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for file" >&5 ++printf %s "checking for file... " >&6; } ++if test ${lt_cv_path_MAGIC_CMD+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ case $MAGIC_CMD in ++[\\/*] | ?:[\\/]*) ++ lt_cv_path_MAGIC_CMD=$MAGIC_CMD # Let the user override the test with a path. 
++ ;; ++*) ++ lt_save_MAGIC_CMD=$MAGIC_CMD ++ lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR ++ ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" ++ for ac_dir in $ac_dummy; do ++ IFS=$lt_save_ifs ++ test -z "$ac_dir" && ac_dir=. ++ if test -f "$ac_dir/file"; then ++ lt_cv_path_MAGIC_CMD=$ac_dir/"file" ++ if test -n "$file_magic_test_file"; then ++ case $deplibs_check_method in ++ "file_magic "*) ++ file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` ++ MAGIC_CMD=$lt_cv_path_MAGIC_CMD ++ if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | ++ $EGREP "$file_magic_regex" > /dev/null; then ++ : ++ else ++ cat <<_LT_EOF 1>&2 ++ ++*** Warning: the command libtool uses to detect shared libraries, ++*** $file_magic_cmd, produces output that libtool cannot recognize. ++*** The result is that libtool may fail to recognize shared libraries ++*** as such. This will affect the creation of libtool libraries that ++*** depend on shared libraries, but programs linked with such libtool ++*** libraries will work regardless of this problem. 
Nevertheless, you ++*** may want to report the problem to your system manager and/or to ++*** bug-libtool@gnu.org ++ ++_LT_EOF ++ fi ;; ++ esac ++ fi ++ break ++ fi ++ done ++ IFS=$lt_save_ifs ++ MAGIC_CMD=$lt_save_MAGIC_CMD ++ ;; ++esac ++fi ++ ++MAGIC_CMD=$lt_cv_path_MAGIC_CMD ++if test -n "$MAGIC_CMD"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5 ++printf "%s\n" "$MAGIC_CMD" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++ ++ ++ else ++ MAGIC_CMD=: ++ fi ++fi ++ ++ fi ++ ;; ++esac ++ ++# Use C for the default configuration in the libtool script ++ ++lt_save_CC=$CC ++ac_ext=c ++ac_cpp='$CPP $CPPFLAGS' ++ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ++ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ++ac_compiler_gnu=$ac_cv_c_compiler_gnu ++ ++ ++# Source file extension for C test sources. ++ac_ext=c ++ ++# Object file extension for compiled C test sources. ++objext=o ++objext=$objext ++ ++# Code to be used in simple compile tests ++lt_simple_compile_test_code="int some_variable = 0;" ++ ++# Code to be used in simple link tests ++lt_simple_link_test_code='int main(){return(0);}' ++ ++ ++ ++ ++ ++ ++ ++# If no C compiler was specified, use CC. ++LTCC=${LTCC-"$CC"} ++ ++# If no C compiler flags were specified, use CFLAGS. ++LTCFLAGS=${LTCFLAGS-"$CFLAGS"} ++ ++# Allow CC to be a program name with arguments. ++compiler=$CC ++ ++# Save the default compiler, since it gets overwritten when the other ++# tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP. 
++compiler_DEFAULT=$CC ++ ++# save warnings/boilerplate of simple test code ++ac_outfile=conftest.$ac_objext ++echo "$lt_simple_compile_test_code" >conftest.$ac_ext ++eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err ++_lt_compiler_boilerplate=`cat conftest.err` ++$RM conftest* ++ ++ac_outfile=conftest.$ac_objext ++echo "$lt_simple_link_test_code" >conftest.$ac_ext ++eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err ++_lt_linker_boilerplate=`cat conftest.err` ++$RM -r conftest* ++ ++ ++if test -n "$compiler"; then ++ ++lt_prog_compiler_no_builtin_flag= ++ ++if test yes = "$GCC"; then ++ case $cc_basename in ++ nvcc*) ++ lt_prog_compiler_no_builtin_flag=' -Xcompiler -fno-builtin' ;; ++ *) ++ lt_prog_compiler_no_builtin_flag=' -fno-builtin' ;; ++ esac ++ ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -fno-rtti -fno-exceptions" >&5 ++printf %s "checking if $compiler supports -fno-rtti -fno-exceptions... " >&6; } ++if test ${lt_cv_prog_compiler_rtti_exceptions+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ lt_cv_prog_compiler_rtti_exceptions=no ++ ac_outfile=conftest.$ac_objext ++ echo "$lt_simple_compile_test_code" > conftest.$ac_ext ++ lt_compiler_flag="-fno-rtti -fno-exceptions" ## exclude from sc_useless_quotes_in_assignment ++ # Insert the option either (1) after the last *FLAGS variable, or ++ # (2) before a word containing "conftest.", or (3) at the end. ++ # Note that $ac_compile itself does not contain backslashes and begins ++ # with a dollar sign (not a hyphen), so the echo should work correctly. ++ # The option is referenced via a variable to avoid confusing sed. ++ lt_compile=`echo "$ac_compile" | $SED \ ++ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ ++ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ ++ -e 's:$: $lt_compiler_flag:'` ++ (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) ++ (eval "$lt_compile" 2>conftest.err) ++ ac_status=$? 
++ cat conftest.err >&5 ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ if (exit $ac_status) && test -s "$ac_outfile"; then ++ # The compiler can only warn and ignore the option if not recognized ++ # So say no if there are warnings other than the usual output. ++ $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp ++ $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 ++ if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then ++ lt_cv_prog_compiler_rtti_exceptions=yes ++ fi ++ fi ++ $RM conftest* ++ ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_rtti_exceptions" >&5 ++printf "%s\n" "$lt_cv_prog_compiler_rtti_exceptions" >&6; } ++ ++if test yes = "$lt_cv_prog_compiler_rtti_exceptions"; then ++ lt_prog_compiler_no_builtin_flag="$lt_prog_compiler_no_builtin_flag -fno-rtti -fno-exceptions" ++else ++ : ++fi ++ ++fi ++ ++ ++ ++ ++ ++ ++ lt_prog_compiler_wl= ++lt_prog_compiler_pic= ++lt_prog_compiler_static= ++ ++ ++ if test yes = "$GCC"; then ++ lt_prog_compiler_wl='-Wl,' ++ lt_prog_compiler_static='-static' ++ ++ case $host_os in ++ aix*) ++ # All AIX code is PIC. ++ if test ia64 = "$host_cpu"; then ++ # AIX 5 now supports IA64 processor ++ lt_prog_compiler_static='-Bstatic' ++ fi ++ lt_prog_compiler_pic='-fPIC' ++ ;; ++ ++ amigaos*) ++ case $host_cpu in ++ powerpc) ++ # see comment about AmigaOS4 .so support ++ lt_prog_compiler_pic='-fPIC' ++ ;; ++ m68k) ++ # FIXME: we need at least 68020 code to build shared libraries, but ++ # adding the '-m68020' flag to GCC prevents building anything better, ++ # like '-m68040'. ++ lt_prog_compiler_pic='-m68020 -resident32 -malways-restore-a4' ++ ;; ++ esac ++ ;; ++ ++ beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) ++ # PIC is the default for these OSes. ++ ;; ++ ++ mingw* | cygwin* | pw32* | os2* | cegcc*) ++ # This hack is so that the source file can tell whether it is being ++ # built for inclusion in a dll (and should export symbols for example). 
++ # Although the cygwin gcc ignores -fPIC, still need this for old-style ++ # (--disable-auto-import) libraries ++ lt_prog_compiler_pic='-DDLL_EXPORT' ++ case $host_os in ++ os2*) ++ lt_prog_compiler_static='$wl-static' ++ ;; ++ esac ++ ;; ++ ++ darwin* | rhapsody*) ++ # PIC is the default on this platform ++ # Common symbols not allowed in MH_DYLIB files ++ lt_prog_compiler_pic='-fno-common' ++ ;; ++ ++ haiku*) ++ # PIC is the default for Haiku. ++ # The "-static" flag exists, but is broken. ++ lt_prog_compiler_static= ++ ;; ++ ++ hpux*) ++ # PIC is the default for 64-bit PA HP-UX, but not for 32-bit ++ # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag ++ # sets the default TLS model and affects inlining. ++ case $host_cpu in ++ hppa*64*) ++ # +Z the default ++ ;; ++ *) ++ lt_prog_compiler_pic='-fPIC' ++ ;; ++ esac ++ ;; ++ ++ interix[3-9]*) ++ # Interix 3.x gcc -fpic/-fPIC options generate broken code. ++ # Instead, we relocate shared libraries at runtime. ++ ;; ++ ++ msdosdjgpp*) ++ # Just because we use GCC doesn't mean we suddenly get shared libraries ++ # on systems that don't support them. ++ lt_prog_compiler_can_build_shared=no ++ enable_shared=no ++ ;; ++ ++ *nto* | *qnx*) ++ # QNX uses GNU C++, but need to define -shared option too, otherwise ++ # it will coredump. ++ lt_prog_compiler_pic='-fPIC -shared' ++ ;; ++ ++ sysv4*MP*) ++ if test -d /usr/nec; then ++ lt_prog_compiler_pic=-Kconform_pic ++ fi ++ ;; ++ ++ *) ++ lt_prog_compiler_pic='-fPIC' ++ ;; ++ esac ++ ++ case $cc_basename in ++ nvcc*) # Cuda Compiler Driver 2.2 ++ lt_prog_compiler_wl='-Xlinker ' ++ if test -n "$lt_prog_compiler_pic"; then ++ lt_prog_compiler_pic="-Xcompiler $lt_prog_compiler_pic" ++ fi ++ ;; ++ esac ++ else ++ # PORTME Check for flag to pass linker flags through the system compiler. 
++ case $host_os in ++ aix*) ++ lt_prog_compiler_wl='-Wl,' ++ if test ia64 = "$host_cpu"; then ++ # AIX 5 now supports IA64 processor ++ lt_prog_compiler_static='-Bstatic' ++ else ++ lt_prog_compiler_static='-bnso -bI:/lib/syscalls.exp' ++ fi ++ ;; ++ ++ darwin* | rhapsody*) ++ # PIC is the default on this platform ++ # Common symbols not allowed in MH_DYLIB files ++ lt_prog_compiler_pic='-fno-common' ++ case $cc_basename in ++ nagfor*) ++ # NAG Fortran compiler ++ lt_prog_compiler_wl='-Wl,-Wl,,' ++ lt_prog_compiler_pic='-PIC' ++ lt_prog_compiler_static='-Bstatic' ++ ;; ++ esac ++ ;; ++ ++ mingw* | cygwin* | pw32* | os2* | cegcc*) ++ # This hack is so that the source file can tell whether it is being ++ # built for inclusion in a dll (and should export symbols for example). ++ lt_prog_compiler_pic='-DDLL_EXPORT' ++ case $host_os in ++ os2*) ++ lt_prog_compiler_static='$wl-static' ++ ;; ++ esac ++ ;; ++ ++ hpux9* | hpux10* | hpux11*) ++ lt_prog_compiler_wl='-Wl,' ++ # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but ++ # not for PA HP-UX. ++ case $host_cpu in ++ hppa*64*|ia64*) ++ # +Z the default ++ ;; ++ *) ++ lt_prog_compiler_pic='+Z' ++ ;; ++ esac ++ # Is there a better lt_prog_compiler_static that works with the bundled CC? ++ lt_prog_compiler_static='$wl-a ${wl}archive' ++ ;; ++ ++ irix5* | irix6* | nonstopux*) ++ lt_prog_compiler_wl='-Wl,' ++ # PIC (with -KPIC) is the default. ++ lt_prog_compiler_static='-non_shared' ++ ;; ++ ++ linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) ++ case $cc_basename in ++ # old Intel for x86_64, which still supported -KPIC. ++ ecc*) ++ lt_prog_compiler_wl='-Wl,' ++ lt_prog_compiler_pic='-KPIC' ++ lt_prog_compiler_static='-static' ++ ;; ++ # icc used to be incompatible with GCC. ++ # ICC 10 doesn't accept -KPIC any more. ++ icc* | ifort*) ++ lt_prog_compiler_wl='-Wl,' ++ lt_prog_compiler_pic='-fPIC' ++ lt_prog_compiler_static='-static' ++ ;; ++ # Lahey Fortran 8.1. 
++ lf95*) ++ lt_prog_compiler_wl='-Wl,' ++ lt_prog_compiler_pic='--shared' ++ lt_prog_compiler_static='--static' ++ ;; ++ nagfor*) ++ # NAG Fortran compiler ++ lt_prog_compiler_wl='-Wl,-Wl,,' ++ lt_prog_compiler_pic='-PIC' ++ lt_prog_compiler_static='-Bstatic' ++ ;; ++ tcc*) ++ # Fabrice Bellard et al's Tiny C Compiler ++ lt_prog_compiler_wl='-Wl,' ++ lt_prog_compiler_pic='-fPIC' ++ lt_prog_compiler_static='-static' ++ ;; ++ pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) ++ # Portland Group compilers (*not* the Pentium gcc compiler, ++ # which looks to be a dead project) ++ lt_prog_compiler_wl='-Wl,' ++ lt_prog_compiler_pic='-fpic' ++ lt_prog_compiler_static='-Bstatic' ++ ;; ++ ccc*) ++ lt_prog_compiler_wl='-Wl,' ++ # All Alpha code is PIC. ++ lt_prog_compiler_static='-non_shared' ++ ;; ++ xl* | bgxl* | bgf* | mpixl*) ++ # IBM XL C 8.0/Fortran 10.1, 11.1 on PPC and BlueGene ++ lt_prog_compiler_wl='-Wl,' ++ lt_prog_compiler_pic='-qpic' ++ lt_prog_compiler_static='-qstaticlink' ++ ;; ++ *) ++ case `$CC -V 2>&1 | $SED 5q` in ++ *Sun\ Ceres\ Fortran* | *Sun*Fortran*\ [1-7].* | *Sun*Fortran*\ 8.[0-3]*) ++ # Sun Fortran 8.3 passes all unrecognized flags to the linker ++ lt_prog_compiler_pic='-KPIC' ++ lt_prog_compiler_static='-Bstatic' ++ lt_prog_compiler_wl='' ++ ;; ++ *Sun\ F* | *Sun*Fortran*) ++ lt_prog_compiler_pic='-KPIC' ++ lt_prog_compiler_static='-Bstatic' ++ lt_prog_compiler_wl='-Qoption ld ' ++ ;; ++ *Sun\ C*) ++ # Sun C 5.9 ++ lt_prog_compiler_pic='-KPIC' ++ lt_prog_compiler_static='-Bstatic' ++ lt_prog_compiler_wl='-Wl,' ++ ;; ++ *Intel*\ [CF]*Compiler*) ++ lt_prog_compiler_wl='-Wl,' ++ lt_prog_compiler_pic='-fPIC' ++ lt_prog_compiler_static='-static' ++ ;; ++ *Portland\ Group*) ++ lt_prog_compiler_wl='-Wl,' ++ lt_prog_compiler_pic='-fpic' ++ lt_prog_compiler_static='-Bstatic' ++ ;; ++ esac ++ ;; ++ esac ++ ;; ++ ++ newsos6) ++ lt_prog_compiler_pic='-KPIC' ++ lt_prog_compiler_static='-Bstatic' ++ ;; ++ ++ *nto* | *qnx*) ++ # QNX uses GNU C++, but need to 
define -shared option too, otherwise ++ # it will coredump. ++ lt_prog_compiler_pic='-fPIC -shared' ++ ;; ++ ++ osf3* | osf4* | osf5*) ++ lt_prog_compiler_wl='-Wl,' ++ # All OSF/1 code is PIC. ++ lt_prog_compiler_static='-non_shared' ++ ;; ++ ++ rdos*) ++ lt_prog_compiler_static='-non_shared' ++ ;; ++ ++ solaris*) ++ lt_prog_compiler_pic='-KPIC' ++ lt_prog_compiler_static='-Bstatic' ++ case $cc_basename in ++ f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) ++ lt_prog_compiler_wl='-Qoption ld ';; ++ *) ++ lt_prog_compiler_wl='-Wl,';; ++ esac ++ ;; ++ ++ sunos4*) ++ lt_prog_compiler_wl='-Qoption ld ' ++ lt_prog_compiler_pic='-PIC' ++ lt_prog_compiler_static='-Bstatic' ++ ;; ++ ++ sysv4 | sysv4.2uw2* | sysv4.3*) ++ lt_prog_compiler_wl='-Wl,' ++ lt_prog_compiler_pic='-KPIC' ++ lt_prog_compiler_static='-Bstatic' ++ ;; ++ ++ sysv4*MP*) ++ if test -d /usr/nec; then ++ lt_prog_compiler_pic='-Kconform_pic' ++ lt_prog_compiler_static='-Bstatic' ++ fi ++ ;; ++ ++ sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) ++ lt_prog_compiler_wl='-Wl,' ++ lt_prog_compiler_pic='-KPIC' ++ lt_prog_compiler_static='-Bstatic' ++ ;; ++ ++ unicos*) ++ lt_prog_compiler_wl='-Wl,' ++ lt_prog_compiler_can_build_shared=no ++ ;; ++ ++ uts4*) ++ lt_prog_compiler_pic='-pic' ++ lt_prog_compiler_static='-Bstatic' ++ ;; ++ ++ *) ++ lt_prog_compiler_can_build_shared=no ++ ;; ++ esac ++ fi ++ ++case $host_os in ++ # For platforms that do not support PIC, -DPIC is meaningless: ++ *djgpp*) ++ lt_prog_compiler_pic= ++ ;; ++ *) ++ lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC" ++ ;; ++esac ++ ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 ++printf %s "checking for $compiler option to produce PIC... 
" >&6; } ++if test ${lt_cv_prog_compiler_pic+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ lt_cv_prog_compiler_pic=$lt_prog_compiler_pic ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic" >&5 ++printf "%s\n" "$lt_cv_prog_compiler_pic" >&6; } ++lt_prog_compiler_pic=$lt_cv_prog_compiler_pic ++ ++# ++# Check to make sure the PIC flag actually works. ++# ++if test -n "$lt_prog_compiler_pic"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic works" >&5 ++printf %s "checking if $compiler PIC flag $lt_prog_compiler_pic works... " >&6; } ++if test ${lt_cv_prog_compiler_pic_works+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ lt_cv_prog_compiler_pic_works=no ++ ac_outfile=conftest.$ac_objext ++ echo "$lt_simple_compile_test_code" > conftest.$ac_ext ++ lt_compiler_flag="$lt_prog_compiler_pic -DPIC" ## exclude from sc_useless_quotes_in_assignment ++ # Insert the option either (1) after the last *FLAGS variable, or ++ # (2) before a word containing "conftest.", or (3) at the end. ++ # Note that $ac_compile itself does not contain backslashes and begins ++ # with a dollar sign (not a hyphen), so the echo should work correctly. ++ # The option is referenced via a variable to avoid confusing sed. ++ lt_compile=`echo "$ac_compile" | $SED \ ++ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ ++ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ ++ -e 's:$: $lt_compiler_flag:'` ++ (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) ++ (eval "$lt_compile" 2>conftest.err) ++ ac_status=$? ++ cat conftest.err >&5 ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ if (exit $ac_status) && test -s "$ac_outfile"; then ++ # The compiler can only warn and ignore the option if not recognized ++ # So say no if there are warnings other than the usual output. 
++ $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp ++ $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 ++ if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then ++ lt_cv_prog_compiler_pic_works=yes ++ fi ++ fi ++ $RM conftest* ++ ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works" >&5 ++printf "%s\n" "$lt_cv_prog_compiler_pic_works" >&6; } ++ ++if test yes = "$lt_cv_prog_compiler_pic_works"; then ++ case $lt_prog_compiler_pic in ++ "" | " "*) ;; ++ *) lt_prog_compiler_pic=" $lt_prog_compiler_pic" ;; ++ esac ++else ++ lt_prog_compiler_pic= ++ lt_prog_compiler_can_build_shared=no ++fi ++ ++fi ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++# ++# Check to make sure the static flag actually works. ++# ++wl=$lt_prog_compiler_wl eval lt_tmp_static_flag=\"$lt_prog_compiler_static\" ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5 ++printf %s "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; } ++if test ${lt_cv_prog_compiler_static_works+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ lt_cv_prog_compiler_static_works=no ++ save_LDFLAGS=$LDFLAGS ++ LDFLAGS="$LDFLAGS $lt_tmp_static_flag" ++ echo "$lt_simple_link_test_code" > conftest.$ac_ext ++ if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then ++ # The linker can only warn and ignore the option if not recognized ++ # So say no if there are warnings ++ if test -s conftest.err; then ++ # Append any errors to the config.log. 
++ cat conftest.err 1>&5 ++ $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp ++ $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 ++ if diff conftest.exp conftest.er2 >/dev/null; then ++ lt_cv_prog_compiler_static_works=yes ++ fi ++ else ++ lt_cv_prog_compiler_static_works=yes ++ fi ++ fi ++ $RM -r conftest* ++ LDFLAGS=$save_LDFLAGS ++ ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works" >&5 ++printf "%s\n" "$lt_cv_prog_compiler_static_works" >&6; } ++ ++if test yes = "$lt_cv_prog_compiler_static_works"; then ++ : ++else ++ lt_prog_compiler_static= ++fi ++ ++ ++ ++ ++ ++ ++ ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 ++printf %s "checking if $compiler supports -c -o file.$ac_objext... " >&6; } ++if test ${lt_cv_prog_compiler_c_o+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ lt_cv_prog_compiler_c_o=no ++ $RM -r conftest 2>/dev/null ++ mkdir conftest ++ cd conftest ++ mkdir out ++ echo "$lt_simple_compile_test_code" > conftest.$ac_ext ++ ++ lt_compiler_flag="-o out/conftest2.$ac_objext" ++ # Insert the option either (1) after the last *FLAGS variable, or ++ # (2) before a word containing "conftest.", or (3) at the end. ++ # Note that $ac_compile itself does not contain backslashes and begins ++ # with a dollar sign (not a hyphen), so the echo should work correctly. ++ lt_compile=`echo "$ac_compile" | $SED \ ++ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ ++ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ ++ -e 's:$: $lt_compiler_flag:'` ++ (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) ++ (eval "$lt_compile" 2>out/conftest.err) ++ ac_status=$? ++ cat out/conftest.err >&5 ++ echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 ++ if (exit $ac_status) && test -s out/conftest2.$ac_objext ++ then ++ # The compiler can only warn and ignore the option if not recognized ++ # So say no if there are warnings ++ $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp ++ $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 ++ if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then ++ lt_cv_prog_compiler_c_o=yes ++ fi ++ fi ++ chmod u+w . 2>&5 ++ $RM conftest* ++ # SGI C++ compiler will create directory out/ii_files/ for ++ # template instantiation ++ test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files ++ $RM out/* && rmdir out ++ cd .. ++ $RM -r conftest ++ $RM conftest* ++ ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5 ++printf "%s\n" "$lt_cv_prog_compiler_c_o" >&6; } ++ ++ ++ ++ ++ ++ ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 ++printf %s "checking if $compiler supports -c -o file.$ac_objext... " >&6; } ++if test ${lt_cv_prog_compiler_c_o+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ lt_cv_prog_compiler_c_o=no ++ $RM -r conftest 2>/dev/null ++ mkdir conftest ++ cd conftest ++ mkdir out ++ echo "$lt_simple_compile_test_code" > conftest.$ac_ext ++ ++ lt_compiler_flag="-o out/conftest2.$ac_objext" ++ # Insert the option either (1) after the last *FLAGS variable, or ++ # (2) before a word containing "conftest.", or (3) at the end. ++ # Note that $ac_compile itself does not contain backslashes and begins ++ # with a dollar sign (not a hyphen), so the echo should work correctly. ++ lt_compile=`echo "$ac_compile" | $SED \ ++ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ ++ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ ++ -e 's:$: $lt_compiler_flag:'` ++ (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) ++ (eval "$lt_compile" 2>out/conftest.err) ++ ac_status=$? 
++ cat out/conftest.err >&5 ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ if (exit $ac_status) && test -s out/conftest2.$ac_objext ++ then ++ # The compiler can only warn and ignore the option if not recognized ++ # So say no if there are warnings ++ $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp ++ $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 ++ if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then ++ lt_cv_prog_compiler_c_o=yes ++ fi ++ fi ++ chmod u+w . 2>&5 ++ $RM conftest* ++ # SGI C++ compiler will create directory out/ii_files/ for ++ # template instantiation ++ test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files ++ $RM out/* && rmdir out ++ cd .. ++ $RM -r conftest ++ $RM conftest* ++ ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5 ++printf "%s\n" "$lt_cv_prog_compiler_c_o" >&6; } ++ ++ ++ ++ ++hard_links=nottested ++if test no = "$lt_cv_prog_compiler_c_o" && test no != "$need_locks"; then ++ # do not overwrite the value of need_locks provided by the user ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5 ++printf %s "checking if we can lock with hard links... 
" >&6; } ++ hard_links=yes ++ $RM conftest* ++ ln conftest.a conftest.b 2>/dev/null && hard_links=no ++ touch conftest.a ++ ln conftest.a conftest.b 2>&5 || hard_links=no ++ ln conftest.a conftest.b 2>/dev/null && hard_links=no ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5 ++printf "%s\n" "$hard_links" >&6; } ++ if test no = "$hard_links"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&5 ++printf "%s\n" "$as_me: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&2;} ++ need_locks=warn ++ fi ++else ++ need_locks=no ++fi ++ ++ ++ ++ ++ ++ ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 ++printf %s "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; } ++ ++ runpath_var= ++ allow_undefined_flag= ++ always_export_symbols=no ++ archive_cmds= ++ archive_expsym_cmds= ++ compiler_needs_object=no ++ enable_shared_with_static_runtimes=no ++ export_dynamic_flag_spec= ++ export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' ++ hardcode_automatic=no ++ hardcode_direct=no ++ hardcode_direct_absolute=no ++ hardcode_libdir_flag_spec= ++ hardcode_libdir_separator= ++ hardcode_minus_L=no ++ hardcode_shlibpath_var=unsupported ++ inherit_rpath=no ++ link_all_deplibs=unknown ++ module_cmds= ++ module_expsym_cmds= ++ old_archive_from_new_cmds= ++ old_archive_from_expsyms_cmds= ++ thread_safe_flag_spec= ++ whole_archive_flag_spec= ++ # include_expsyms should be a list of space-separated symbols to be *always* ++ # included in the symbol list ++ include_expsyms= ++ # exclude_expsyms can be an extended regexp of symbols to exclude ++ # it will be wrapped by ' (' and ')$', so one must not match beginning or ++ # end of line. 
Example: 'a|bc|.*d.*' will exclude the symbols 'a' and 'bc', ++ # as well as any symbol that contains 'd'. ++ exclude_expsyms='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' ++ # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out ++ # platforms (ab)use it in PIC code, but their linkers get confused if ++ # the symbol is explicitly referenced. Since portable code cannot ++ # rely on this symbol name, it's probably fine to never include it in ++ # preloaded symbol tables. ++ # Exclude shared library initialization/finalization symbols. ++ extract_expsyms_cmds= ++ ++ case $host_os in ++ cygwin* | mingw* | pw32* | cegcc*) ++ # FIXME: the MSVC++ and ICC port hasn't been tested in a loooong time ++ # When not using gcc, we currently assume that we are using ++ # Microsoft Visual C++ or Intel C++ Compiler. ++ if test yes != "$GCC"; then ++ with_gnu_ld=no ++ fi ++ ;; ++ interix*) ++ # we just hope/assume this is gcc and not c89 (= MSVC++ or ICC) ++ with_gnu_ld=yes ++ ;; ++ openbsd* | bitrig*) ++ with_gnu_ld=no ++ ;; ++ esac ++ ++ ld_shlibs=yes ++ ++ # On some targets, GNU ld is compatible enough with the native linker ++ # that we're better off using the native interface for both. ++ lt_use_gnu_ld_interface=no ++ if test yes = "$with_gnu_ld"; then ++ case $host_os in ++ aix*) ++ # The AIX port of GNU ld has always aspired to compatibility ++ # with the native linker. However, as the warning in the GNU ld ++ # block says, versions before 2.19.5* couldn't really create working ++ # shared libraries, regardless of the interface used. 
++ case `$LD -v 2>&1` in ++ *\ \(GNU\ Binutils\)\ 2.19.5*) ;; ++ *\ \(GNU\ Binutils\)\ 2.[2-9]*) ;; ++ *\ \(GNU\ Binutils\)\ [3-9]*) ;; ++ *) ++ lt_use_gnu_ld_interface=yes ++ ;; ++ esac ++ ;; ++ *) ++ lt_use_gnu_ld_interface=yes ++ ;; ++ esac ++ fi ++ ++ if test yes = "$lt_use_gnu_ld_interface"; then ++ # If archive_cmds runs LD, not CC, wlarc should be empty ++ wlarc='$wl' ++ ++ # Set some defaults for GNU ld with shared library support. These ++ # are reset later if shared libraries are not supported. Putting them ++ # here allows them to be overridden if necessary. ++ runpath_var=LD_RUN_PATH ++ hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' ++ export_dynamic_flag_spec='$wl--export-dynamic' ++ # ancient GNU ld didn't support --whole-archive et. al. ++ if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then ++ whole_archive_flag_spec=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive' ++ else ++ whole_archive_flag_spec= ++ fi ++ supports_anon_versioning=no ++ case `$LD -v | $SED -e 's/([^)]\+)\s\+//' 2>&1` in ++ *GNU\ gold*) supports_anon_versioning=yes ;; ++ *\ [01].* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11 ++ *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... ++ *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... ++ *\ 2.11.*) ;; # other 2.11 versions ++ *) supports_anon_versioning=yes ;; ++ esac ++ ++ # See if GNU ld supports shared libraries. ++ case $host_os in ++ aix[3-9]*) ++ # On AIX/PPC, the GNU linker is very broken ++ if test ia64 != "$host_cpu"; then ++ ld_shlibs=no ++ cat <<_LT_EOF 1>&2 ++ ++*** Warning: the GNU linker, at least up to release 2.19, is reported ++*** to be unable to reliably create shared libraries on AIX. ++*** Therefore, libtool is disabling shared libraries support. If you ++*** really care for shared libraries, you may want to install binutils ++*** 2.20 or above, or modify your PATH so that a non-GNU linker is found. 
++*** You will then need to restart the configuration process. ++ ++_LT_EOF ++ fi ++ ;; ++ ++ amigaos*) ++ case $host_cpu in ++ powerpc) ++ # see comment about AmigaOS4 .so support ++ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' ++ archive_expsym_cmds='' ++ ;; ++ m68k) ++ archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' ++ hardcode_libdir_flag_spec='-L$libdir' ++ hardcode_minus_L=yes ++ ;; ++ esac ++ ;; ++ ++ beos*) ++ if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then ++ allow_undefined_flag=unsupported ++ # Joseph Beckenbach says some releases of gcc ++ # support --undefined. This deserves some investigation. FIXME ++ archive_cmds='$CC -nostart $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' ++ else ++ ld_shlibs=no ++ fi ++ ;; ++ ++ cygwin* | mingw* | pw32* | cegcc*) ++ # _LT_TAGVAR(hardcode_libdir_flag_spec, ) is actually meaningless, ++ # as there is no search path for DLLs. 
++ hardcode_libdir_flag_spec='-L$libdir' ++ export_dynamic_flag_spec='$wl--export-all-symbols' ++ allow_undefined_flag=unsupported ++ always_export_symbols=no ++ enable_shared_with_static_runtimes=yes ++ export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' ++ exclude_expsyms='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' ++ ++ if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then ++ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' ++ # If the export-symbols file already is a .def file, use it as ++ # is; otherwise, prepend EXPORTS... ++ archive_expsym_cmds='if test DEF = "`$SED -n -e '\''s/^[ ]*//'\'' -e '\''/^\(;.*\)*$/d'\'' -e '\''s/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p'\'' -e q $export_symbols`" ; then ++ cp $export_symbols $output_objdir/$soname.def; ++ else ++ echo EXPORTS > $output_objdir/$soname.def; ++ cat $export_symbols >> $output_objdir/$soname.def; ++ fi~ ++ $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' ++ else ++ ld_shlibs=no ++ fi ++ ;; ++ ++ haiku*) ++ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' ++ link_all_deplibs=yes ++ ;; ++ ++ os2*) ++ hardcode_libdir_flag_spec='-L$libdir' ++ hardcode_minus_L=yes ++ allow_undefined_flag=unsupported ++ shrext_cmds=.dll ++ archive_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ ++ $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ ++ $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ ++ $ECHO EXPORTS >> $output_objdir/$libname.def~ ++ emxexp 
$libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ ++ $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ ++ emximp -o $lib $output_objdir/$libname.def' ++ archive_expsym_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ ++ $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ ++ $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ ++ $ECHO EXPORTS >> $output_objdir/$libname.def~ ++ prefix_cmds="$SED"~ ++ if test EXPORTS = "`$SED 1q $export_symbols`"; then ++ prefix_cmds="$prefix_cmds -e 1d"; ++ fi~ ++ prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ ++ cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ ++ $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ ++ emximp -o $lib $output_objdir/$libname.def' ++ old_archive_From_new_cmds='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' ++ enable_shared_with_static_runtimes=yes ++ file_list_spec='@' ++ ;; ++ ++ interix[3-9]*) ++ hardcode_direct=no ++ hardcode_shlibpath_var=no ++ hardcode_libdir_flag_spec='$wl-rpath,$libdir' ++ export_dynamic_flag_spec='$wl-E' ++ # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. ++ # Instead, shared libraries are loaded at an image base (0x10000000 by ++ # default) and relocated if they conflict, which is a slow very memory ++ # consuming and fragmenting process. To avoid this, we pick a random, ++ # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link ++ # time. Moving up from 0x10000000 also allows more sbrk(2) space. 
++ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' ++ archive_expsym_cmds='$SED "s|^|_|" $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--retain-symbols-file,$output_objdir/$soname.expsym $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' ++ ;; ++ ++ gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu) ++ tmp_diet=no ++ if test linux-dietlibc = "$host_os"; then ++ case $cc_basename in ++ diet\ *) tmp_diet=yes;; # linux-dietlibc with static linking (!diet-dyn) ++ esac ++ fi ++ if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \ ++ && test no = "$tmp_diet" ++ then ++ tmp_addflag=' $pic_flag' ++ tmp_sharedflag='-shared' ++ case $cc_basename,$host_cpu in ++ pgcc*) # Portland Group C compiler ++ whole_archive_flag_spec='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' ++ tmp_addflag=' $pic_flag' ++ ;; ++ pgf77* | pgf90* | pgf95* | pgfortran*) ++ # Portland Group f77 and f90 compilers ++ whole_archive_flag_spec='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' ++ tmp_addflag=' $pic_flag -Mnomain' ;; ++ ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 ++ tmp_addflag=' -i_dynamic' ;; ++ efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 ++ tmp_addflag=' -i_dynamic -nofor_main' ;; ++ ifc* | ifort*) # Intel Fortran compiler ++ tmp_addflag=' -nofor_main' ;; ++ lf95*) # Lahey Fortran 8.1 ++ whole_archive_flag_spec= ++ tmp_sharedflag='--shared' ;; ++ nagfor*) # NAGFOR 5.3 ++ tmp_sharedflag='-Wl,-shared' ;; ++ xl[cC]* | bgxl[cC]* | mpixl[cC]*) # IBM XL C 8.0 on PPC (deal 
with xlf below) ++ tmp_sharedflag='-qmkshrobj' ++ tmp_addflag= ;; ++ nvcc*) # Cuda Compiler Driver 2.2 ++ whole_archive_flag_spec='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' ++ compiler_needs_object=yes ++ ;; ++ esac ++ case `$CC -V 2>&1 | $SED 5q` in ++ *Sun\ C*) # Sun C 5.9 ++ whole_archive_flag_spec='$wl--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' ++ compiler_needs_object=yes ++ tmp_sharedflag='-G' ;; ++ *Sun\ F*) # Sun Fortran 8.3 ++ tmp_sharedflag='-G' ;; ++ esac ++ archive_cmds='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' ++ ++ if test yes = "$supports_anon_versioning"; then ++ archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ ++ cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ ++ echo "local: *; };" >> $output_objdir/$libname.ver~ ++ $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-version-script $wl$output_objdir/$libname.ver -o $lib' ++ fi ++ ++ case $cc_basename in ++ tcc*) ++ export_dynamic_flag_spec='-rdynamic' ++ ;; ++ xlf* | bgf* | bgxlf* | mpixlf*) ++ # IBM XL Fortran 10.1 on PPC cannot create shared libs itself ++ whole_archive_flag_spec='--whole-archive$convenience --no-whole-archive' ++ hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' ++ archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib' ++ if test yes = "$supports_anon_versioning"; then ++ archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ ++ cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ ++ echo "local: *; };" >> $output_objdir/$libname.ver~ ++ $LD -shared $libobjs $deplibs 
$linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' ++ fi ++ ;; ++ esac ++ else ++ ld_shlibs=no ++ fi ++ ;; ++ ++ netbsd*) ++ if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then ++ archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' ++ wlarc= ++ else ++ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' ++ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ++ fi ++ ;; ++ ++ solaris*) ++ if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then ++ ld_shlibs=no ++ cat <<_LT_EOF 1>&2 ++ ++*** Warning: The releases 2.8.* of the GNU linker cannot reliably ++*** create shared libraries on Solaris systems. Therefore, libtool ++*** is disabling shared libraries support. We urge you to upgrade GNU ++*** binutils to release 2.9.1 or newer. Another option is to modify ++*** your PATH or compiler configuration so that the native linker is ++*** used, and then restart. ++ ++_LT_EOF ++ elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then ++ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' ++ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ++ else ++ ld_shlibs=no ++ fi ++ ;; ++ ++ sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) ++ case `$LD -v 2>&1` in ++ *\ [01].* | *\ 2.[0-9].* | *\ 2.1[0-5].*) ++ ld_shlibs=no ++ cat <<_LT_EOF 1>&2 ++ ++*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 cannot ++*** reliably create shared libraries on SCO systems. Therefore, libtool ++*** is disabling shared libraries support. We urge you to upgrade GNU ++*** binutils to release 2.16.91.0.3 or newer. 
Another option is to modify ++*** your PATH or compiler configuration so that the native linker is ++*** used, and then restart. ++ ++_LT_EOF ++ ;; ++ *) ++ # For security reasons, it is highly recommended that you always ++ # use absolute paths for naming shared libraries, and exclude the ++ # DT_RUNPATH tag from executables and libraries. But doing so ++ # requires that you compile everything twice, which is a pain. ++ if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then ++ hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' ++ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' ++ archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ++ else ++ ld_shlibs=no ++ fi ++ ;; ++ esac ++ ;; ++ ++ sunos4*) ++ archive_cmds='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' ++ wlarc= ++ hardcode_direct=yes ++ hardcode_shlibpath_var=no ++ ;; ++ ++ *) ++ if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then ++ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' ++ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ++ else ++ ld_shlibs=no ++ fi ++ ;; ++ esac ++ ++ if test no = "$ld_shlibs"; then ++ runpath_var= ++ hardcode_libdir_flag_spec= ++ export_dynamic_flag_spec= ++ whole_archive_flag_spec= ++ fi ++ else ++ # PORTME fill in a description of your system's linker (not GNU ld) ++ case $host_os in ++ aix3*) ++ allow_undefined_flag=unsupported ++ always_export_symbols=yes ++ archive_expsym_cmds='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' ++ # Note: this linker hardcodes the directories in LIBPATH if there ++ # are no directories specified by 
-L. ++ hardcode_minus_L=yes ++ if test yes = "$GCC" && test -z "$lt_prog_compiler_static"; then ++ # Neither direct hardcoding nor static linking is supported with a ++ # broken collect2. ++ hardcode_direct=unsupported ++ fi ++ ;; ++ ++ aix[4-9]*) ++ if test ia64 = "$host_cpu"; then ++ # On IA64, the linker does run time linking by default, so we don't ++ # have to do anything special. ++ aix_use_runtimelinking=no ++ exp_sym_flag='-Bexport' ++ no_entry_flag= ++ else ++ # If we're using GNU nm, then we don't want the "-C" option. ++ # -C means demangle to GNU nm, but means don't demangle to AIX nm. ++ # Without the "-l" option, or with the "-B" option, AIX nm treats ++ # weak defined symbols like other global defined symbols, whereas ++ # GNU nm marks them as "W". ++ # While the 'weak' keyword is ignored in the Export File, we need ++ # it in the Import File for the 'aix-soname' feature, so we have ++ # to replace the "-B" option with "-P" for AIX nm. ++ if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then ++ export_symbols_cmds='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { if (\$ 2 == "W") { print \$ 3 " weak" } else { print \$ 3 } } }'\'' | sort -u > $export_symbols' ++ else ++ export_symbols_cmds='`func_echo_all $NM | $SED -e '\''s/B\([^B]*\)$/P\1/'\''` -PCpgl $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "L") || (\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) && (substr(\$ 1,1,1) != ".")) { if ((\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) { print \$ 1 " weak" } else { print \$ 1 } } }'\'' | sort -u > $export_symbols' ++ fi ++ aix_use_runtimelinking=no ++ ++ # Test if we are trying to use run time linking or normal ++ # AIX style linking. If -brtl is somewhere in LDFLAGS, we ++ # have runtime linking enabled, and use it for executables. 
++ # For shared libraries, we enable/disable runtime linking ++ # depending on the kind of the shared library created - ++ # when "with_aix_soname,aix_use_runtimelinking" is: ++ # "aix,no" lib.a(lib.so.V) shared, rtl:no, for executables ++ # "aix,yes" lib.so shared, rtl:yes, for executables ++ # lib.a static archive ++ # "both,no" lib.so.V(shr.o) shared, rtl:yes ++ # lib.a(lib.so.V) shared, rtl:no, for executables ++ # "both,yes" lib.so.V(shr.o) shared, rtl:yes, for executables ++ # lib.a(lib.so.V) shared, rtl:no ++ # "svr4,*" lib.so.V(shr.o) shared, rtl:yes, for executables ++ # lib.a static archive ++ case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*) ++ for ld_flag in $LDFLAGS; do ++ if (test x-brtl = "x$ld_flag" || test x-Wl,-brtl = "x$ld_flag"); then ++ aix_use_runtimelinking=yes ++ break ++ fi ++ done ++ if test svr4,no = "$with_aix_soname,$aix_use_runtimelinking"; then ++ # With aix-soname=svr4, we create the lib.so.V shared archives only, ++ # so we don't have lib.a shared libs to link our executables. ++ # We have to force runtime linking in this case. ++ aix_use_runtimelinking=yes ++ LDFLAGS="$LDFLAGS -Wl,-brtl" ++ fi ++ ;; ++ esac ++ ++ exp_sym_flag='-bexport' ++ no_entry_flag='-bnoentry' ++ fi ++ ++ # When large executables or shared objects are built, AIX ld can ++ # have problems creating the table of contents. If linking a library ++ # or program results in "error TOC overflow" add -mminimal-toc to ++ # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not ++ # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. ++ ++ archive_cmds='' ++ hardcode_direct=yes ++ hardcode_direct_absolute=yes ++ hardcode_libdir_separator=':' ++ link_all_deplibs=yes ++ file_list_spec='$wl-f,' ++ case $with_aix_soname,$aix_use_runtimelinking in ++ aix,*) ;; # traditional, no import file ++ svr4,* | *,yes) # use import file ++ # The Import File defines what to hardcode. 
++ hardcode_direct=no ++ hardcode_direct_absolute=no ++ ;; ++ esac ++ ++ if test yes = "$GCC"; then ++ case $host_os in aix4.[012]|aix4.[012].*) ++ # We only want to do this on AIX 4.2 and lower, the check ++ # below for broken collect2 doesn't work under 4.3+ ++ collect2name=`$CC -print-prog-name=collect2` ++ if test -f "$collect2name" && ++ strings "$collect2name" | $GREP resolve_lib_name >/dev/null ++ then ++ # We have reworked collect2 ++ : ++ else ++ # We have old collect2 ++ hardcode_direct=unsupported ++ # It fails to find uninstalled libraries when the uninstalled ++ # path is not listed in the libpath. Setting hardcode_minus_L ++ # to unsupported forces relinking ++ hardcode_minus_L=yes ++ hardcode_libdir_flag_spec='-L$libdir' ++ hardcode_libdir_separator= ++ fi ++ ;; ++ esac ++ shared_flag='-shared' ++ if test yes = "$aix_use_runtimelinking"; then ++ shared_flag="$shared_flag "'$wl-G' ++ fi ++ # Need to ensure runtime linking is disabled for the traditional ++ # shared library, or the linker may eventually find shared libraries ++ # /with/ Import File - we do not want to mix them. ++ shared_flag_aix='-shared' ++ shared_flag_svr4='-shared $wl-G' ++ else ++ # not using gcc ++ if test ia64 = "$host_cpu"; then ++ # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release ++ # chokes on -Wl,-G. The following line is correct: ++ shared_flag='-G' ++ else ++ if test yes = "$aix_use_runtimelinking"; then ++ shared_flag='$wl-G' ++ else ++ shared_flag='$wl-bM:SRE' ++ fi ++ shared_flag_aix='$wl-bM:SRE' ++ shared_flag_svr4='$wl-G' ++ fi ++ fi ++ ++ export_dynamic_flag_spec='$wl-bexpall' ++ # It seems that -bexpall does not export symbols beginning with ++ # underscore (_), so it is better to generate a list of symbols to export. 
++ always_export_symbols=yes ++ if test aix,yes = "$with_aix_soname,$aix_use_runtimelinking"; then ++ # Warning - without using the other runtime loading flags (-brtl), ++ # -berok will link without error, but may produce a broken library. ++ allow_undefined_flag='-berok' ++ # Determine the default libpath from the value encoded in an ++ # empty executable. ++ if test set = "${lt_cv_aix_libpath+set}"; then ++ aix_libpath=$lt_cv_aix_libpath ++else ++ if test ${lt_cv_aix_libpath_+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++ ++int ++main (void) ++{ ++ ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_c_try_link "$LINENO" ++then : ++ ++ lt_aix_libpath_sed=' ++ /Import File Strings/,/^$/ { ++ /^0/ { ++ s/^0 *\([^ ]*\) *$/\1/ ++ p ++ } ++ }' ++ lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ # Check for a 64-bit object if we didn't find anything. ++ if test -z "$lt_cv_aix_libpath_"; then ++ lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ fi ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam \ ++ conftest$ac_exeext conftest.$ac_ext ++ if test -z "$lt_cv_aix_libpath_"; then ++ lt_cv_aix_libpath_=/usr/lib:/lib ++ fi ++ ++fi ++ ++ aix_libpath=$lt_cv_aix_libpath_ ++fi ++ ++ hardcode_libdir_flag_spec='$wl-blibpath:$libdir:'"$aix_libpath" ++ archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs $wl'$no_entry_flag' $compiler_flags `if test -n "$allow_undefined_flag"; then func_echo_all "$wl$allow_undefined_flag"; else :; fi` $wl'$exp_sym_flag:\$export_symbols' '$shared_flag ++ else ++ if test ia64 = "$host_cpu"; then ++ hardcode_libdir_flag_spec='$wl-R $libdir:/usr/lib:/lib' ++ allow_undefined_flag="-z nodefs" ++ archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\$wl$no_entry_flag"' $compiler_flags $wl$allow_undefined_flag 
'"\$wl$exp_sym_flag:\$export_symbols" ++ else ++ # Determine the default libpath from the value encoded in an ++ # empty executable. ++ if test set = "${lt_cv_aix_libpath+set}"; then ++ aix_libpath=$lt_cv_aix_libpath ++else ++ if test ${lt_cv_aix_libpath_+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++ ++int ++main (void) ++{ ++ ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_c_try_link "$LINENO" ++then : ++ ++ lt_aix_libpath_sed=' ++ /Import File Strings/,/^$/ { ++ /^0/ { ++ s/^0 *\([^ ]*\) *$/\1/ ++ p ++ } ++ }' ++ lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ # Check for a 64-bit object if we didn't find anything. ++ if test -z "$lt_cv_aix_libpath_"; then ++ lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ fi ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam \ ++ conftest$ac_exeext conftest.$ac_ext ++ if test -z "$lt_cv_aix_libpath_"; then ++ lt_cv_aix_libpath_=/usr/lib:/lib ++ fi ++ ++fi ++ ++ aix_libpath=$lt_cv_aix_libpath_ ++fi ++ ++ hardcode_libdir_flag_spec='$wl-blibpath:$libdir:'"$aix_libpath" ++ # Warning - without using the other run time loading flags, ++ # -berok will link without error, but may produce a broken library. ++ no_undefined_flag=' $wl-bernotok' ++ allow_undefined_flag=' $wl-berok' ++ if test yes = "$with_gnu_ld"; then ++ # We only use this code for GNU lds that support --whole-archive. 
++ whole_archive_flag_spec='$wl--whole-archive$convenience $wl--no-whole-archive' ++ else ++ # Exported symbols can be pulled into shared objects from archives ++ whole_archive_flag_spec='$convenience' ++ fi ++ archive_cmds_need_lc=yes ++ archive_expsym_cmds='$RM -r $output_objdir/$realname.d~$MKDIR $output_objdir/$realname.d' ++ # -brtl affects multiple linker settings, -berok does not and is overridden later ++ compiler_flags_filtered='`func_echo_all "$compiler_flags " | $SED -e "s%-brtl\\([, ]\\)%-berok\\1%g"`' ++ if test svr4 != "$with_aix_soname"; then ++ # This is similar to how AIX traditionally builds its shared libraries. ++ archive_expsym_cmds="$archive_expsym_cmds"'~$CC '$shared_flag_aix' -o $output_objdir/$realname.d/$soname $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$realname.d/$soname' ++ fi ++ if test aix != "$with_aix_soname"; then ++ archive_expsym_cmds="$archive_expsym_cmds"'~$CC '$shared_flag_svr4' -o $output_objdir/$realname.d/$shared_archive_member_spec.o $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$STRIP -e $output_objdir/$realname.d/$shared_archive_member_spec.o~( func_echo_all "#! 
$soname($shared_archive_member_spec.o)"; if test shr_64 = "$shared_archive_member_spec"; then func_echo_all "# 64"; else func_echo_all "# 32"; fi; cat $export_symbols ) > $output_objdir/$realname.d/$shared_archive_member_spec.imp~$AR $AR_FLAGS $output_objdir/$soname $output_objdir/$realname.d/$shared_archive_member_spec.o $output_objdir/$realname.d/$shared_archive_member_spec.imp' ++ else ++ # used by -dlpreopen to get the symbols ++ archive_expsym_cmds="$archive_expsym_cmds"'~$MV $output_objdir/$realname.d/$soname $output_objdir' ++ fi ++ archive_expsym_cmds="$archive_expsym_cmds"'~$RM -r $output_objdir/$realname.d' ++ fi ++ fi ++ ;; ++ ++ amigaos*) ++ case $host_cpu in ++ powerpc) ++ # see comment about AmigaOS4 .so support ++ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' ++ archive_expsym_cmds='' ++ ;; ++ m68k) ++ archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' ++ hardcode_libdir_flag_spec='-L$libdir' ++ hardcode_minus_L=yes ++ ;; ++ esac ++ ;; ++ ++ bsdi[45]*) ++ export_dynamic_flag_spec=-rdynamic ++ ;; ++ ++ cygwin* | mingw* | pw32* | cegcc*) ++ # When not using gcc, we currently assume that we are using ++ # Microsoft Visual C++ or Intel C++ Compiler. ++ # hardcode_libdir_flag_spec is actually meaningless, as there is ++ # no search path for DLLs. ++ case $cc_basename in ++ cl* | icl*) ++ # Native MSVC or ICC ++ hardcode_libdir_flag_spec=' ' ++ allow_undefined_flag=unsupported ++ always_export_symbols=yes ++ file_list_spec='@' ++ # Tell ltmain to make .lib files, not .a files. ++ libext=lib ++ # Tell ltmain to make .dll files, not .so files. 
++ shrext_cmds=.dll ++ # FIXME: Setting linknames here is a bad hack. ++ archive_cmds='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~linknames=' ++ archive_expsym_cmds='if test DEF = "`$SED -n -e '\''s/^[ ]*//'\'' -e '\''/^\(;.*\)*$/d'\'' -e '\''s/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p'\'' -e q $export_symbols`" ; then ++ cp "$export_symbols" "$output_objdir/$soname.def"; ++ echo "$tool_output_objdir$soname.def" > "$output_objdir/$soname.exp"; ++ else ++ $SED -e '\''s/^/-link -EXPORT:/'\'' < $export_symbols > $output_objdir/$soname.exp; ++ fi~ ++ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ ++ linknames=' ++ # The linker will not automatically build a static lib if we build a DLL. ++ # _LT_TAGVAR(old_archive_from_new_cmds, )='true' ++ enable_shared_with_static_runtimes=yes ++ exclude_expsyms='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' ++ export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1,DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' ++ # Don't use ranlib ++ old_postinstall_cmds='chmod 644 $oldlib' ++ postlink_cmds='lt_outputfile="@OUTPUT@"~ ++ lt_tool_outputfile="@TOOL_OUTPUT@"~ ++ case $lt_outputfile in ++ *.exe|*.EXE) ;; ++ *) ++ lt_outputfile=$lt_outputfile.exe ++ lt_tool_outputfile=$lt_tool_outputfile.exe ++ ;; ++ esac~ ++ if test : != "$MANIFEST_TOOL" && test -f "$lt_outputfile.manifest"; then ++ $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; ++ $RM "$lt_outputfile.manifest"; ++ fi' ++ ;; ++ *) ++ # Assume MSVC and ICC wrapper ++ hardcode_libdir_flag_spec=' ' ++ allow_undefined_flag=unsupported ++ # Tell ltmain to make .lib files, not .a files. ++ libext=lib ++ # Tell ltmain to make .dll files, not .so files. 
++ shrext_cmds=.dll ++ # FIXME: Setting linknames here is a bad hack. ++ archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' ++ # The linker will automatically build a .lib file if we build a DLL. ++ old_archive_from_new_cmds='true' ++ # FIXME: Should let the user specify the lib program. ++ old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' ++ enable_shared_with_static_runtimes=yes ++ ;; ++ esac ++ ;; ++ ++ darwin* | rhapsody*) ++ ++ ++ archive_cmds_need_lc=no ++ hardcode_direct=no ++ hardcode_automatic=yes ++ hardcode_shlibpath_var=unsupported ++ if test yes = "$lt_cv_ld_force_load"; then ++ whole_archive_flag_spec='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience $wl-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' ++ ++ else ++ whole_archive_flag_spec='' ++ fi ++ link_all_deplibs=yes ++ allow_undefined_flag=$_lt_dar_allow_undefined ++ case $cc_basename in ++ ifort*|nagfor*) _lt_dar_can_shared=yes ;; ++ *) _lt_dar_can_shared=$GCC ;; ++ esac ++ if test yes = "$_lt_dar_can_shared"; then ++ output_verbose_link_cmd=func_echo_all ++ archive_cmds="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dsymutil" ++ module_cmds="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dsymutil" ++ archive_expsym_cmds="$SED 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dar_export_syms$_lt_dsymutil" ++ module_expsym_cmds="$SED -e 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dar_export_syms$_lt_dsymutil" ++ ++ else ++ ld_shlibs=no 
++ fi ++ ++ ;; ++ ++ dgux*) ++ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' ++ hardcode_libdir_flag_spec='-L$libdir' ++ hardcode_shlibpath_var=no ++ ;; ++ ++ # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor ++ # support. Future versions do this automatically, but an explicit c++rt0.o ++ # does not break anything, and helps significantly (at the cost of a little ++ # extra space). ++ freebsd2.2*) ++ archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' ++ hardcode_libdir_flag_spec='-R$libdir' ++ hardcode_direct=yes ++ hardcode_shlibpath_var=no ++ ;; ++ ++ # Unfortunately, older versions of FreeBSD 2 do not have this feature. ++ freebsd2.*) ++ archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' ++ hardcode_direct=yes ++ hardcode_minus_L=yes ++ hardcode_shlibpath_var=no ++ ;; ++ ++ # FreeBSD 3 and greater uses gcc -shared to do shared libraries. ++ freebsd* | dragonfly* | midnightbsd*) ++ archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' ++ hardcode_libdir_flag_spec='-R$libdir' ++ hardcode_direct=yes ++ hardcode_shlibpath_var=no ++ ;; ++ ++ hpux9*) ++ if test yes = "$GCC"; then ++ archive_cmds='$RM $output_objdir/$soname~$CC -shared $pic_flag $wl+b $wl$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' ++ else ++ archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' ++ fi ++ hardcode_libdir_flag_spec='$wl+b $wl$libdir' ++ hardcode_libdir_separator=: ++ hardcode_direct=yes ++ ++ # hardcode_minus_L: Not really in the search PATH, ++ # but as the default location of the library. 
++ hardcode_minus_L=yes ++ export_dynamic_flag_spec='$wl-E' ++ ;; ++ ++ hpux10*) ++ if test yes,no = "$GCC,$with_gnu_ld"; then ++ archive_cmds='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ++ else ++ archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' ++ fi ++ if test no = "$with_gnu_ld"; then ++ hardcode_libdir_flag_spec='$wl+b $wl$libdir' ++ hardcode_libdir_separator=: ++ hardcode_direct=yes ++ hardcode_direct_absolute=yes ++ export_dynamic_flag_spec='$wl-E' ++ # hardcode_minus_L: Not really in the search PATH, ++ # but as the default location of the library. ++ hardcode_minus_L=yes ++ fi ++ ;; ++ ++ hpux11*) ++ if test yes,no = "$GCC,$with_gnu_ld"; then ++ case $host_cpu in ++ hppa*64*) ++ archive_cmds='$CC -shared $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' ++ ;; ++ ia64*) ++ archive_cmds='$CC -shared $pic_flag $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' ++ ;; ++ *) ++ archive_cmds='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ++ ;; ++ esac ++ else ++ case $host_cpu in ++ hppa*64*) ++ archive_cmds='$CC -b $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' ++ ;; ++ ia64*) ++ archive_cmds='$CC -b $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' ++ ;; ++ *) ++ ++ # Older versions of the 11.00 compiler do not understand -b yet ++ # (HP92453-01 A.11.01.20 doesn't, HP92453-01 B.11.X.35175-35176.GP does) ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $CC understands -b" >&5 ++printf %s "checking if $CC understands -b... 
" >&6; } ++if test ${lt_cv_prog_compiler__b+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ lt_cv_prog_compiler__b=no ++ save_LDFLAGS=$LDFLAGS ++ LDFLAGS="$LDFLAGS -b" ++ echo "$lt_simple_link_test_code" > conftest.$ac_ext ++ if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then ++ # The linker can only warn and ignore the option if not recognized ++ # So say no if there are warnings ++ if test -s conftest.err; then ++ # Append any errors to the config.log. ++ cat conftest.err 1>&5 ++ $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp ++ $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 ++ if diff conftest.exp conftest.er2 >/dev/null; then ++ lt_cv_prog_compiler__b=yes ++ fi ++ else ++ lt_cv_prog_compiler__b=yes ++ fi ++ fi ++ $RM -r conftest* ++ LDFLAGS=$save_LDFLAGS ++ ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler__b" >&5 ++printf "%s\n" "$lt_cv_prog_compiler__b" >&6; } ++ ++if test yes = "$lt_cv_prog_compiler__b"; then ++ archive_cmds='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ++else ++ archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' ++fi ++ ++ ;; ++ esac ++ fi ++ if test no = "$with_gnu_ld"; then ++ hardcode_libdir_flag_spec='$wl+b $wl$libdir' ++ hardcode_libdir_separator=: ++ ++ case $host_cpu in ++ hppa*64*|ia64*) ++ hardcode_direct=no ++ hardcode_shlibpath_var=no ++ ;; ++ *) ++ hardcode_direct=yes ++ hardcode_direct_absolute=yes ++ export_dynamic_flag_spec='$wl-E' ++ ++ # hardcode_minus_L: Not really in the search PATH, ++ # but as the default location of the library. 
++ hardcode_minus_L=yes ++ ;; ++ esac ++ fi ++ ;; ++ ++ irix5* | irix6* | nonstopux*) ++ if test yes = "$GCC"; then ++ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' ++ # Try to use the -exported_symbol ld option, if it does not ++ # work, assume that -exports_file does not work either and ++ # implicitly export all symbols. ++ # This should be the same for all languages, so no per-tag cache variable. ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the $host_os linker accepts -exported_symbol" >&5 ++printf %s "checking whether the $host_os linker accepts -exported_symbol... " >&6; } ++if test ${lt_cv_irix_exported_symbol+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ save_LDFLAGS=$LDFLAGS ++ LDFLAGS="$LDFLAGS -shared $wl-exported_symbol ${wl}foo $wl-update_registry $wl/dev/null" ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. 
*/ ++int foo (void) { return 0; } ++_ACEOF ++if ac_fn_c_try_link "$LINENO" ++then : ++ lt_cv_irix_exported_symbol=yes ++else $as_nop ++ lt_cv_irix_exported_symbol=no ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam \ ++ conftest$ac_exeext conftest.$ac_ext ++ LDFLAGS=$save_LDFLAGS ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_irix_exported_symbol" >&5 ++printf "%s\n" "$lt_cv_irix_exported_symbol" >&6; } ++ if test yes = "$lt_cv_irix_exported_symbol"; then ++ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations $wl-exports_file $wl$export_symbols -o $lib' ++ fi ++ else ++ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' ++ archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -exports_file $export_symbols -o $lib' ++ fi ++ archive_cmds_need_lc='no' ++ hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' ++ hardcode_libdir_separator=: ++ inherit_rpath=yes ++ link_all_deplibs=yes ++ ;; ++ ++ linux*) ++ case $cc_basename in ++ tcc*) ++ # Fabrice Bellard et al's Tiny C Compiler ++ ld_shlibs=yes ++ archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' ++ ;; ++ esac ++ ;; ++ ++ netbsd*) ++ if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then ++ archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out ++ else ++ archive_cmds='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF ++ fi ++ hardcode_libdir_flag_spec='-R$libdir' ++ hardcode_direct=yes ++ hardcode_shlibpath_var=no ++ ;; ++ ++ newsos6) ++ archive_cmds='$LD -G -h $soname -o 
$lib $libobjs $deplibs $linker_flags' ++ hardcode_direct=yes ++ hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' ++ hardcode_libdir_separator=: ++ hardcode_shlibpath_var=no ++ ;; ++ ++ *nto* | *qnx*) ++ ;; ++ ++ openbsd* | bitrig*) ++ if test -f /usr/libexec/ld.so; then ++ hardcode_direct=yes ++ hardcode_shlibpath_var=no ++ hardcode_direct_absolute=yes ++ if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then ++ archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' ++ archive_expsym_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags $wl-retain-symbols-file,$export_symbols' ++ hardcode_libdir_flag_spec='$wl-rpath,$libdir' ++ export_dynamic_flag_spec='$wl-E' ++ else ++ archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' ++ hardcode_libdir_flag_spec='$wl-rpath,$libdir' ++ fi ++ else ++ ld_shlibs=no ++ fi ++ ;; ++ ++ os2*) ++ hardcode_libdir_flag_spec='-L$libdir' ++ hardcode_minus_L=yes ++ allow_undefined_flag=unsupported ++ shrext_cmds=.dll ++ archive_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ ++ $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ ++ $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ ++ $ECHO EXPORTS >> $output_objdir/$libname.def~ ++ emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ ++ $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ ++ emximp -o $lib $output_objdir/$libname.def' ++ archive_expsym_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ ++ $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ ++ $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ ++ $ECHO EXPORTS >> $output_objdir/$libname.def~ ++ prefix_cmds="$SED"~ ++ if test EXPORTS = "`$SED 1q $export_symbols`"; then ++ prefix_cmds="$prefix_cmds -e 1d"; ++ fi~ ++ 
prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ ++ cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ ++ $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ ++ emximp -o $lib $output_objdir/$libname.def' ++ old_archive_From_new_cmds='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' ++ enable_shared_with_static_runtimes=yes ++ file_list_spec='@' ++ ;; ++ ++ osf3*) ++ if test yes = "$GCC"; then ++ allow_undefined_flag=' $wl-expect_unresolved $wl\*' ++ archive_cmds='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' ++ else ++ allow_undefined_flag=' -expect_unresolved \*' ++ archive_cmds='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' ++ fi ++ archive_cmds_need_lc='no' ++ hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' ++ hardcode_libdir_separator=: ++ ;; ++ ++ osf4* | osf5*) # as osf3* with the addition of -msym flag ++ if test yes = "$GCC"; then ++ allow_undefined_flag=' $wl-expect_unresolved $wl\*' ++ archive_cmds='$CC -shared$allow_undefined_flag $pic_flag $libobjs $deplibs $compiler_flags $wl-msym $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' ++ hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' ++ else ++ allow_undefined_flag=' -expect_unresolved \*' ++ archive_cmds='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' ++ archive_expsym_cmds='for i in `cat $export_symbols`; do printf "%s 
%s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~ ++ $CC -shared$allow_undefined_flag $wl-input $wl$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib~$RM $lib.exp' ++ ++ # Both c and cxx compiler support -rpath directly ++ hardcode_libdir_flag_spec='-rpath $libdir' ++ fi ++ archive_cmds_need_lc='no' ++ hardcode_libdir_separator=: ++ ;; ++ ++ solaris*) ++ no_undefined_flag=' -z defs' ++ if test yes = "$GCC"; then ++ wlarc='$wl' ++ archive_cmds='$CC -shared $pic_flag $wl-z ${wl}text $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' ++ archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ ++ $CC -shared $pic_flag $wl-z ${wl}text $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' ++ else ++ case `$CC -V 2>&1` in ++ *"Compilers 5.0"*) ++ wlarc='' ++ archive_cmds='$LD -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $linker_flags' ++ archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ ++ $LD -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp' ++ ;; ++ *) ++ wlarc='$wl' ++ archive_cmds='$CC -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $compiler_flags' ++ archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ ++ $CC -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' ++ ;; ++ esac ++ fi ++ hardcode_libdir_flag_spec='-R$libdir' ++ hardcode_shlibpath_var=no ++ case $host_os in ++ solaris2.[0-5] | solaris2.[0-5].*) ;; ++ *) ++ # The compiler driver will combine and reorder linker options, ++ # 
but understands '-z linker_flag'. GCC discards it without '$wl', ++ # but is careful enough not to reorder. ++ # Supported since Solaris 2.6 (maybe 2.5.1?) ++ if test yes = "$GCC"; then ++ whole_archive_flag_spec='$wl-z ${wl}allextract$convenience $wl-z ${wl}defaultextract' ++ else ++ whole_archive_flag_spec='-z allextract$convenience -z defaultextract' ++ fi ++ ;; ++ esac ++ link_all_deplibs=yes ++ ;; ++ ++ sunos4*) ++ if test sequent = "$host_vendor"; then ++ # Use $CC to link under sequent, because it throws in some extra .o ++ # files that make .init and .fini sections work. ++ archive_cmds='$CC -G $wl-h $soname -o $lib $libobjs $deplibs $compiler_flags' ++ else ++ archive_cmds='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' ++ fi ++ hardcode_libdir_flag_spec='-L$libdir' ++ hardcode_direct=yes ++ hardcode_minus_L=yes ++ hardcode_shlibpath_var=no ++ ;; ++ ++ sysv4) ++ case $host_vendor in ++ sni) ++ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' ++ hardcode_direct=yes # is this really true??? ++ ;; ++ siemens) ++ ## LD is ld it makes a PLAMLIB ++ ## CC just makes a GrossModule. 
++ archive_cmds='$LD -G -o $lib $libobjs $deplibs $linker_flags' ++ reload_cmds='$CC -r -o $output$reload_objs' ++ hardcode_direct=no ++ ;; ++ motorola) ++ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' ++ hardcode_direct=no #Motorola manual says yes, but my tests say they lie ++ ;; ++ esac ++ runpath_var='LD_RUN_PATH' ++ hardcode_shlibpath_var=no ++ ;; ++ ++ sysv4.3*) ++ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' ++ hardcode_shlibpath_var=no ++ export_dynamic_flag_spec='-Bexport' ++ ;; ++ ++ sysv4*MP*) ++ if test -d /usr/nec; then ++ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' ++ hardcode_shlibpath_var=no ++ runpath_var=LD_RUN_PATH ++ hardcode_runpath_var=yes ++ ld_shlibs=yes ++ fi ++ ;; ++ ++ sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) ++ no_undefined_flag='$wl-z,text' ++ archive_cmds_need_lc=no ++ hardcode_shlibpath_var=no ++ runpath_var='LD_RUN_PATH' ++ ++ if test yes = "$GCC"; then ++ archive_cmds='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ archive_expsym_cmds='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ else ++ archive_cmds='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ archive_expsym_cmds='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ fi ++ ;; ++ ++ sysv5* | sco3.2v5* | sco5v6*) ++ # Note: We CANNOT use -z defs as we might desire, because we do not ++ # link with -lc, and that would cause any symbols used from libc to ++ # always be unresolved, which means just about no library would ++ # ever link correctly. If we're not using GNU ld we use -z text ++ # though, which does catch some bad symbols but isn't as heavy-handed ++ # as -z defs. 
++ no_undefined_flag='$wl-z,text' ++ allow_undefined_flag='$wl-z,nodefs' ++ archive_cmds_need_lc=no ++ hardcode_shlibpath_var=no ++ hardcode_libdir_flag_spec='$wl-R,$libdir' ++ hardcode_libdir_separator=':' ++ link_all_deplibs=yes ++ export_dynamic_flag_spec='$wl-Bexport' ++ runpath_var='LD_RUN_PATH' ++ ++ if test yes = "$GCC"; then ++ archive_cmds='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ archive_expsym_cmds='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ else ++ archive_cmds='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ archive_expsym_cmds='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ fi ++ ;; ++ ++ uts4*) ++ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' ++ hardcode_libdir_flag_spec='-L$libdir' ++ hardcode_shlibpath_var=no ++ ;; ++ ++ *) ++ ld_shlibs=no ++ ;; ++ esac ++ ++ if test sni = "$host_vendor"; then ++ case $host in ++ sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) ++ export_dynamic_flag_spec='$wl-Blargedynsym' ++ ;; ++ esac ++ fi ++ fi ++ ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs" >&5 ++printf "%s\n" "$ld_shlibs" >&6; } ++test no = "$ld_shlibs" && can_build_shared=no ++ ++with_gnu_ld=$with_gnu_ld ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++# ++# Do we need to explicitly link libc? ++# ++case "x$archive_cmds_need_lc" in ++x|xyes) ++ # Assume -lc should be added ++ archive_cmds_need_lc=yes ++ ++ if test yes,yes = "$GCC,$enable_shared"; then ++ case $archive_cmds in ++ *'~'*) ++ # FIXME: we may have to deal with multi-command sequences. ++ ;; ++ '$CC '*) ++ # Test whether the compiler implicitly links with -lc since on some ++ # systems, -lgcc has to come before -lc. If gcc already passes -lc ++ # to ld, don't add -lc before -lgcc. 
++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5 ++printf %s "checking whether -lc should be explicitly linked in... " >&6; } ++if test ${lt_cv_archive_cmds_need_lc+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ $RM conftest* ++ echo "$lt_simple_compile_test_code" > conftest.$ac_ext ++ ++ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 ++ (eval $ac_compile) 2>&5 ++ ac_status=$? ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ test $ac_status = 0; } 2>conftest.err; then ++ soname=conftest ++ lib=conftest ++ libobjs=conftest.$ac_objext ++ deplibs= ++ wl=$lt_prog_compiler_wl ++ pic_flag=$lt_prog_compiler_pic ++ compiler_flags=-v ++ linker_flags=-v ++ verstring= ++ output_objdir=. ++ libname=conftest ++ lt_save_allow_undefined_flag=$allow_undefined_flag ++ allow_undefined_flag= ++ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5 ++ (eval $archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5 ++ ac_status=$? ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 ++ test $ac_status = 0; } ++ then ++ lt_cv_archive_cmds_need_lc=no ++ else ++ lt_cv_archive_cmds_need_lc=yes ++ fi ++ allow_undefined_flag=$lt_save_allow_undefined_flag ++ else ++ cat conftest.err 1>&5 ++ fi ++ $RM conftest* ++ ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc" >&5 ++printf "%s\n" "$lt_cv_archive_cmds_need_lc" >&6; } ++ archive_cmds_need_lc=$lt_cv_archive_cmds_need_lc ++ ;; ++ esac ++ fi ++ ;; ++esac ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5 ++printf %s "checking dynamic linker characteristics... " >&6; } ++ ++if test yes = "$GCC"; then ++ case $host_os in ++ darwin*) lt_awk_arg='/^libraries:/,/LR/' ;; ++ *) lt_awk_arg='/^libraries:/' ;; ++ esac ++ case $host_os in ++ mingw* | cegcc*) lt_sed_strip_eq='s|=\([A-Za-z]:\)|\1|g' ;; ++ *) lt_sed_strip_eq='s|=/|/|g' ;; ++ esac ++ lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e $lt_sed_strip_eq` ++ case $lt_search_path_spec in ++ *\;*) ++ # if the path contains ";" then we assume it to be the separator ++ # otherwise default to the standard path separator (i.e. ":") - it is ++ # assumed that no part of a normal pathname contains ";" but that should ++ # okay in the real world where ";" in dirpaths is itself problematic. 
++ lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED 's/;/ /g'` ++ ;; ++ *) ++ lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED "s/$PATH_SEPARATOR/ /g"` ++ ;; ++ esac ++ # Ok, now we have the path, separated by spaces, we can step through it ++ # and add multilib dir if necessary... ++ lt_tmp_lt_search_path_spec= ++ lt_multi_os_dir=/`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null` ++ # ...but if some path component already ends with the multilib dir we assume ++ # that all is fine and trust -print-search-dirs as is (GCC 4.2? or newer). ++ case "$lt_multi_os_dir; $lt_search_path_spec " in ++ "/; "* | "/.; "* | "/./; "* | *"$lt_multi_os_dir "* | *"$lt_multi_os_dir/ "*) ++ lt_multi_os_dir= ++ ;; ++ esac ++ for lt_sys_path in $lt_search_path_spec; do ++ if test -d "$lt_sys_path$lt_multi_os_dir"; then ++ lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path$lt_multi_os_dir" ++ elif test -n "$lt_multi_os_dir"; then ++ test -d "$lt_sys_path" && \ ++ lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path" ++ fi ++ done ++ lt_search_path_spec=`$ECHO "$lt_tmp_lt_search_path_spec" | awk ' ++BEGIN {RS = " "; FS = "/|\n";} { ++ lt_foo = ""; ++ lt_count = 0; ++ for (lt_i = NF; lt_i > 0; lt_i--) { ++ if ($lt_i != "" && $lt_i != ".") { ++ if ($lt_i == "..") { ++ lt_count++; ++ } else { ++ if (lt_count == 0) { ++ lt_foo = "/" $lt_i lt_foo; ++ } else { ++ lt_count--; ++ } ++ } ++ } ++ } ++ if (lt_foo != "") { lt_freq[lt_foo]++; } ++ if (lt_freq[lt_foo] == 1) { print lt_foo; } ++}'` ++ # AWK program above erroneously prepends '/' to C:/dos/paths ++ # for these hosts. 
++ case $host_os in ++ mingw* | cegcc*) lt_search_path_spec=`$ECHO "$lt_search_path_spec" |\ ++ $SED 's|/\([A-Za-z]:\)|\1|g'` ;; ++ esac ++ sys_lib_search_path_spec=`$ECHO "$lt_search_path_spec" | $lt_NL2SP` ++else ++ sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" ++fi ++library_names_spec= ++libname_spec='lib$name' ++soname_spec= ++shrext_cmds=.so ++postinstall_cmds= ++postuninstall_cmds= ++finish_cmds= ++finish_eval= ++shlibpath_var= ++shlibpath_overrides_runpath=unknown ++version_type=none ++dynamic_linker="$host_os ld.so" ++sys_lib_dlsearch_path_spec="/lib /usr/lib" ++need_lib_prefix=unknown ++hardcode_into_libs=no ++ ++# when you set need_version to no, make sure it does not cause -set_version ++# flags to be left without arguments ++need_version=unknown ++ ++ ++ ++case $host_os in ++aix3*) ++ version_type=linux # correct to gnu/linux during the next big refactor ++ library_names_spec='$libname$release$shared_ext$versuffix $libname.a' ++ shlibpath_var=LIBPATH ++ ++ # AIX 3 has no versioning support, so we append a major version to the name. ++ soname_spec='$libname$release$shared_ext$major' ++ ;; ++ ++aix[4-9]*) ++ version_type=linux # correct to gnu/linux during the next big refactor ++ need_lib_prefix=no ++ need_version=no ++ hardcode_into_libs=yes ++ if test ia64 = "$host_cpu"; then ++ # AIX 5 supports IA64 ++ library_names_spec='$libname$release$shared_ext$major $libname$release$shared_ext$versuffix $libname$shared_ext' ++ shlibpath_var=LD_LIBRARY_PATH ++ else ++ # With GCC up to 2.95.x, collect2 would create an import file ++ # for dependence libraries. The import file would start with ++ # the line '#! .'. This would cause the generated library to ++ # depend on '.', always an invalid library. This was fixed in ++ # development snapshots of GCC prior to 3.0. 
++ case $host_os in ++ aix4 | aix4.[01] | aix4.[01].*) ++ if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' ++ echo ' yes ' ++ echo '#endif'; } | $CC -E - | $GREP yes > /dev/null; then ++ : ++ else ++ can_build_shared=no ++ fi ++ ;; ++ esac ++ # Using Import Files as archive members, it is possible to support ++ # filename-based versioning of shared library archives on AIX. While ++ # this would work for both with and without runtime linking, it will ++ # prevent static linking of such archives. So we do filename-based ++ # shared library versioning with .so extension only, which is used ++ # when both runtime linking and shared linking is enabled. ++ # Unfortunately, runtime linking may impact performance, so we do ++ # not want this to be the default eventually. Also, we use the ++ # versioned .so libs for executables only if there is the -brtl ++ # linker flag in LDFLAGS as well, or --with-aix-soname=svr4 only. ++ # To allow for filename-based versioning support, we need to create ++ # libNAME.so.V as an archive file, containing: ++ # *) an Import File, referring to the versioned filename of the ++ # archive as well as the shared archive member, telling the ++ # bitwidth (32 or 64) of that shared object, and providing the ++ # list of exported symbols of that shared object, eventually ++ # decorated with the 'weak' keyword ++ # *) the shared object with the F_LOADONLY flag set, to really avoid ++ # it being seen by the linker. ++ # At run time we better use the real file rather than another symlink, ++ # but for link time we create the symlink libNAME.so -> libNAME.so.V ++ ++ case $with_aix_soname,$aix_use_runtimelinking in ++ # AIX (on Power*) has no versioning support, so currently we cannot hardcode correct ++ # soname into executable. Probably we can add versioning support to ++ # collect2, so additional links can be useful in future. 
++ aix,yes) # traditional libtool ++ dynamic_linker='AIX unversionable lib.so' ++ # If using run time linking (on AIX 4.2 or later) use lib.so ++ # instead of lib.a to let people know that these are not ++ # typical AIX shared libraries. ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ ;; ++ aix,no) # traditional AIX only ++ dynamic_linker='AIX lib.a(lib.so.V)' ++ # We preserve .a as extension for shared libraries through AIX4.2 ++ # and later when we are not doing run time linking. ++ library_names_spec='$libname$release.a $libname.a' ++ soname_spec='$libname$release$shared_ext$major' ++ ;; ++ svr4,*) # full svr4 only ++ dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o)" ++ library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' ++ # We do not specify a path in Import Files, so LIBPATH fires. ++ shlibpath_overrides_runpath=yes ++ ;; ++ *,yes) # both, prefer svr4 ++ dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o), lib.a(lib.so.V)" ++ library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' ++ # unpreferred sharedlib libNAME.a needs extra handling ++ postinstall_cmds='test -n "$linkname" || linkname="$realname"~func_stripname "" ".so" "$linkname"~$install_shared_prog "$dir/$func_stripname_result.$libext" "$destdir/$func_stripname_result.$libext"~test -z "$tstripme" || test -z "$striplib" || $striplib "$destdir/$func_stripname_result.$libext"' ++ postuninstall_cmds='for n in $library_names $old_library; do :; done~func_stripname "" ".so" "$n"~test "$func_stripname_result" = "$n" || func_append rmfiles " $odir/$func_stripname_result.$libext"' ++ # We do not specify a path in Import Files, so LIBPATH fires. 
++ shlibpath_overrides_runpath=yes ++ ;; ++ *,no) # both, prefer aix ++ dynamic_linker="AIX lib.a(lib.so.V), lib.so.V($shared_archive_member_spec.o)" ++ library_names_spec='$libname$release.a $libname.a' ++ soname_spec='$libname$release$shared_ext$major' ++ # unpreferred sharedlib libNAME.so.V and symlink libNAME.so need extra handling ++ postinstall_cmds='test -z "$dlname" || $install_shared_prog $dir/$dlname $destdir/$dlname~test -z "$tstripme" || test -z "$striplib" || $striplib $destdir/$dlname~test -n "$linkname" || linkname=$realname~func_stripname "" ".a" "$linkname"~(cd "$destdir" && $LN_S -f $dlname $func_stripname_result.so)' ++ postuninstall_cmds='test -z "$dlname" || func_append rmfiles " $odir/$dlname"~for n in $old_library $library_names; do :; done~func_stripname "" ".a" "$n"~func_append rmfiles " $odir/$func_stripname_result.so"' ++ ;; ++ esac ++ shlibpath_var=LIBPATH ++ fi ++ ;; ++ ++amigaos*) ++ case $host_cpu in ++ powerpc) ++ # Since July 2007 AmigaOS4 officially supports .so libraries. ++ # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ ;; ++ m68k) ++ library_names_spec='$libname.ixlibrary $libname.a' ++ # Create ${libname}_ixlibrary.a entries in /sys/libs. 
++ finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' ++ ;; ++ esac ++ ;; ++ ++beos*) ++ library_names_spec='$libname$shared_ext' ++ dynamic_linker="$host_os ld.so" ++ shlibpath_var=LIBRARY_PATH ++ ;; ++ ++bsdi[45]*) ++ version_type=linux # correct to gnu/linux during the next big refactor ++ need_version=no ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' ++ shlibpath_var=LD_LIBRARY_PATH ++ sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" ++ sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" ++ # the default ld.so.conf also contains /usr/contrib/lib and ++ # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow ++ # libtool to hard-code these into programs ++ ;; ++ ++cygwin* | mingw* | pw32* | cegcc*) ++ version_type=windows ++ shrext_cmds=.dll ++ need_version=no ++ need_lib_prefix=no ++ ++ case $GCC,$cc_basename in ++ yes,*) ++ # gcc ++ library_names_spec='$libname.dll.a' ++ # DLL is installed to $(libdir)/../bin by postinstall_cmds ++ postinstall_cmds='base_file=`basename \$file`~ ++ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ ++ dldir=$destdir/`dirname \$dlpath`~ ++ test -d \$dldir || mkdir -p \$dldir~ ++ $install_prog $dir/$dlname \$dldir/$dlname~ ++ chmod a+x \$dldir/$dlname~ ++ if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then ++ eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; ++ fi' ++ postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; echo \$dlname'\''`~ ++ dlpath=$dir/\$dldll~ ++ $RM \$dlpath' ++ shlibpath_overrides_runpath=yes ++ ++ case $host_os in ++ cygwin*) ++ # Cygwin DLLs use 'cyg' prefix rather than 'lib' ++ soname_spec='`echo $libname | $SED -e 's/^lib/cyg/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' ++ ++ sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/lib/w32api" ++ ;; ++ mingw* | cegcc*) ++ # MinGW DLLs use traditional 'lib' prefix ++ soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' ++ ;; ++ pw32*) ++ # pw32 DLLs use 'pw' prefix rather than 'lib' ++ library_names_spec='`echo $libname | $SED -e 's/^lib/pw/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' ++ ;; ++ esac ++ dynamic_linker='Win32 ld.exe' ++ ;; ++ ++ *,cl* | *,icl*) ++ # Native MSVC or ICC ++ libname_spec='$name' ++ soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' ++ library_names_spec='$libname.dll.lib' ++ ++ case $build_os in ++ mingw*) ++ sys_lib_search_path_spec= ++ lt_save_ifs=$IFS ++ IFS=';' ++ for lt_path in $LIB ++ do ++ IFS=$lt_save_ifs ++ # Let DOS variable expansion print the short 8.3 style file name. ++ lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` ++ sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" ++ done ++ IFS=$lt_save_ifs ++ # Convert to MSYS style. ++ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` ++ ;; ++ cygwin*) ++ # Convert to unix form, then to dos form, then back to unix form ++ # but this time dos style (no spaces!) so that the unix form looks ++ # like /cygdrive/c/PROGRA~1:/cygdr... 
++ sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` ++ sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` ++ sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` ++ ;; ++ *) ++ sys_lib_search_path_spec=$LIB ++ if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then ++ # It is most probably a Windows format PATH. ++ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` ++ else ++ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` ++ fi ++ # FIXME: find the short name or the path components, as spaces are ++ # common. (e.g. "Program Files" -> "PROGRA~1") ++ ;; ++ esac ++ ++ # DLL is installed to $(libdir)/../bin by postinstall_cmds ++ postinstall_cmds='base_file=`basename \$file`~ ++ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ ++ dldir=$destdir/`dirname \$dlpath`~ ++ test -d \$dldir || mkdir -p \$dldir~ ++ $install_prog $dir/$dlname \$dldir/$dlname' ++ postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ ++ dlpath=$dir/\$dldll~ ++ $RM \$dlpath' ++ shlibpath_overrides_runpath=yes ++ dynamic_linker='Win32 link.exe' ++ ;; ++ ++ *) ++ # Assume MSVC and ICC wrapper ++ library_names_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext $libname.lib' ++ dynamic_linker='Win32 ld.exe' ++ ;; ++ esac ++ # FIXME: first we should search . 
and the directory the executable is in ++ shlibpath_var=PATH ++ ;; ++ ++darwin* | rhapsody*) ++ dynamic_linker="$host_os dyld" ++ version_type=darwin ++ need_lib_prefix=no ++ need_version=no ++ library_names_spec='$libname$release$major$shared_ext $libname$shared_ext' ++ soname_spec='$libname$release$major$shared_ext' ++ shlibpath_overrides_runpath=yes ++ shlibpath_var=DYLD_LIBRARY_PATH ++ shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' ++ ++ sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib" ++ sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' ++ ;; ++ ++dgux*) ++ version_type=linux # correct to gnu/linux during the next big refactor ++ need_lib_prefix=no ++ need_version=no ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ shlibpath_var=LD_LIBRARY_PATH ++ ;; ++ ++freebsd* | dragonfly* | midnightbsd*) ++ # DragonFly does not have aout. When/if they implement a new ++ # versioning mechanism, adjust this. 
++ if test -x /usr/bin/objformat; then ++ objformat=`/usr/bin/objformat` ++ else ++ case $host_os in ++ freebsd[23].*) objformat=aout ;; ++ *) objformat=elf ;; ++ esac ++ fi ++ version_type=freebsd-$objformat ++ case $version_type in ++ freebsd-elf*) ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ need_version=no ++ need_lib_prefix=no ++ ;; ++ freebsd-*) ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' ++ need_version=yes ++ ;; ++ esac ++ shlibpath_var=LD_LIBRARY_PATH ++ case $host_os in ++ freebsd2.*) ++ shlibpath_overrides_runpath=yes ++ ;; ++ freebsd3.[01]* | freebsdelf3.[01]*) ++ shlibpath_overrides_runpath=yes ++ hardcode_into_libs=yes ++ ;; ++ freebsd3.[2-9]* | freebsdelf3.[2-9]* | \ ++ freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1) ++ shlibpath_overrides_runpath=no ++ hardcode_into_libs=yes ++ ;; ++ *) # from 4.6 on, and DragonFly ++ shlibpath_overrides_runpath=yes ++ hardcode_into_libs=yes ++ ;; ++ esac ++ ;; ++ ++haiku*) ++ version_type=linux # correct to gnu/linux during the next big refactor ++ need_lib_prefix=no ++ need_version=no ++ dynamic_linker="$host_os runtime_loader" ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ shlibpath_var=LIBRARY_PATH ++ shlibpath_overrides_runpath=no ++ sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' ++ hardcode_into_libs=yes ++ ;; ++ ++hpux9* | hpux10* | hpux11*) ++ # Give a soname corresponding to the major version so that dld.sl refuses to ++ # link against other versions. 
++ version_type=sunos ++ need_lib_prefix=no ++ need_version=no ++ case $host_cpu in ++ ia64*) ++ shrext_cmds='.so' ++ hardcode_into_libs=yes ++ dynamic_linker="$host_os dld.so" ++ shlibpath_var=LD_LIBRARY_PATH ++ shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ if test 32 = "$HPUX_IA64_MODE"; then ++ sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" ++ sys_lib_dlsearch_path_spec=/usr/lib/hpux32 ++ else ++ sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" ++ sys_lib_dlsearch_path_spec=/usr/lib/hpux64 ++ fi ++ ;; ++ hppa*64*) ++ shrext_cmds='.sl' ++ hardcode_into_libs=yes ++ dynamic_linker="$host_os dld.sl" ++ shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH ++ shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" ++ sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ++ ;; ++ *) ++ shrext_cmds='.sl' ++ dynamic_linker="$host_os dld.sl" ++ shlibpath_var=SHLIB_PATH ++ shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ ;; ++ esac ++ # HP-UX runs *really* slowly unless shared libraries are mode 555, ... 
++ postinstall_cmds='chmod 555 $lib' ++ # or fails outright, so override atomically: ++ install_override_mode=555 ++ ;; ++ ++interix[3-9]*) ++ version_type=linux # correct to gnu/linux during the next big refactor ++ need_lib_prefix=no ++ need_version=no ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' ++ shlibpath_var=LD_LIBRARY_PATH ++ shlibpath_overrides_runpath=no ++ hardcode_into_libs=yes ++ ;; ++ ++irix5* | irix6* | nonstopux*) ++ case $host_os in ++ nonstopux*) version_type=nonstopux ;; ++ *) ++ if test yes = "$lt_cv_prog_gnu_ld"; then ++ version_type=linux # correct to gnu/linux during the next big refactor ++ else ++ version_type=irix ++ fi ;; ++ esac ++ need_lib_prefix=no ++ need_version=no ++ soname_spec='$libname$release$shared_ext$major' ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$release$shared_ext $libname$shared_ext' ++ case $host_os in ++ irix5* | nonstopux*) ++ libsuff= shlibsuff= ++ ;; ++ *) ++ case $LD in # libtool.m4 will add one of these switches to LD ++ *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") ++ libsuff= shlibsuff= libmagic=32-bit;; ++ *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") ++ libsuff=32 shlibsuff=N32 libmagic=N32;; ++ *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") ++ libsuff=64 shlibsuff=64 libmagic=64-bit;; ++ *) libsuff= shlibsuff= libmagic=never-match;; ++ esac ++ ;; ++ esac ++ shlibpath_var=LD_LIBRARY${shlibsuff}_PATH ++ shlibpath_overrides_runpath=no ++ sys_lib_search_path_spec="/usr/lib$libsuff /lib$libsuff /usr/local/lib$libsuff" ++ sys_lib_dlsearch_path_spec="/usr/lib$libsuff /lib$libsuff" ++ hardcode_into_libs=yes ++ ;; ++ ++# No shared lib support for Linux oldld, aout, or coff. 
++linux*oldld* | linux*aout* | linux*coff*) ++ dynamic_linker=no ++ ;; ++ ++linux*android*) ++ version_type=none # Android doesn't support versioned libraries. ++ need_lib_prefix=no ++ need_version=no ++ library_names_spec='$libname$release$shared_ext' ++ soname_spec='$libname$release$shared_ext' ++ finish_cmds= ++ shlibpath_var=LD_LIBRARY_PATH ++ shlibpath_overrides_runpath=yes ++ ++ # This implies no fast_install, which is unacceptable. ++ # Some rework will be needed to allow for fast_install ++ # before this can be enabled. ++ hardcode_into_libs=yes ++ ++ dynamic_linker='Android linker' ++ # Don't embed -rpath directories since the linker doesn't support them. ++ hardcode_libdir_flag_spec='-L$libdir' ++ ;; ++ ++# This must be glibc/ELF. ++linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) ++ version_type=linux # correct to gnu/linux during the next big refactor ++ need_lib_prefix=no ++ need_version=no ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' ++ shlibpath_var=LD_LIBRARY_PATH ++ shlibpath_overrides_runpath=no ++ ++ # Some binutils ld are patched to set DT_RUNPATH ++ if test ${lt_cv_shlibpath_overrides_runpath+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ lt_cv_shlibpath_overrides_runpath=no ++ save_LDFLAGS=$LDFLAGS ++ save_libdir=$libdir ++ eval "libdir=/foo; wl=\"$lt_prog_compiler_wl\"; \ ++ LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec\"" ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. 
*/ ++ ++int ++main (void) ++{ ++ ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_c_try_link "$LINENO" ++then : ++ if ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null ++then : ++ lt_cv_shlibpath_overrides_runpath=yes ++fi ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam \ ++ conftest$ac_exeext conftest.$ac_ext ++ LDFLAGS=$save_LDFLAGS ++ libdir=$save_libdir ++ ++fi ++ ++ shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath ++ ++ # This implies no fast_install, which is unacceptable. ++ # Some rework will be needed to allow for fast_install ++ # before this can be enabled. ++ hardcode_into_libs=yes ++ ++ # Add ABI-specific directories to the system library path. ++ sys_lib_dlsearch_path_spec="/lib64 /usr/lib64 /lib /usr/lib" ++ ++ # Ideally, we could use ldconfig to report *all* directores which are ++ # searched for libraries, however this is still not possible. Aside from not ++ # being certain /sbin/ldconfig is available, command ++ # 'ldconfig -N -X -v | grep ^/' on 64bit Fedora does not report /usr/lib64, ++ # even though it is searched at run-time. Try to do the best guess by ++ # appending ld.so.conf contents (and includes) to the search path. ++ if test -f /etc/ld.so.conf; then ++ lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '` ++ sys_lib_dlsearch_path_spec="$sys_lib_dlsearch_path_spec $lt_ld_extra" ++ fi ++ ++ # We used to test for /lib/ld.so.1 and disable shared libraries on ++ # powerpc, because MkLinux only supported shared libraries with the ++ # GNU dynamic linker. Since this was broken with cross compilers, ++ # most powerpc-linux boxes support dynamic linking these days and ++ # people can always --disable-shared, the test was removed, and we ++ # assume the GNU/Linux dynamic linker is in use. 
++ dynamic_linker='GNU/Linux ld.so' ++ ;; ++ ++netbsd*) ++ version_type=sunos ++ need_lib_prefix=no ++ need_version=no ++ if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' ++ finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' ++ dynamic_linker='NetBSD (a.out) ld.so' ++ else ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ dynamic_linker='NetBSD ld.elf_so' ++ fi ++ shlibpath_var=LD_LIBRARY_PATH ++ shlibpath_overrides_runpath=yes ++ hardcode_into_libs=yes ++ ;; ++ ++newsos6) ++ version_type=linux # correct to gnu/linux during the next big refactor ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ shlibpath_var=LD_LIBRARY_PATH ++ shlibpath_overrides_runpath=yes ++ ;; ++ ++*nto* | *qnx*) ++ version_type=qnx ++ need_lib_prefix=no ++ need_version=no ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ shlibpath_var=LD_LIBRARY_PATH ++ shlibpath_overrides_runpath=no ++ hardcode_into_libs=yes ++ dynamic_linker='ldqnx.so' ++ ;; ++ ++openbsd* | bitrig*) ++ version_type=sunos ++ sys_lib_dlsearch_path_spec=/usr/lib ++ need_lib_prefix=no ++ if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then ++ need_version=no ++ else ++ need_version=yes ++ fi ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' ++ finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' ++ shlibpath_var=LD_LIBRARY_PATH ++ shlibpath_overrides_runpath=yes ++ ;; ++ ++os2*) ++ libname_spec='$name' ++ version_type=windows ++ shrext_cmds=.dll ++ need_version=no ++ need_lib_prefix=no ++ # OS/2 can only load a DLL with a base name of 8 characters or less. 
++ soname_spec='`test -n "$os2dllname" && libname="$os2dllname"; ++ v=$($ECHO $release$versuffix | tr -d .-); ++ n=$($ECHO $libname | cut -b -$((8 - ${#v})) | tr . _); ++ $ECHO $n$v`$shared_ext' ++ library_names_spec='${libname}_dll.$libext' ++ dynamic_linker='OS/2 ld.exe' ++ shlibpath_var=BEGINLIBPATH ++ sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" ++ sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ++ postinstall_cmds='base_file=`basename \$file`~ ++ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; $ECHO \$dlname'\''`~ ++ dldir=$destdir/`dirname \$dlpath`~ ++ test -d \$dldir || mkdir -p \$dldir~ ++ $install_prog $dir/$dlname \$dldir/$dlname~ ++ chmod a+x \$dldir/$dlname~ ++ if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then ++ eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; ++ fi' ++ postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; $ECHO \$dlname'\''`~ ++ dlpath=$dir/\$dldll~ ++ $RM \$dlpath' ++ ;; ++ ++osf3* | osf4* | osf5*) ++ version_type=osf ++ need_lib_prefix=no ++ need_version=no ++ soname_spec='$libname$release$shared_ext$major' ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ shlibpath_var=LD_LIBRARY_PATH ++ sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" ++ sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ++ ;; ++ ++rdos*) ++ dynamic_linker=no ++ ;; ++ ++solaris*) ++ version_type=linux # correct to gnu/linux during the next big refactor ++ need_lib_prefix=no ++ need_version=no ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ shlibpath_var=LD_LIBRARY_PATH ++ shlibpath_overrides_runpath=yes ++ hardcode_into_libs=yes ++ # ldd complains unless libraries are executable ++ postinstall_cmds='chmod +x $lib' ++ ;; ++ ++sunos4*) ++ version_type=sunos ++ 
library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' ++ finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' ++ shlibpath_var=LD_LIBRARY_PATH ++ shlibpath_overrides_runpath=yes ++ if test yes = "$with_gnu_ld"; then ++ need_lib_prefix=no ++ fi ++ need_version=yes ++ ;; ++ ++sysv4 | sysv4.3*) ++ version_type=linux # correct to gnu/linux during the next big refactor ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ shlibpath_var=LD_LIBRARY_PATH ++ case $host_vendor in ++ sni) ++ shlibpath_overrides_runpath=no ++ need_lib_prefix=no ++ runpath_var=LD_RUN_PATH ++ ;; ++ siemens) ++ need_lib_prefix=no ++ ;; ++ motorola) ++ need_lib_prefix=no ++ need_version=no ++ shlibpath_overrides_runpath=no ++ sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' ++ ;; ++ esac ++ ;; ++ ++sysv4*MP*) ++ if test -d /usr/nec; then ++ version_type=linux # correct to gnu/linux during the next big refactor ++ library_names_spec='$libname$shared_ext.$versuffix $libname$shared_ext.$major $libname$shared_ext' ++ soname_spec='$libname$shared_ext.$major' ++ shlibpath_var=LD_LIBRARY_PATH ++ fi ++ ;; ++ ++sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) ++ version_type=sco ++ need_lib_prefix=no ++ need_version=no ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ shlibpath_var=LD_LIBRARY_PATH ++ shlibpath_overrides_runpath=yes ++ hardcode_into_libs=yes ++ if test yes = "$with_gnu_ld"; then ++ sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' ++ else ++ sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' ++ case $host_os in ++ sco3.2v5*) ++ sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" ++ ;; ++ esac ++ fi ++ sys_lib_dlsearch_path_spec='/usr/lib' ++ ;; ++ ++tpf*) ++ # TPF is a 
cross-target only. Preferred cross-host = GNU/Linux. ++ version_type=linux # correct to gnu/linux during the next big refactor ++ need_lib_prefix=no ++ need_version=no ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ shlibpath_var=LD_LIBRARY_PATH ++ shlibpath_overrides_runpath=no ++ hardcode_into_libs=yes ++ ;; ++ ++uts4*) ++ version_type=linux # correct to gnu/linux during the next big refactor ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ shlibpath_var=LD_LIBRARY_PATH ++ ;; ++ ++*) ++ dynamic_linker=no ++ ;; ++esac ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5 ++printf "%s\n" "$dynamic_linker" >&6; } ++test no = "$dynamic_linker" && can_build_shared=no ++ ++variables_saved_for_relink="PATH $shlibpath_var $runpath_var" ++if test yes = "$GCC"; then ++ variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" ++fi ++ ++if test set = "${lt_cv_sys_lib_search_path_spec+set}"; then ++ sys_lib_search_path_spec=$lt_cv_sys_lib_search_path_spec ++fi ++ ++if test set = "${lt_cv_sys_lib_dlsearch_path_spec+set}"; then ++ sys_lib_dlsearch_path_spec=$lt_cv_sys_lib_dlsearch_path_spec ++fi ++ ++# remember unaugmented sys_lib_dlsearch_path content for libtool script decls... ++configure_time_dlsearch_path=$sys_lib_dlsearch_path_spec ++ ++# ... 
but it needs LT_SYS_LIBRARY_PATH munging for other configure-time code ++func_munge_path_list sys_lib_dlsearch_path_spec "$LT_SYS_LIBRARY_PATH" ++ ++# to be used as default LT_SYS_LIBRARY_PATH value in generated libtool ++configure_time_lt_sys_library_path=$LT_SYS_LIBRARY_PATH ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5 ++printf %s "checking how to hardcode library paths into programs... " >&6; } ++hardcode_action= ++if test -n "$hardcode_libdir_flag_spec" || ++ test -n "$runpath_var" || ++ test yes = "$hardcode_automatic"; then ++ ++ # We can hardcode non-existent directories. ++ if test no != "$hardcode_direct" && ++ # If the only mechanism to avoid hardcoding is shlibpath_var, we ++ # have to relink, otherwise we might link with an installed library ++ # when we should be linking with a yet-to-be-installed one ++ ## test no != "$_LT_TAGVAR(hardcode_shlibpath_var, )" && ++ test no != "$hardcode_minus_L"; then ++ # Linking always hardcodes the temporary library directory. ++ hardcode_action=relink ++ else ++ # We can link without hardcoding, and we can hardcode nonexisting dirs. ++ hardcode_action=immediate ++ fi ++else ++ # We cannot hardcode anything, or else we can only hardcode existing ++ # directories. 
++ hardcode_action=unsupported ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $hardcode_action" >&5 ++printf "%s\n" "$hardcode_action" >&6; } ++ ++if test relink = "$hardcode_action" || ++ test yes = "$inherit_rpath"; then ++ # Fast installation is not supported ++ enable_fast_install=no ++elif test yes = "$shlibpath_overrides_runpath" || ++ test no = "$enable_shared"; then ++ # Fast installation is not necessary ++ enable_fast_install=needless ++fi ++ ++ ++ ++ ++ ++ ++ if test yes != "$enable_dlopen"; then ++ enable_dlopen=unknown ++ enable_dlopen_self=unknown ++ enable_dlopen_self_static=unknown ++else ++ lt_cv_dlopen=no ++ lt_cv_dlopen_libs= ++ ++ case $host_os in ++ beos*) ++ lt_cv_dlopen=load_add_on ++ lt_cv_dlopen_libs= ++ lt_cv_dlopen_self=yes ++ ;; ++ ++ mingw* | pw32* | cegcc*) ++ lt_cv_dlopen=LoadLibrary ++ lt_cv_dlopen_libs= ++ ;; ++ ++ cygwin*) ++ lt_cv_dlopen=dlopen ++ lt_cv_dlopen_libs= ++ ;; ++ ++ darwin*) ++ # if libdl is installed we need to link against it ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5 ++printf %s "checking for dlopen in -ldl... " >&6; } ++if test ${ac_cv_lib_dl_dlopen+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ ac_check_lib_save_LIBS=$LIBS ++LIBS="-ldl $LIBS" ++cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++ ++/* Override any GCC internal prototype to avoid an error. ++ Use char because int might match the return type of a GCC ++ builtin and then its argument prototype would still apply. 
*/ ++char dlopen (); ++int ++main (void) ++{ ++return dlopen (); ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_c_try_link "$LINENO" ++then : ++ ac_cv_lib_dl_dlopen=yes ++else $as_nop ++ ac_cv_lib_dl_dlopen=no ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam \ ++ conftest$ac_exeext conftest.$ac_ext ++LIBS=$ac_check_lib_save_LIBS ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5 ++printf "%s\n" "$ac_cv_lib_dl_dlopen" >&6; } ++if test "x$ac_cv_lib_dl_dlopen" = xyes ++then : ++ lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl ++else $as_nop ++ ++ lt_cv_dlopen=dyld ++ lt_cv_dlopen_libs= ++ lt_cv_dlopen_self=yes ++ ++fi ++ ++ ;; ++ ++ tpf*) ++ # Don't try to run any link tests for TPF. We know it's impossible ++ # because TPF is a cross-compiler, and we know how we open DSOs. ++ lt_cv_dlopen=dlopen ++ lt_cv_dlopen_libs= ++ lt_cv_dlopen_self=no ++ ;; ++ ++ *) ++ ac_fn_c_check_func "$LINENO" "shl_load" "ac_cv_func_shl_load" ++if test "x$ac_cv_func_shl_load" = xyes ++then : ++ lt_cv_dlopen=shl_load ++else $as_nop ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for shl_load in -ldld" >&5 ++printf %s "checking for shl_load in -ldld... " >&6; } ++if test ${ac_cv_lib_dld_shl_load+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ ac_check_lib_save_LIBS=$LIBS ++LIBS="-ldld $LIBS" ++cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++ ++/* Override any GCC internal prototype to avoid an error. ++ Use char because int might match the return type of a GCC ++ builtin and then its argument prototype would still apply. 
*/ ++char shl_load (); ++int ++main (void) ++{ ++return shl_load (); ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_c_try_link "$LINENO" ++then : ++ ac_cv_lib_dld_shl_load=yes ++else $as_nop ++ ac_cv_lib_dld_shl_load=no ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam \ ++ conftest$ac_exeext conftest.$ac_ext ++LIBS=$ac_check_lib_save_LIBS ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_shl_load" >&5 ++printf "%s\n" "$ac_cv_lib_dld_shl_load" >&6; } ++if test "x$ac_cv_lib_dld_shl_load" = xyes ++then : ++ lt_cv_dlopen=shl_load lt_cv_dlopen_libs=-ldld ++else $as_nop ++ ac_fn_c_check_func "$LINENO" "dlopen" "ac_cv_func_dlopen" ++if test "x$ac_cv_func_dlopen" = xyes ++then : ++ lt_cv_dlopen=dlopen ++else $as_nop ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5 ++printf %s "checking for dlopen in -ldl... " >&6; } ++if test ${ac_cv_lib_dl_dlopen+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ ac_check_lib_save_LIBS=$LIBS ++LIBS="-ldl $LIBS" ++cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++ ++/* Override any GCC internal prototype to avoid an error. ++ Use char because int might match the return type of a GCC ++ builtin and then its argument prototype would still apply. 
*/ ++char dlopen (); ++int ++main (void) ++{ ++return dlopen (); ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_c_try_link "$LINENO" ++then : ++ ac_cv_lib_dl_dlopen=yes ++else $as_nop ++ ac_cv_lib_dl_dlopen=no ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam \ ++ conftest$ac_exeext conftest.$ac_ext ++LIBS=$ac_check_lib_save_LIBS ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5 ++printf "%s\n" "$ac_cv_lib_dl_dlopen" >&6; } ++if test "x$ac_cv_lib_dl_dlopen" = xyes ++then : ++ lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl ++else $as_nop ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for dlopen in -lsvld" >&5 ++printf %s "checking for dlopen in -lsvld... " >&6; } ++if test ${ac_cv_lib_svld_dlopen+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ ac_check_lib_save_LIBS=$LIBS ++LIBS="-lsvld $LIBS" ++cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++ ++/* Override any GCC internal prototype to avoid an error. ++ Use char because int might match the return type of a GCC ++ builtin and then its argument prototype would still apply. */ ++char dlopen (); ++int ++main (void) ++{ ++return dlopen (); ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_c_try_link "$LINENO" ++then : ++ ac_cv_lib_svld_dlopen=yes ++else $as_nop ++ ac_cv_lib_svld_dlopen=no ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam \ ++ conftest$ac_exeext conftest.$ac_ext ++LIBS=$ac_check_lib_save_LIBS ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_svld_dlopen" >&5 ++printf "%s\n" "$ac_cv_lib_svld_dlopen" >&6; } ++if test "x$ac_cv_lib_svld_dlopen" = xyes ++then : ++ lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-lsvld ++else $as_nop ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for dld_link in -ldld" >&5 ++printf %s "checking for dld_link in -ldld... 
" >&6; } ++if test ${ac_cv_lib_dld_dld_link+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ ac_check_lib_save_LIBS=$LIBS ++LIBS="-ldld $LIBS" ++cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++ ++/* Override any GCC internal prototype to avoid an error. ++ Use char because int might match the return type of a GCC ++ builtin and then its argument prototype would still apply. */ ++char dld_link (); ++int ++main (void) ++{ ++return dld_link (); ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_c_try_link "$LINENO" ++then : ++ ac_cv_lib_dld_dld_link=yes ++else $as_nop ++ ac_cv_lib_dld_dld_link=no ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam \ ++ conftest$ac_exeext conftest.$ac_ext ++LIBS=$ac_check_lib_save_LIBS ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_dld_link" >&5 ++printf "%s\n" "$ac_cv_lib_dld_dld_link" >&6; } ++if test "x$ac_cv_lib_dld_dld_link" = xyes ++then : ++ lt_cv_dlopen=dld_link lt_cv_dlopen_libs=-ldld ++fi ++ ++ ++fi ++ ++ ++fi ++ ++ ++fi ++ ++ ++fi ++ ++ ++fi ++ ++ ;; ++ esac ++ ++ if test no = "$lt_cv_dlopen"; then ++ enable_dlopen=no ++ else ++ enable_dlopen=yes ++ fi ++ ++ case $lt_cv_dlopen in ++ dlopen) ++ save_CPPFLAGS=$CPPFLAGS ++ test yes = "$ac_cv_header_dlfcn_h" && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" ++ ++ save_LDFLAGS=$LDFLAGS ++ wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" ++ ++ save_LIBS=$LIBS ++ LIBS="$lt_cv_dlopen_libs $LIBS" ++ ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether a program can dlopen itself" >&5 ++printf %s "checking whether a program can dlopen itself... 
" >&6; } ++if test ${lt_cv_dlopen_self+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test yes = "$cross_compiling"; then : ++ lt_cv_dlopen_self=cross ++else ++ lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 ++ lt_status=$lt_dlunknown ++ cat > conftest.$ac_ext <<_LT_EOF ++#line $LINENO "configure" ++#include "confdefs.h" ++ ++#if HAVE_DLFCN_H ++#include ++#endif ++ ++#include ++ ++#ifdef RTLD_GLOBAL ++# define LT_DLGLOBAL RTLD_GLOBAL ++#else ++# ifdef DL_GLOBAL ++# define LT_DLGLOBAL DL_GLOBAL ++# else ++# define LT_DLGLOBAL 0 ++# endif ++#endif ++ ++/* We may have to define LT_DLLAZY_OR_NOW in the command line if we ++ find out it does not work in some platform. */ ++#ifndef LT_DLLAZY_OR_NOW ++# ifdef RTLD_LAZY ++# define LT_DLLAZY_OR_NOW RTLD_LAZY ++# else ++# ifdef DL_LAZY ++# define LT_DLLAZY_OR_NOW DL_LAZY ++# else ++# ifdef RTLD_NOW ++# define LT_DLLAZY_OR_NOW RTLD_NOW ++# else ++# ifdef DL_NOW ++# define LT_DLLAZY_OR_NOW DL_NOW ++# else ++# define LT_DLLAZY_OR_NOW 0 ++# endif ++# endif ++# endif ++# endif ++#endif ++ ++/* When -fvisibility=hidden is used, assume the code has been annotated ++ correspondingly for the symbols needed. */ ++#if defined __GNUC__ && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) ++int fnord () __attribute__((visibility("default"))); ++#endif ++ ++int fnord () { return 42; } ++int main () ++{ ++ void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); ++ int status = $lt_dlunknown; ++ ++ if (self) ++ { ++ if (dlsym (self,"fnord")) status = $lt_dlno_uscore; ++ else ++ { ++ if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; ++ else puts (dlerror ()); ++ } ++ /* dlclose (self); */ ++ } ++ else ++ puts (dlerror ()); ++ ++ return status; ++} ++_LT_EOF ++ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 ++ (eval $ac_link) 2>&5 ++ ac_status=$? ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 ++ test $ac_status = 0; } && test -s "conftest$ac_exeext" 2>/dev/null; then ++ (./conftest; exit; ) >&5 2>/dev/null ++ lt_status=$? ++ case x$lt_status in ++ x$lt_dlno_uscore) lt_cv_dlopen_self=yes ;; ++ x$lt_dlneed_uscore) lt_cv_dlopen_self=yes ;; ++ x$lt_dlunknown|x*) lt_cv_dlopen_self=no ;; ++ esac ++ else : ++ # compilation failed ++ lt_cv_dlopen_self=no ++ fi ++fi ++rm -fr conftest* ++ ++ ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self" >&5 ++printf "%s\n" "$lt_cv_dlopen_self" >&6; } ++ ++ if test yes = "$lt_cv_dlopen_self"; then ++ wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\" ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether a statically linked program can dlopen itself" >&5 ++printf %s "checking whether a statically linked program can dlopen itself... " >&6; } ++if test ${lt_cv_dlopen_self_static+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test yes = "$cross_compiling"; then : ++ lt_cv_dlopen_self_static=cross ++else ++ lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 ++ lt_status=$lt_dlunknown ++ cat > conftest.$ac_ext <<_LT_EOF ++#line $LINENO "configure" ++#include "confdefs.h" ++ ++#if HAVE_DLFCN_H ++#include ++#endif ++ ++#include ++ ++#ifdef RTLD_GLOBAL ++# define LT_DLGLOBAL RTLD_GLOBAL ++#else ++# ifdef DL_GLOBAL ++# define LT_DLGLOBAL DL_GLOBAL ++# else ++# define LT_DLGLOBAL 0 ++# endif ++#endif ++ ++/* We may have to define LT_DLLAZY_OR_NOW in the command line if we ++ find out it does not work in some platform. 
*/ ++#ifndef LT_DLLAZY_OR_NOW ++# ifdef RTLD_LAZY ++# define LT_DLLAZY_OR_NOW RTLD_LAZY ++# else ++# ifdef DL_LAZY ++# define LT_DLLAZY_OR_NOW DL_LAZY ++# else ++# ifdef RTLD_NOW ++# define LT_DLLAZY_OR_NOW RTLD_NOW ++# else ++# ifdef DL_NOW ++# define LT_DLLAZY_OR_NOW DL_NOW ++# else ++# define LT_DLLAZY_OR_NOW 0 ++# endif ++# endif ++# endif ++# endif ++#endif ++ ++/* When -fvisibility=hidden is used, assume the code has been annotated ++ correspondingly for the symbols needed. */ ++#if defined __GNUC__ && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) ++int fnord () __attribute__((visibility("default"))); ++#endif ++ ++int fnord () { return 42; } ++int main () ++{ ++ void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); ++ int status = $lt_dlunknown; ++ ++ if (self) ++ { ++ if (dlsym (self,"fnord")) status = $lt_dlno_uscore; ++ else ++ { ++ if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; ++ else puts (dlerror ()); ++ } ++ /* dlclose (self); */ ++ } ++ else ++ puts (dlerror ()); ++ ++ return status; ++} ++_LT_EOF ++ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 ++ (eval $ac_link) 2>&5 ++ ac_status=$? ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ test $ac_status = 0; } && test -s "conftest$ac_exeext" 2>/dev/null; then ++ (./conftest; exit; ) >&5 2>/dev/null ++ lt_status=$? 
++ case x$lt_status in ++ x$lt_dlno_uscore) lt_cv_dlopen_self_static=yes ;; ++ x$lt_dlneed_uscore) lt_cv_dlopen_self_static=yes ;; ++ x$lt_dlunknown|x*) lt_cv_dlopen_self_static=no ;; ++ esac ++ else : ++ # compilation failed ++ lt_cv_dlopen_self_static=no ++ fi ++fi ++rm -fr conftest* ++ ++ ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self_static" >&5 ++printf "%s\n" "$lt_cv_dlopen_self_static" >&6; } ++ fi ++ ++ CPPFLAGS=$save_CPPFLAGS ++ LDFLAGS=$save_LDFLAGS ++ LIBS=$save_LIBS ++ ;; ++ esac ++ ++ case $lt_cv_dlopen_self in ++ yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; ++ *) enable_dlopen_self=unknown ;; ++ esac ++ ++ case $lt_cv_dlopen_self_static in ++ yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; ++ *) enable_dlopen_self_static=unknown ;; ++ esac ++fi ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++striplib= ++old_striplib= ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether stripping libraries is possible" >&5 ++printf %s "checking whether stripping libraries is possible... 
" >&6; } ++if test -z "$STRIP"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++else ++ if $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then ++ old_striplib="$STRIP --strip-debug" ++ striplib="$STRIP --strip-unneeded" ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5 ++printf "%s\n" "yes" >&6; } ++ else ++ case $host_os in ++ darwin*) ++ # FIXME - insert some real tests, host_os isn't really good enough ++ striplib="$STRIP -x" ++ old_striplib="$STRIP -S" ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5 ++printf "%s\n" "yes" >&6; } ++ ;; ++ freebsd*) ++ if $STRIP -V 2>&1 | $GREP "elftoolchain" >/dev/null; then ++ old_striplib="$STRIP --strip-debug" ++ striplib="$STRIP --strip-unneeded" ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5 ++printf "%s\n" "yes" >&6; } ++ else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++ fi ++ ;; ++ *) ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++ ;; ++ esac ++ fi ++fi ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ # Report what library types will actually be built ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if libtool supports shared libraries" >&5 ++printf %s "checking if libtool supports shared libraries... " >&6; } ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $can_build_shared" >&5 ++printf "%s\n" "$can_build_shared" >&6; } ++ ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether to build shared libraries" >&5 ++printf %s "checking whether to build shared libraries... " >&6; } ++ test no = "$can_build_shared" && enable_shared=no ++ ++ # On AIX, shared libraries and static libraries use the same namespace, and ++ # are all built from PIC. 
++ case $host_os in ++ aix3*) ++ test yes = "$enable_shared" && enable_static=no ++ if test -n "$RANLIB"; then ++ archive_cmds="$archive_cmds~\$RANLIB \$lib" ++ postinstall_cmds='$RANLIB $lib' ++ fi ++ ;; ++ ++ aix[4-9]*) ++ if test ia64 != "$host_cpu"; then ++ case $enable_shared,$with_aix_soname,$aix_use_runtimelinking in ++ yes,aix,yes) ;; # shared object as lib.so file only ++ yes,svr4,*) ;; # shared object as lib.so archive member only ++ yes,*) enable_static=no ;; # shared object in lib.a archive as well ++ esac ++ fi ++ ;; ++ esac ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $enable_shared" >&5 ++printf "%s\n" "$enable_shared" >&6; } ++ ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether to build static libraries" >&5 ++printf %s "checking whether to build static libraries... " >&6; } ++ # Make sure either enable_shared or enable_static is yes. ++ test yes = "$enable_shared" || enable_static=yes ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $enable_static" >&5 ++printf "%s\n" "$enable_static" >&6; } ++ ++ ++ ++ ++fi ++ac_ext=c ++ac_cpp='$CPP $CPPFLAGS' ++ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ++ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ++ac_compiler_gnu=$ac_cv_c_compiler_gnu ++ ++CC=$lt_save_CC ++ ++ if test -n "$CXX" && ( test no != "$CXX" && ++ ( (test g++ = "$CXX" && `g++ -v >/dev/null 2>&1` ) || ++ (test g++ != "$CXX"))); then ++ ac_ext=cpp ++ac_cpp='$CXXCPP $CPPFLAGS' ++ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ++ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ++ac_compiler_gnu=$ac_cv_cxx_compiler_gnu ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to run the C++ preprocessor" >&5 ++printf %s "checking how to run the C++ preprocessor... 
" >&6; } ++if test -z "$CXXCPP"; then ++ if test ${ac_cv_prog_CXXCPP+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ # Double quotes because $CXX needs to be expanded ++ for CXXCPP in "$CXX -E" cpp /lib/cpp ++ do ++ ac_preproc_ok=false ++for ac_cxx_preproc_warn_flag in '' yes ++do ++ # Use a header file that comes with gcc, so configuring glibc ++ # with a fresh cross-compiler works. ++ # On the NeXT, cc -E runs the code through the compiler's parser, ++ # not just through cpp. "Syntax error" is here to catch this case. ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++#include ++ Syntax error ++_ACEOF ++if ac_fn_cxx_try_cpp "$LINENO" ++then : ++ ++else $as_nop ++ # Broken: fails on valid input. ++continue ++fi ++rm -f conftest.err conftest.i conftest.$ac_ext ++ ++ # OK, works on sane cases. Now check whether nonexistent headers ++ # can be detected and how. ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++#include ++_ACEOF ++if ac_fn_cxx_try_cpp "$LINENO" ++then : ++ # Broken: success on invalid input. ++continue ++else $as_nop ++ # Passes both tests. ++ac_preproc_ok=: ++break ++fi ++rm -f conftest.err conftest.i conftest.$ac_ext ++ ++done ++# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. ++rm -f conftest.i conftest.err conftest.$ac_ext ++if $ac_preproc_ok ++then : ++ break ++fi ++ ++ done ++ ac_cv_prog_CXXCPP=$CXXCPP ++ ++fi ++ CXXCPP=$ac_cv_prog_CXXCPP ++else ++ ac_cv_prog_CXXCPP=$CXXCPP ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CXXCPP" >&5 ++printf "%s\n" "$CXXCPP" >&6; } ++ac_preproc_ok=false ++for ac_cxx_preproc_warn_flag in '' yes ++do ++ # Use a header file that comes with gcc, so configuring glibc ++ # with a fresh cross-compiler works. ++ # On the NeXT, cc -E runs the code through the compiler's parser, ++ # not just through cpp. "Syntax error" is here to catch this case. ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. 
*/ ++#include ++ Syntax error ++_ACEOF ++if ac_fn_cxx_try_cpp "$LINENO" ++then : ++ ++else $as_nop ++ # Broken: fails on valid input. ++continue ++fi ++rm -f conftest.err conftest.i conftest.$ac_ext ++ ++ # OK, works on sane cases. Now check whether nonexistent headers ++ # can be detected and how. ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++#include ++_ACEOF ++if ac_fn_cxx_try_cpp "$LINENO" ++then : ++ # Broken: success on invalid input. ++continue ++else $as_nop ++ # Passes both tests. ++ac_preproc_ok=: ++break ++fi ++rm -f conftest.err conftest.i conftest.$ac_ext ++ ++done ++# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. ++rm -f conftest.i conftest.err conftest.$ac_ext ++if $ac_preproc_ok ++then : ++ ++else $as_nop ++ { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 ++printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} ++as_fn_error $? "C++ preprocessor \"$CXXCPP\" fails sanity check ++See \`config.log' for more details" "$LINENO" 5; } ++fi ++ ++ac_ext=c ++ac_cpp='$CPP $CPPFLAGS' ++ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ++ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ++ac_compiler_gnu=$ac_cv_c_compiler_gnu ++ ++else ++ _lt_caught_CXX_error=yes ++fi ++ ++ac_ext=cpp ++ac_cpp='$CXXCPP $CPPFLAGS' ++ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ++ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ++ac_compiler_gnu=$ac_cv_cxx_compiler_gnu ++ ++archive_cmds_need_lc_CXX=no ++allow_undefined_flag_CXX= ++always_export_symbols_CXX=no ++archive_expsym_cmds_CXX= ++compiler_needs_object_CXX=no ++export_dynamic_flag_spec_CXX= ++hardcode_direct_CXX=no ++hardcode_direct_absolute_CXX=no ++hardcode_libdir_flag_spec_CXX= ++hardcode_libdir_separator_CXX= ++hardcode_minus_L_CXX=no ++hardcode_shlibpath_var_CXX=unsupported ++hardcode_automatic_CXX=no ++inherit_rpath_CXX=no 
++module_cmds_CXX= ++module_expsym_cmds_CXX= ++link_all_deplibs_CXX=unknown ++old_archive_cmds_CXX=$old_archive_cmds ++reload_flag_CXX=$reload_flag ++reload_cmds_CXX=$reload_cmds ++no_undefined_flag_CXX= ++whole_archive_flag_spec_CXX= ++enable_shared_with_static_runtimes_CXX=no ++ ++# Source file extension for C++ test sources. ++ac_ext=cpp ++ ++# Object file extension for compiled C++ test sources. ++objext=o ++objext_CXX=$objext ++ ++# No sense in running all these tests if we already determined that ++# the CXX compiler isn't working. Some variables (like enable_shared) ++# are currently assumed to apply to all compilers on this platform, ++# and will be corrupted by setting them based on a non-working compiler. ++if test yes != "$_lt_caught_CXX_error"; then ++ # Code to be used in simple compile tests ++ lt_simple_compile_test_code="int some_variable = 0;" ++ ++ # Code to be used in simple link tests ++ lt_simple_link_test_code='int main(int, char *[]) { return(0); }' ++ ++ # ltmain only uses $CC for tagged configurations so make sure $CC is set. ++ ++ ++ ++ ++ ++ ++# If no C compiler was specified, use CC. ++LTCC=${LTCC-"$CC"} ++ ++# If no C compiler flags were specified, use CFLAGS. ++LTCFLAGS=${LTCFLAGS-"$CFLAGS"} ++ ++# Allow CC to be a program name with arguments. ++compiler=$CC ++ ++ ++ # save warnings/boilerplate of simple test code ++ ac_outfile=conftest.$ac_objext ++echo "$lt_simple_compile_test_code" >conftest.$ac_ext ++eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err ++_lt_compiler_boilerplate=`cat conftest.err` ++$RM conftest* ++ ++ ac_outfile=conftest.$ac_objext ++echo "$lt_simple_link_test_code" >conftest.$ac_ext ++eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err ++_lt_linker_boilerplate=`cat conftest.err` ++$RM -r conftest* ++ ++ ++ # Allow CC to be a program name with arguments. 
++ lt_save_CC=$CC ++ lt_save_CFLAGS=$CFLAGS ++ lt_save_LD=$LD ++ lt_save_GCC=$GCC ++ GCC=$GXX ++ lt_save_with_gnu_ld=$with_gnu_ld ++ lt_save_path_LD=$lt_cv_path_LD ++ if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then ++ lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx ++ else ++ $as_unset lt_cv_prog_gnu_ld ++ fi ++ if test -n "${lt_cv_path_LDCXX+set}"; then ++ lt_cv_path_LD=$lt_cv_path_LDCXX ++ else ++ $as_unset lt_cv_path_LD ++ fi ++ test -z "${LDCXX+set}" || LD=$LDCXX ++ CC=${CXX-"c++"} ++ CFLAGS=$CXXFLAGS ++ compiler=$CC ++ compiler_CXX=$CC ++ func_cc_basename $compiler ++cc_basename=$func_cc_basename_result ++ ++ ++ if test -n "$compiler"; then ++ # We don't want -fno-exception when compiling C++ code, so set the ++ # no_builtin_flag separately ++ if test yes = "$GXX"; then ++ lt_prog_compiler_no_builtin_flag_CXX=' -fno-builtin' ++ else ++ lt_prog_compiler_no_builtin_flag_CXX= ++ fi ++ ++ if test yes = "$GXX"; then ++ # Set up default GNU C++ configuration ++ ++ ++ ++# Check whether --with-gnu-ld was given. ++if test ${with_gnu_ld+y} ++then : ++ withval=$with_gnu_ld; test no = "$withval" || with_gnu_ld=yes ++else $as_nop ++ with_gnu_ld=no ++fi ++ ++ac_prog=ld ++if test yes = "$GCC"; then ++ # Check if gcc -print-prog-name=ld gives a path. ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5 ++printf %s "checking for ld used by $CC... " >&6; } ++ case $host in ++ *-*-mingw*) ++ # gcc leaves a trailing carriage return, which upsets mingw ++ ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; ++ *) ++ ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; ++ esac ++ case $ac_prog in ++ # Accept absolute paths. 
++ [\\/]* | ?:[\\/]*) ++ re_direlt='/[^/][^/]*/\.\./' ++ # Canonicalize the pathname of ld ++ ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` ++ while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do ++ ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` ++ done ++ test -z "$LD" && LD=$ac_prog ++ ;; ++ "") ++ # If it fails, then pretend we aren't using GCC. ++ ac_prog=ld ++ ;; ++ *) ++ # If it is relative, then search for the first ld in PATH. ++ with_gnu_ld=unknown ++ ;; ++ esac ++elif test yes = "$with_gnu_ld"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5 ++printf %s "checking for GNU ld... " >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5 ++printf %s "checking for non-GNU ld... " >&6; } ++fi ++if test ${lt_cv_path_LD+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ if test -z "$LD"; then ++ lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR ++ for ac_dir in $PATH; do ++ IFS=$lt_save_ifs ++ test -z "$ac_dir" && ac_dir=. ++ if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then ++ lt_cv_path_LD=$ac_dir/$ac_prog ++ # Check to see if the program is GNU ld. I'd rather use --version, ++ # but apparently some variants of GNU ld only accept -v. ++ # Break only if it was the GNU/non-GNU ld that we prefer. ++ case `"$lt_cv_path_LD" -v 2>&1 &5 ++printf "%s\n" "$LD" >&6; } ++else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++printf "%s\n" "no" >&6; } ++fi ++test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5 ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5 ++printf %s "checking if the linker ($LD) is GNU ld... " >&6; } ++if test ${lt_cv_prog_gnu_ld+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ # I'd rather use --version here, but apparently some GNU lds only accept -v. 
++case `$LD -v 2>&1 &5 ++printf "%s\n" "$lt_cv_prog_gnu_ld" >&6; } ++with_gnu_ld=$lt_cv_prog_gnu_ld ++ ++ ++ ++ ++ ++ ++ ++ # Check if GNU C++ uses GNU ld as the underlying linker, since the ++ # archiving commands below assume that GNU ld is being used. ++ if test yes = "$with_gnu_ld"; then ++ archive_cmds_CXX='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' ++ archive_expsym_cmds_CXX='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ++ ++ hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir' ++ export_dynamic_flag_spec_CXX='$wl--export-dynamic' ++ ++ # If archive_cmds runs LD, not CC, wlarc should be empty ++ # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to ++ # investigate it a little bit more. (MM) ++ wlarc='$wl' ++ ++ # ancient GNU ld didn't support --whole-archive et. al. ++ if eval "`$CC -print-prog-name=ld` --help 2>&1" | ++ $GREP 'no-whole-archive' > /dev/null; then ++ whole_archive_flag_spec_CXX=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive' ++ else ++ whole_archive_flag_spec_CXX= ++ fi ++ else ++ with_gnu_ld=no ++ wlarc= ++ ++ # A generic and very simple default shared library creation ++ # command for GNU C++ for the case where it uses the native ++ # linker, instead of GNU ld. If possible, this setting should ++ # overridden to take advantage of the native linker features on ++ # the platform it is being used on. ++ archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' ++ fi ++ ++ # Commands to make compiler produce verbose output that lists ++ # what "hidden" libraries, object files and flags are used when ++ # linking a shared library. 
++ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' ++ ++ else ++ GXX=no ++ with_gnu_ld=no ++ wlarc= ++ fi ++ ++ # PORTME: fill in a description of your system's C++ link characteristics ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 ++printf %s "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; } ++ ld_shlibs_CXX=yes ++ case $host_os in ++ aix3*) ++ # FIXME: insert proper C++ library support ++ ld_shlibs_CXX=no ++ ;; ++ aix[4-9]*) ++ if test ia64 = "$host_cpu"; then ++ # On IA64, the linker does run time linking by default, so we don't ++ # have to do anything special. ++ aix_use_runtimelinking=no ++ exp_sym_flag='-Bexport' ++ no_entry_flag= ++ else ++ aix_use_runtimelinking=no ++ ++ # Test if we are trying to use run time linking or normal ++ # AIX style linking. If -brtl is somewhere in LDFLAGS, we ++ # have runtime linking enabled, and use it for executables. 
++ # For shared libraries, we enable/disable runtime linking ++ # depending on the kind of the shared library created - ++ # when "with_aix_soname,aix_use_runtimelinking" is: ++ # "aix,no" lib.a(lib.so.V) shared, rtl:no, for executables ++ # "aix,yes" lib.so shared, rtl:yes, for executables ++ # lib.a static archive ++ # "both,no" lib.so.V(shr.o) shared, rtl:yes ++ # lib.a(lib.so.V) shared, rtl:no, for executables ++ # "both,yes" lib.so.V(shr.o) shared, rtl:yes, for executables ++ # lib.a(lib.so.V) shared, rtl:no ++ # "svr4,*" lib.so.V(shr.o) shared, rtl:yes, for executables ++ # lib.a static archive ++ case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*) ++ for ld_flag in $LDFLAGS; do ++ case $ld_flag in ++ *-brtl*) ++ aix_use_runtimelinking=yes ++ break ++ ;; ++ esac ++ done ++ if test svr4,no = "$with_aix_soname,$aix_use_runtimelinking"; then ++ # With aix-soname=svr4, we create the lib.so.V shared archives only, ++ # so we don't have lib.a shared libs to link our executables. ++ # We have to force runtime linking in this case. ++ aix_use_runtimelinking=yes ++ LDFLAGS="$LDFLAGS -Wl,-brtl" ++ fi ++ ;; ++ esac ++ ++ exp_sym_flag='-bexport' ++ no_entry_flag='-bnoentry' ++ fi ++ ++ # When large executables or shared objects are built, AIX ld can ++ # have problems creating the table of contents. If linking a library ++ # or program results in "error TOC overflow" add -mminimal-toc to ++ # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not ++ # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. ++ ++ archive_cmds_CXX='' ++ hardcode_direct_CXX=yes ++ hardcode_direct_absolute_CXX=yes ++ hardcode_libdir_separator_CXX=':' ++ link_all_deplibs_CXX=yes ++ file_list_spec_CXX='$wl-f,' ++ case $with_aix_soname,$aix_use_runtimelinking in ++ aix,*) ;; # no import file ++ svr4,* | *,yes) # use import file ++ # The Import File defines what to hardcode. 
++ hardcode_direct_CXX=no ++ hardcode_direct_absolute_CXX=no ++ ;; ++ esac ++ ++ if test yes = "$GXX"; then ++ case $host_os in aix4.[012]|aix4.[012].*) ++ # We only want to do this on AIX 4.2 and lower, the check ++ # below for broken collect2 doesn't work under 4.3+ ++ collect2name=`$CC -print-prog-name=collect2` ++ if test -f "$collect2name" && ++ strings "$collect2name" | $GREP resolve_lib_name >/dev/null ++ then ++ # We have reworked collect2 ++ : ++ else ++ # We have old collect2 ++ hardcode_direct_CXX=unsupported ++ # It fails to find uninstalled libraries when the uninstalled ++ # path is not listed in the libpath. Setting hardcode_minus_L ++ # to unsupported forces relinking ++ hardcode_minus_L_CXX=yes ++ hardcode_libdir_flag_spec_CXX='-L$libdir' ++ hardcode_libdir_separator_CXX= ++ fi ++ esac ++ shared_flag='-shared' ++ if test yes = "$aix_use_runtimelinking"; then ++ shared_flag=$shared_flag' $wl-G' ++ fi ++ # Need to ensure runtime linking is disabled for the traditional ++ # shared library, or the linker may eventually find shared libraries ++ # /with/ Import File - we do not want to mix them. ++ shared_flag_aix='-shared' ++ shared_flag_svr4='-shared $wl-G' ++ else ++ # not using gcc ++ if test ia64 = "$host_cpu"; then ++ # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release ++ # chokes on -Wl,-G. The following line is correct: ++ shared_flag='-G' ++ else ++ if test yes = "$aix_use_runtimelinking"; then ++ shared_flag='$wl-G' ++ else ++ shared_flag='$wl-bM:SRE' ++ fi ++ shared_flag_aix='$wl-bM:SRE' ++ shared_flag_svr4='$wl-G' ++ fi ++ fi ++ ++ export_dynamic_flag_spec_CXX='$wl-bexpall' ++ # It seems that -bexpall does not export symbols beginning with ++ # underscore (_), so it is better to generate a list of symbols to ++ # export. 
++ always_export_symbols_CXX=yes ++ if test aix,yes = "$with_aix_soname,$aix_use_runtimelinking"; then ++ # Warning - without using the other runtime loading flags (-brtl), ++ # -berok will link without error, but may produce a broken library. ++ # The "-G" linker flag allows undefined symbols. ++ no_undefined_flag_CXX='-bernotok' ++ # Determine the default libpath from the value encoded in an empty ++ # executable. ++ if test set = "${lt_cv_aix_libpath+set}"; then ++ aix_libpath=$lt_cv_aix_libpath ++else ++ if test ${lt_cv_aix_libpath__CXX+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++ ++int ++main (void) ++{ ++ ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_cxx_try_link "$LINENO" ++then : ++ ++ lt_aix_libpath_sed=' ++ /Import File Strings/,/^$/ { ++ /^0/ { ++ s/^0 *\([^ ]*\) *$/\1/ ++ p ++ } ++ }' ++ lt_cv_aix_libpath__CXX=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ # Check for a 64-bit object if we didn't find anything. 
++ if test -z "$lt_cv_aix_libpath__CXX"; then ++ lt_cv_aix_libpath__CXX=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ fi ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam \ ++ conftest$ac_exeext conftest.$ac_ext ++ if test -z "$lt_cv_aix_libpath__CXX"; then ++ lt_cv_aix_libpath__CXX=/usr/lib:/lib ++ fi ++ ++fi ++ ++ aix_libpath=$lt_cv_aix_libpath__CXX ++fi ++ ++ hardcode_libdir_flag_spec_CXX='$wl-blibpath:$libdir:'"$aix_libpath" ++ ++ archive_expsym_cmds_CXX='$CC -o $output_objdir/$soname $libobjs $deplibs $wl'$no_entry_flag' $compiler_flags `if test -n "$allow_undefined_flag"; then func_echo_all "$wl$allow_undefined_flag"; else :; fi` $wl'$exp_sym_flag:\$export_symbols' '$shared_flag ++ else ++ if test ia64 = "$host_cpu"; then ++ hardcode_libdir_flag_spec_CXX='$wl-R $libdir:/usr/lib:/lib' ++ allow_undefined_flag_CXX="-z nodefs" ++ archive_expsym_cmds_CXX="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\$wl$no_entry_flag"' $compiler_flags $wl$allow_undefined_flag '"\$wl$exp_sym_flag:\$export_symbols" ++ else ++ # Determine the default libpath from the value encoded in an ++ # empty executable. ++ if test set = "${lt_cv_aix_libpath+set}"; then ++ aix_libpath=$lt_cv_aix_libpath ++else ++ if test ${lt_cv_aix_libpath__CXX+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++ ++int ++main (void) ++{ ++ ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_cxx_try_link "$LINENO" ++then : ++ ++ lt_aix_libpath_sed=' ++ /Import File Strings/,/^$/ { ++ /^0/ { ++ s/^0 *\([^ ]*\) *$/\1/ ++ p ++ } ++ }' ++ lt_cv_aix_libpath__CXX=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ # Check for a 64-bit object if we didn't find anything. 
++ if test -z "$lt_cv_aix_libpath__CXX"; then ++ lt_cv_aix_libpath__CXX=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ fi ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam \ ++ conftest$ac_exeext conftest.$ac_ext ++ if test -z "$lt_cv_aix_libpath__CXX"; then ++ lt_cv_aix_libpath__CXX=/usr/lib:/lib ++ fi ++ ++fi ++ ++ aix_libpath=$lt_cv_aix_libpath__CXX ++fi ++ ++ hardcode_libdir_flag_spec_CXX='$wl-blibpath:$libdir:'"$aix_libpath" ++ # Warning - without using the other run time loading flags, ++ # -berok will link without error, but may produce a broken library. ++ no_undefined_flag_CXX=' $wl-bernotok' ++ allow_undefined_flag_CXX=' $wl-berok' ++ if test yes = "$with_gnu_ld"; then ++ # We only use this code for GNU lds that support --whole-archive. ++ whole_archive_flag_spec_CXX='$wl--whole-archive$convenience $wl--no-whole-archive' ++ else ++ # Exported symbols can be pulled into shared objects from archives ++ whole_archive_flag_spec_CXX='$convenience' ++ fi ++ archive_cmds_need_lc_CXX=yes ++ archive_expsym_cmds_CXX='$RM -r $output_objdir/$realname.d~$MKDIR $output_objdir/$realname.d' ++ # -brtl affects multiple linker settings, -berok does not and is overridden later ++ compiler_flags_filtered='`func_echo_all "$compiler_flags " | $SED -e "s%-brtl\\([, ]\\)%-berok\\1%g"`' ++ if test svr4 != "$with_aix_soname"; then ++ # This is similar to how AIX traditionally builds its shared ++ # libraries. Need -bnortl late, we may have -brtl in LDFLAGS. 
++ archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$CC '$shared_flag_aix' -o $output_objdir/$realname.d/$soname $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$realname.d/$soname' ++ fi ++ if test aix != "$with_aix_soname"; then ++ archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$CC '$shared_flag_svr4' -o $output_objdir/$realname.d/$shared_archive_member_spec.o $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$STRIP -e $output_objdir/$realname.d/$shared_archive_member_spec.o~( func_echo_all "#! $soname($shared_archive_member_spec.o)"; if test shr_64 = "$shared_archive_member_spec"; then func_echo_all "# 64"; else func_echo_all "# 32"; fi; cat $export_symbols ) > $output_objdir/$realname.d/$shared_archive_member_spec.imp~$AR $AR_FLAGS $output_objdir/$soname $output_objdir/$realname.d/$shared_archive_member_spec.o $output_objdir/$realname.d/$shared_archive_member_spec.imp' ++ else ++ # used by -dlpreopen to get the symbols ++ archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$MV $output_objdir/$realname.d/$soname $output_objdir' ++ fi ++ archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$RM -r $output_objdir/$realname.d' ++ fi ++ fi ++ ;; ++ ++ beos*) ++ if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then ++ allow_undefined_flag_CXX=unsupported ++ # Joseph Beckenbach says some releases of gcc ++ # support --undefined. This deserves some investigation. 
FIXME ++ archive_cmds_CXX='$CC -nostart $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' ++ else ++ ld_shlibs_CXX=no ++ fi ++ ;; ++ ++ chorus*) ++ case $cc_basename in ++ *) ++ # FIXME: insert proper C++ library support ++ ld_shlibs_CXX=no ++ ;; ++ esac ++ ;; ++ ++ cygwin* | mingw* | pw32* | cegcc*) ++ case $GXX,$cc_basename in ++ ,cl* | no,cl* | ,icl* | no,icl*) ++ # Native MSVC or ICC ++ # hardcode_libdir_flag_spec is actually meaningless, as there is ++ # no search path for DLLs. ++ hardcode_libdir_flag_spec_CXX=' ' ++ allow_undefined_flag_CXX=unsupported ++ always_export_symbols_CXX=yes ++ file_list_spec_CXX='@' ++ # Tell ltmain to make .lib files, not .a files. ++ libext=lib ++ # Tell ltmain to make .dll files, not .so files. ++ shrext_cmds=.dll ++ # FIXME: Setting linknames here is a bad hack. ++ archive_cmds_CXX='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~linknames=' ++ archive_expsym_cmds_CXX='if test DEF = "`$SED -n -e '\''s/^[ ]*//'\'' -e '\''/^\(;.*\)*$/d'\'' -e '\''s/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p'\'' -e q $export_symbols`" ; then ++ cp "$export_symbols" "$output_objdir/$soname.def"; ++ echo "$tool_output_objdir$soname.def" > "$output_objdir/$soname.exp"; ++ else ++ $SED -e '\''s/^/-link -EXPORT:/'\'' < $export_symbols > $output_objdir/$soname.exp; ++ fi~ ++ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ ++ linknames=' ++ # The linker will not automatically build a static lib if we build a DLL. 
++ # _LT_TAGVAR(old_archive_from_new_cmds, CXX)='true' ++ enable_shared_with_static_runtimes_CXX=yes ++ # Don't use ranlib ++ old_postinstall_cmds_CXX='chmod 644 $oldlib' ++ postlink_cmds_CXX='lt_outputfile="@OUTPUT@"~ ++ lt_tool_outputfile="@TOOL_OUTPUT@"~ ++ case $lt_outputfile in ++ *.exe|*.EXE) ;; ++ *) ++ lt_outputfile=$lt_outputfile.exe ++ lt_tool_outputfile=$lt_tool_outputfile.exe ++ ;; ++ esac~ ++ func_to_tool_file "$lt_outputfile"~ ++ if test : != "$MANIFEST_TOOL" && test -f "$lt_outputfile.manifest"; then ++ $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; ++ $RM "$lt_outputfile.manifest"; ++ fi' ++ ;; ++ *) ++ # g++ ++ # _LT_TAGVAR(hardcode_libdir_flag_spec, CXX) is actually meaningless, ++ # as there is no search path for DLLs. ++ hardcode_libdir_flag_spec_CXX='-L$libdir' ++ export_dynamic_flag_spec_CXX='$wl--export-all-symbols' ++ allow_undefined_flag_CXX=unsupported ++ always_export_symbols_CXX=no ++ enable_shared_with_static_runtimes_CXX=yes ++ ++ if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then ++ archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' ++ # If the export-symbols file already is a .def file, use it as ++ # is; otherwise, prepend EXPORTS... 
++ archive_expsym_cmds_CXX='if test DEF = "`$SED -n -e '\''s/^[ ]*//'\'' -e '\''/^\(;.*\)*$/d'\'' -e '\''s/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p'\'' -e q $export_symbols`" ; then ++ cp $export_symbols $output_objdir/$soname.def; ++ else ++ echo EXPORTS > $output_objdir/$soname.def; ++ cat $export_symbols >> $output_objdir/$soname.def; ++ fi~ ++ $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' ++ else ++ ld_shlibs_CXX=no ++ fi ++ ;; ++ esac ++ ;; ++ darwin* | rhapsody*) ++ ++ ++ archive_cmds_need_lc_CXX=no ++ hardcode_direct_CXX=no ++ hardcode_automatic_CXX=yes ++ hardcode_shlibpath_var_CXX=unsupported ++ if test yes = "$lt_cv_ld_force_load"; then ++ whole_archive_flag_spec_CXX='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience $wl-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' ++ ++ else ++ whole_archive_flag_spec_CXX='' ++ fi ++ link_all_deplibs_CXX=yes ++ allow_undefined_flag_CXX=$_lt_dar_allow_undefined ++ case $cc_basename in ++ ifort*|nagfor*) _lt_dar_can_shared=yes ;; ++ *) _lt_dar_can_shared=$GCC ;; ++ esac ++ if test yes = "$_lt_dar_can_shared"; then ++ output_verbose_link_cmd=func_echo_all ++ archive_cmds_CXX="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dsymutil" ++ module_cmds_CXX="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dsymutil" ++ archive_expsym_cmds_CXX="$SED 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dar_export_syms$_lt_dsymutil" ++ module_expsym_cmds_CXX="$SED -e 's|^|_|' < \$export_symbols > 
\$output_objdir/\$libname-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dar_export_syms$_lt_dsymutil" ++ if test yes != "$lt_cv_apple_cc_single_mod"; then ++ archive_cmds_CXX="\$CC -r -keep_private_externs -nostdlib -o \$lib-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$lib-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring$_lt_dsymutil" ++ archive_expsym_cmds_CXX="$SED 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \$lib-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$lib-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring$_lt_dar_export_syms$_lt_dsymutil" ++ fi ++ ++ else ++ ld_shlibs_CXX=no ++ fi ++ ++ ;; ++ ++ os2*) ++ hardcode_libdir_flag_spec_CXX='-L$libdir' ++ hardcode_minus_L_CXX=yes ++ allow_undefined_flag_CXX=unsupported ++ shrext_cmds=.dll ++ archive_cmds_CXX='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ ++ $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ ++ $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ ++ $ECHO EXPORTS >> $output_objdir/$libname.def~ ++ emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ ++ $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ ++ emximp -o $lib $output_objdir/$libname.def' ++ archive_expsym_cmds_CXX='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ ++ $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ ++ $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ ++ $ECHO EXPORTS >> $output_objdir/$libname.def~ ++ prefix_cmds="$SED"~ ++ if test EXPORTS = "`$SED 1q $export_symbols`"; then ++ prefix_cmds="$prefix_cmds -e 1d"; ++ fi~ ++ prefix_cmds="$prefix_cmds -e 
\"s/^\(.*\)$/_\1/g\""~ ++ cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ ++ $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ ++ emximp -o $lib $output_objdir/$libname.def' ++ old_archive_From_new_cmds_CXX='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' ++ enable_shared_with_static_runtimes_CXX=yes ++ file_list_spec_CXX='@' ++ ;; ++ ++ dgux*) ++ case $cc_basename in ++ ec++*) ++ # FIXME: insert proper C++ library support ++ ld_shlibs_CXX=no ++ ;; ++ ghcx*) ++ # Green Hills C++ Compiler ++ # FIXME: insert proper C++ library support ++ ld_shlibs_CXX=no ++ ;; ++ *) ++ # FIXME: insert proper C++ library support ++ ld_shlibs_CXX=no ++ ;; ++ esac ++ ;; ++ ++ freebsd2.*) ++ # C++ shared libraries reported to be fairly broken before ++ # switch to ELF ++ ld_shlibs_CXX=no ++ ;; ++ ++ freebsd-elf*) ++ archive_cmds_need_lc_CXX=no ++ ;; ++ ++ freebsd* | dragonfly* | midnightbsd*) ++ # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF ++ # conventions ++ ld_shlibs_CXX=yes ++ ;; ++ ++ haiku*) ++ archive_cmds_CXX='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' ++ link_all_deplibs_CXX=yes ++ ;; ++ ++ hpux9*) ++ hardcode_libdir_flag_spec_CXX='$wl+b $wl$libdir' ++ hardcode_libdir_separator_CXX=: ++ export_dynamic_flag_spec_CXX='$wl-E' ++ hardcode_direct_CXX=yes ++ hardcode_minus_L_CXX=yes # Not in the search PATH, ++ # but as the default ++ # location of the library. 
++ ++ case $cc_basename in ++ CC*) ++ # FIXME: insert proper C++ library support ++ ld_shlibs_CXX=no ++ ;; ++ aCC*) ++ archive_cmds_CXX='$RM $output_objdir/$soname~$CC -b $wl+b $wl$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' ++ # Commands to make compiler produce verbose output that lists ++ # what "hidden" libraries, object files and flags are used when ++ # linking a shared library. ++ # ++ # There doesn't appear to be a way to prevent this compiler from ++ # explicitly linking system object files so we need to strip them ++ # from the output so that they don't get included in the library ++ # dependencies. ++ output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP "\-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' ++ ;; ++ *) ++ if test yes = "$GXX"; then ++ archive_cmds_CXX='$RM $output_objdir/$soname~$CC -shared -nostdlib $pic_flag $wl+b $wl$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' ++ else ++ # FIXME: insert proper C++ library support ++ ld_shlibs_CXX=no ++ fi ++ ;; ++ esac ++ ;; ++ ++ hpux10*|hpux11*) ++ if test no = "$with_gnu_ld"; then ++ hardcode_libdir_flag_spec_CXX='$wl+b $wl$libdir' ++ hardcode_libdir_separator_CXX=: ++ ++ case $host_cpu in ++ hppa*64*|ia64*) ++ ;; ++ *) ++ export_dynamic_flag_spec_CXX='$wl-E' ++ ;; ++ esac ++ fi ++ case $host_cpu in ++ hppa*64*|ia64*) ++ hardcode_direct_CXX=no ++ hardcode_shlibpath_var_CXX=no ++ ;; ++ *) ++ hardcode_direct_CXX=yes ++ hardcode_direct_absolute_CXX=yes ++ hardcode_minus_L_CXX=yes # Not in the search PATH, ++ # but as the default ++ # location of the library. 
++ ;; ++ esac ++ ++ case $cc_basename in ++ CC*) ++ # FIXME: insert proper C++ library support ++ ld_shlibs_CXX=no ++ ;; ++ aCC*) ++ case $host_cpu in ++ hppa*64*) ++ archive_cmds_CXX='$CC -b $wl+h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ++ ;; ++ ia64*) ++ archive_cmds_CXX='$CC -b $wl+h $wl$soname $wl+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ++ ;; ++ *) ++ archive_cmds_CXX='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ++ ;; ++ esac ++ # Commands to make compiler produce verbose output that lists ++ # what "hidden" libraries, object files and flags are used when ++ # linking a shared library. ++ # ++ # There doesn't appear to be a way to prevent this compiler from ++ # explicitly linking system object files so we need to strip them ++ # from the output so that they don't get included in the library ++ # dependencies. 
++ output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP "\-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' ++ ;; ++ *) ++ if test yes = "$GXX"; then ++ if test no = "$with_gnu_ld"; then ++ case $host_cpu in ++ hppa*64*) ++ archive_cmds_CXX='$CC -shared -nostdlib -fPIC $wl+h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ++ ;; ++ ia64*) ++ archive_cmds_CXX='$CC -shared -nostdlib $pic_flag $wl+h $wl$soname $wl+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ++ ;; ++ *) ++ archive_cmds_CXX='$CC -shared -nostdlib $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ++ ;; ++ esac ++ fi ++ else ++ # FIXME: insert proper C++ library support ++ ld_shlibs_CXX=no ++ fi ++ ;; ++ esac ++ ;; ++ ++ interix[3-9]*) ++ hardcode_direct_CXX=no ++ hardcode_shlibpath_var_CXX=no ++ hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir' ++ export_dynamic_flag_spec_CXX='$wl-E' ++ # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. ++ # Instead, shared libraries are loaded at an image base (0x10000000 by ++ # default) and relocated if they conflict, which is a slow very memory ++ # consuming and fragmenting process. To avoid this, we pick a random, ++ # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link ++ # time. Moving up from 0x10000000 also allows more sbrk(2) space. 
++ archive_cmds_CXX='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' ++ archive_expsym_cmds_CXX='$SED "s|^|_|" $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--retain-symbols-file,$output_objdir/$soname.expsym $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' ++ ;; ++ irix5* | irix6*) ++ case $cc_basename in ++ CC*) ++ # SGI C++ ++ archive_cmds_CXX='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' ++ ++ # Archives containing C++ object files must be created using ++ # "CC -ar", where "CC" is the IRIX C++ compiler. This is ++ # necessary to make sure instantiated templates are included ++ # in the archive. ++ old_archive_cmds_CXX='$CC -ar -WR,-u -o $oldlib $oldobjs' ++ ;; ++ *) ++ if test yes = "$GXX"; then ++ if test no = "$with_gnu_ld"; then ++ archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' ++ else ++ archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` -o $lib' ++ fi ++ fi ++ link_all_deplibs_CXX=yes ++ ;; ++ esac ++ hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir' ++ hardcode_libdir_separator_CXX=: ++ inherit_rpath_CXX=yes ++ ;; ++ ++ linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) ++ case $cc_basename in ++ KCC*) ++ # Kuck and Associates, Inc. 
(KAI) C++ Compiler ++ ++ # KCC will only create a shared library if the output file ++ # ends with ".so" (or ".sl" for HP-UX), so rename the library ++ # to its proper name (with version) after linking. ++ archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' ++ archive_expsym_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib $wl-retain-symbols-file,$export_symbols; mv \$templib $lib' ++ # Commands to make compiler produce verbose output that lists ++ # what "hidden" libraries, object files and flags are used when ++ # linking a shared library. ++ # ++ # There doesn't appear to be a way to prevent this compiler from ++ # explicitly linking system object files so we need to strip them ++ # from the output so that they don't get included in the library ++ # dependencies. ++ output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' ++ ++ hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir' ++ export_dynamic_flag_spec_CXX='$wl--export-dynamic' ++ ++ # Archives containing C++ object files must be created using ++ # "CC -Bstatic", where "CC" is the KAI C++ compiler. 
++ old_archive_cmds_CXX='$CC -Bstatic -o $oldlib $oldobjs' ++ ;; ++ icpc* | ecpc* ) ++ # Intel C++ ++ with_gnu_ld=yes ++ # version 8.0 and above of icpc choke on multiply defined symbols ++ # if we add $predep_objects and $postdep_objects, however 7.1 and ++ # earlier do not add the objects themselves. ++ case `$CC -V 2>&1` in ++ *"Version 7."*) ++ archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' ++ archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ++ ;; ++ *) # Version 8.0 or newer ++ tmp_idyn= ++ case $host_cpu in ++ ia64*) tmp_idyn=' -i_dynamic';; ++ esac ++ archive_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' ++ archive_expsym_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ++ ;; ++ esac ++ archive_cmds_need_lc_CXX=no ++ hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir' ++ export_dynamic_flag_spec_CXX='$wl--export-dynamic' ++ whole_archive_flag_spec_CXX='$wl--whole-archive$convenience $wl--no-whole-archive' ++ ;; ++ pgCC* | pgcpp*) ++ # Portland Group C++ compiler ++ case `$CC -V` in ++ *pgCC\ [1-5].* | *pgcpp\ [1-5].*) ++ prelink_cmds_CXX='tpldir=Template.dir~ ++ rm -rf $tpldir~ ++ $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~ ++ compile_command="$compile_command `find $tpldir -name \*.o | sort | $NL2SP`"' ++ old_archive_cmds_CXX='tpldir=Template.dir~ ++ rm -rf $tpldir~ ++ $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~ ++ $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | sort | $NL2SP`~ ++ $RANLIB $oldlib' ++ archive_cmds_CXX='tpldir=Template.dir~ ++ rm -rf $tpldir~ ++ $CC --prelink_objects --instantiation_dir $tpldir 
$predep_objects $libobjs $deplibs $convenience $postdep_objects~ ++ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' ++ archive_expsym_cmds_CXX='tpldir=Template.dir~ ++ rm -rf $tpldir~ ++ $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ ++ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ++ ;; ++ *) # Version 6 and above use weak symbols ++ archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' ++ archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ++ ;; ++ esac ++ ++ hardcode_libdir_flag_spec_CXX='$wl--rpath $wl$libdir' ++ export_dynamic_flag_spec_CXX='$wl--export-dynamic' ++ whole_archive_flag_spec_CXX='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' ++ ;; ++ cxx*) ++ # Compaq C++ ++ archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' ++ archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib $wl-retain-symbols-file $wl$export_symbols' ++ ++ runpath_var=LD_RUN_PATH ++ hardcode_libdir_flag_spec_CXX='-rpath $libdir' ++ hardcode_libdir_separator_CXX=: ++ ++ # Commands to make compiler produce verbose output that lists ++ # what "hidden" libraries, object files and flags are used when ++ # linking a shared library. 
++ # ++ # There doesn't appear to be a way to prevent this compiler from ++ # explicitly linking system object files so we need to strip them ++ # from the output so that they don't get included in the library ++ # dependencies. ++ output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "X$list" | $Xsed' ++ ;; ++ xl* | mpixl* | bgxl*) ++ # IBM XL 8.0 on PPC, with GNU ld ++ hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir' ++ export_dynamic_flag_spec_CXX='$wl--export-dynamic' ++ archive_cmds_CXX='$CC -qmkshrobj $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' ++ if test yes = "$supports_anon_versioning"; then ++ archive_expsym_cmds_CXX='echo "{ global:" > $output_objdir/$libname.ver~ ++ cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ ++ echo "local: *; };" >> $output_objdir/$libname.ver~ ++ $CC -qmkshrobj $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-version-script $wl$output_objdir/$libname.ver -o $lib' ++ fi ++ ;; ++ *) ++ case `$CC -V 2>&1 | $SED 5q` in ++ *Sun\ C*) ++ # Sun C++ 5.9 ++ no_undefined_flag_CXX=' -zdefs' ++ archive_cmds_CXX='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ++ archive_expsym_cmds_CXX='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-retain-symbols-file $wl$export_symbols' ++ hardcode_libdir_flag_spec_CXX='-R$libdir' ++ whole_archive_flag_spec_CXX='$wl--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' ++ compiler_needs_object_CXX=yes ++ ++ # Not sure 
whether something based on ++ # $CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 ++ # would be better. ++ output_verbose_link_cmd='func_echo_all' ++ ++ # Archives containing C++ object files must be created using ++ # "CC -xar", where "CC" is the Sun C++ compiler. This is ++ # necessary to make sure instantiated templates are included ++ # in the archive. ++ old_archive_cmds_CXX='$CC -xar -o $oldlib $oldobjs' ++ ;; ++ esac ++ ;; ++ esac ++ ;; ++ ++ lynxos*) ++ # FIXME: insert proper C++ library support ++ ld_shlibs_CXX=no ++ ;; ++ ++ m88k*) ++ # FIXME: insert proper C++ library support ++ ld_shlibs_CXX=no ++ ;; ++ ++ mvs*) ++ case $cc_basename in ++ cxx*) ++ # FIXME: insert proper C++ library support ++ ld_shlibs_CXX=no ++ ;; ++ *) ++ # FIXME: insert proper C++ library support ++ ld_shlibs_CXX=no ++ ;; ++ esac ++ ;; ++ ++ netbsd*) ++ if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then ++ archive_cmds_CXX='$LD -Bshareable -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags' ++ wlarc= ++ hardcode_libdir_flag_spec_CXX='-R$libdir' ++ hardcode_direct_CXX=yes ++ hardcode_shlibpath_var_CXX=no ++ fi ++ # Workaround some broken pre-1.5 toolchains ++ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"' ++ ;; ++ ++ *nto* | *qnx*) ++ ld_shlibs_CXX=yes ++ ;; ++ ++ openbsd* | bitrig*) ++ if test -f /usr/libexec/ld.so; then ++ hardcode_direct_CXX=yes ++ hardcode_shlibpath_var_CXX=no ++ hardcode_direct_absolute_CXX=yes ++ archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' ++ hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir' ++ if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`"; then ++ archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-retain-symbols-file,$export_symbols -o $lib' ++ export_dynamic_flag_spec_CXX='$wl-E' ++ 
whole_archive_flag_spec_CXX=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive' ++ fi ++ output_verbose_link_cmd=func_echo_all ++ else ++ ld_shlibs_CXX=no ++ fi ++ ;; ++ ++ osf3* | osf4* | osf5*) ++ case $cc_basename in ++ KCC*) ++ # Kuck and Associates, Inc. (KAI) C++ Compiler ++ ++ # KCC will only create a shared library if the output file ++ # ends with ".so" (or ".sl" for HP-UX), so rename the library ++ # to its proper name (with version) after linking. ++ archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' ++ ++ hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir' ++ hardcode_libdir_separator_CXX=: ++ ++ # Archives containing C++ object files must be created using ++ # the KAI C++ compiler. ++ case $host in ++ osf3*) old_archive_cmds_CXX='$CC -Bstatic -o $oldlib $oldobjs' ;; ++ *) old_archive_cmds_CXX='$CC -o $oldlib $oldobjs' ;; ++ esac ++ ;; ++ RCC*) ++ # Rational C++ 2.4.1 ++ # FIXME: insert proper C++ library support ++ ld_shlibs_CXX=no ++ ;; ++ cxx*) ++ case $host in ++ osf3*) ++ allow_undefined_flag_CXX=' $wl-expect_unresolved $wl\*' ++ archive_cmds_CXX='$CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $soname `test -n "$verstring" && func_echo_all "$wl-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' ++ hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir' ++ ;; ++ *) ++ allow_undefined_flag_CXX=' -expect_unresolved \*' ++ archive_cmds_CXX='$CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' ++ archive_expsym_cmds_CXX='for i in `cat $export_symbols`; 
do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~ ++ echo "-hidden">> $lib.exp~ ++ $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname $wl-input $wl$lib.exp `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib~ ++ $RM $lib.exp' ++ hardcode_libdir_flag_spec_CXX='-rpath $libdir' ++ ;; ++ esac ++ ++ hardcode_libdir_separator_CXX=: ++ ++ # Commands to make compiler produce verbose output that lists ++ # what "hidden" libraries, object files and flags are used when ++ # linking a shared library. ++ # ++ # There doesn't appear to be a way to prevent this compiler from ++ # explicitly linking system object files so we need to strip them ++ # from the output so that they don't get included in the library ++ # dependencies. ++ output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' ++ ;; ++ *) ++ if test yes,no = "$GXX,$with_gnu_ld"; then ++ allow_undefined_flag_CXX=' $wl-expect_unresolved $wl\*' ++ case $host in ++ osf3*) ++ archive_cmds_CXX='$CC -shared -nostdlib $allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' ++ ;; ++ *) ++ archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-msym $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' ++ ;; ++ esac ++ ++ 
hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir' ++ hardcode_libdir_separator_CXX=: ++ ++ # Commands to make compiler produce verbose output that lists ++ # what "hidden" libraries, object files and flags are used when ++ # linking a shared library. ++ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' ++ ++ else ++ # FIXME: insert proper C++ library support ++ ld_shlibs_CXX=no ++ fi ++ ;; ++ esac ++ ;; ++ ++ psos*) ++ # FIXME: insert proper C++ library support ++ ld_shlibs_CXX=no ++ ;; ++ ++ sunos4*) ++ case $cc_basename in ++ CC*) ++ # Sun C++ 4.x ++ # FIXME: insert proper C++ library support ++ ld_shlibs_CXX=no ++ ;; ++ lcc*) ++ # Lucid ++ # FIXME: insert proper C++ library support ++ ld_shlibs_CXX=no ++ ;; ++ *) ++ # FIXME: insert proper C++ library support ++ ld_shlibs_CXX=no ++ ;; ++ esac ++ ;; ++ ++ solaris*) ++ case $cc_basename in ++ CC* | sunCC*) ++ # Sun C++ 4.2, 5.x and Centerline C++ ++ archive_cmds_need_lc_CXX=yes ++ no_undefined_flag_CXX=' -zdefs' ++ archive_cmds_CXX='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ++ archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ ++ $CC -G$allow_undefined_flag $wl-M $wl$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' ++ ++ hardcode_libdir_flag_spec_CXX='-R$libdir' ++ hardcode_shlibpath_var_CXX=no ++ case $host_os in ++ solaris2.[0-5] | solaris2.[0-5].*) ;; ++ *) ++ # The compiler driver will combine and reorder linker options, ++ # but understands '-z linker_flag'. ++ # Supported since Solaris 2.6 (maybe 2.5.1?) 
++ whole_archive_flag_spec_CXX='-z allextract$convenience -z defaultextract' ++ ;; ++ esac ++ link_all_deplibs_CXX=yes ++ ++ output_verbose_link_cmd='func_echo_all' ++ ++ # Archives containing C++ object files must be created using ++ # "CC -xar", where "CC" is the Sun C++ compiler. This is ++ # necessary to make sure instantiated templates are included ++ # in the archive. ++ old_archive_cmds_CXX='$CC -xar -o $oldlib $oldobjs' ++ ;; ++ gcx*) ++ # Green Hills C++ Compiler ++ archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib' ++ ++ # The C++ compiler must be used to create the archive. ++ old_archive_cmds_CXX='$CC $LDFLAGS -archive -o $oldlib $oldobjs' ++ ;; ++ *) ++ # GNU C++ compiler with Solaris linker ++ if test yes,no = "$GXX,$with_gnu_ld"; then ++ no_undefined_flag_CXX=' $wl-z ${wl}defs' ++ if $CC --version | $GREP -v '^2\.7' > /dev/null; then ++ archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib' ++ archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ ++ $CC -shared $pic_flag -nostdlib $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' ++ ++ # Commands to make compiler produce verbose output that lists ++ # what "hidden" libraries, object files and flags are used when ++ # linking a shared library. ++ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' ++ else ++ # g++ 2.7 appears to require '-G' NOT '-shared' on this ++ # platform. 
++ archive_cmds_CXX='$CC -G -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib' ++ archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ ++ $CC -G -nostdlib $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' ++ ++ # Commands to make compiler produce verbose output that lists ++ # what "hidden" libraries, object files and flags are used when ++ # linking a shared library. ++ output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' ++ fi ++ ++ hardcode_libdir_flag_spec_CXX='$wl-R $wl$libdir' ++ case $host_os in ++ solaris2.[0-5] | solaris2.[0-5].*) ;; ++ *) ++ whole_archive_flag_spec_CXX='$wl-z ${wl}allextract$convenience $wl-z ${wl}defaultextract' ++ ;; ++ esac ++ fi ++ ;; ++ esac ++ ;; ++ ++ sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) ++ no_undefined_flag_CXX='$wl-z,text' ++ archive_cmds_need_lc_CXX=no ++ hardcode_shlibpath_var_CXX=no ++ runpath_var='LD_RUN_PATH' ++ ++ case $cc_basename in ++ CC*) ++ archive_cmds_CXX='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ archive_expsym_cmds_CXX='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ ;; ++ *) ++ archive_cmds_CXX='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ archive_expsym_cmds_CXX='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ ;; ++ esac ++ ;; ++ ++ sysv5* | sco3.2v5* | sco5v6*) ++ # Note: We CANNOT use -z defs as we might desire, because we do not ++ # link with -lc, and that would cause any symbols used from libc to ++ # always be unresolved, which means just about no library would ++ # ever link correctly. 
If we're not using GNU ld we use -z text ++ # though, which does catch some bad symbols but isn't as heavy-handed ++ # as -z defs. ++ no_undefined_flag_CXX='$wl-z,text' ++ allow_undefined_flag_CXX='$wl-z,nodefs' ++ archive_cmds_need_lc_CXX=no ++ hardcode_shlibpath_var_CXX=no ++ hardcode_libdir_flag_spec_CXX='$wl-R,$libdir' ++ hardcode_libdir_separator_CXX=':' ++ link_all_deplibs_CXX=yes ++ export_dynamic_flag_spec_CXX='$wl-Bexport' ++ runpath_var='LD_RUN_PATH' ++ ++ case $cc_basename in ++ CC*) ++ archive_cmds_CXX='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ archive_expsym_cmds_CXX='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ old_archive_cmds_CXX='$CC -Tprelink_objects $oldobjs~ ++ '"$old_archive_cmds_CXX" ++ reload_cmds_CXX='$CC -Tprelink_objects $reload_objs~ ++ '"$reload_cmds_CXX" ++ ;; ++ *) ++ archive_cmds_CXX='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ archive_expsym_cmds_CXX='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ ;; ++ esac ++ ;; ++ ++ tandem*) ++ case $cc_basename in ++ NCC*) ++ # NonStop-UX NCC 3.20 ++ # FIXME: insert proper C++ library support ++ ld_shlibs_CXX=no ++ ;; ++ *) ++ # FIXME: insert proper C++ library support ++ ld_shlibs_CXX=no ++ ;; ++ esac ++ ;; ++ ++ vxworks*) ++ # FIXME: insert proper C++ library support ++ ld_shlibs_CXX=no ++ ;; ++ ++ *) ++ # FIXME: insert proper C++ library support ++ ld_shlibs_CXX=no ++ ;; ++ esac ++ ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs_CXX" >&5 ++printf "%s\n" "$ld_shlibs_CXX" >&6; } ++ test no = "$ld_shlibs_CXX" && can_build_shared=no ++ ++ GCC_CXX=$GXX ++ LD_CXX=$LD ++ ++ ## CAVEAT EMPTOR: ++ ## There is no encapsulation within the following macros, do not change ++ ## the running order or otherwise move them around unless you know exactly ++ ## what you are doing... 
++ # Dependencies to place before and after the object being linked: ++predep_objects_CXX= ++postdep_objects_CXX= ++predeps_CXX= ++postdeps_CXX= ++compiler_lib_search_path_CXX= ++ ++cat > conftest.$ac_ext <<_LT_EOF ++class Foo ++{ ++public: ++ Foo (void) { a = 0; } ++private: ++ int a; ++}; ++_LT_EOF ++ ++ ++_lt_libdeps_save_CFLAGS=$CFLAGS ++case "$CC $CFLAGS " in #( ++*\ -flto*\ *) CFLAGS="$CFLAGS -fno-lto" ;; ++*\ -fwhopr*\ *) CFLAGS="$CFLAGS -fno-whopr" ;; ++*\ -fuse-linker-plugin*\ *) CFLAGS="$CFLAGS -fno-use-linker-plugin" ;; ++esac ++ ++if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 ++ (eval $ac_compile) 2>&5 ++ ac_status=$? ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ test $ac_status = 0; }; then ++ # Parse the compiler output and extract the necessary ++ # objects, libraries and library flags. ++ ++ # Sentinel used to keep track of whether or not we are before ++ # the conftest object file. ++ pre_test_object_deps_done=no ++ ++ for p in `eval "$output_verbose_link_cmd"`; do ++ case $prev$p in ++ ++ -L* | -R* | -l*) ++ # Some compilers place space between "-{L,R}" and the path. ++ # Remove the space. ++ if test x-L = "$p" || ++ test x-R = "$p"; then ++ prev=$p ++ continue ++ fi ++ ++ # Expand the sysroot to ease extracting the directories later. ++ if test -z "$prev"; then ++ case $p in ++ -L*) func_stripname_cnf '-L' '' "$p"; prev=-L; p=$func_stripname_result ;; ++ -R*) func_stripname_cnf '-R' '' "$p"; prev=-R; p=$func_stripname_result ;; ++ -l*) func_stripname_cnf '-l' '' "$p"; prev=-l; p=$func_stripname_result ;; ++ esac ++ fi ++ case $p in ++ =*) func_stripname_cnf '=' '' "$p"; p=$lt_sysroot$func_stripname_result ;; ++ esac ++ if test no = "$pre_test_object_deps_done"; then ++ case $prev in ++ -L | -R) ++ # Internal compiler library paths should come after those ++ # provided the user. The postdeps already come after the ++ # user supplied libs so there is no need to process them. 
++ if test -z "$compiler_lib_search_path_CXX"; then ++ compiler_lib_search_path_CXX=$prev$p ++ else ++ compiler_lib_search_path_CXX="${compiler_lib_search_path_CXX} $prev$p" ++ fi ++ ;; ++ # The "-l" case would never come before the object being ++ # linked, so don't bother handling this case. ++ esac ++ else ++ if test -z "$postdeps_CXX"; then ++ postdeps_CXX=$prev$p ++ else ++ postdeps_CXX="${postdeps_CXX} $prev$p" ++ fi ++ fi ++ prev= ++ ;; ++ ++ *.lto.$objext) ;; # Ignore GCC LTO objects ++ *.$objext) ++ # This assumes that the test object file only shows up ++ # once in the compiler output. ++ if test "$p" = "conftest.$objext"; then ++ pre_test_object_deps_done=yes ++ continue ++ fi ++ ++ if test no = "$pre_test_object_deps_done"; then ++ if test -z "$predep_objects_CXX"; then ++ predep_objects_CXX=$p ++ else ++ predep_objects_CXX="$predep_objects_CXX $p" ++ fi ++ else ++ if test -z "$postdep_objects_CXX"; then ++ postdep_objects_CXX=$p ++ else ++ postdep_objects_CXX="$postdep_objects_CXX $p" ++ fi ++ fi ++ ;; ++ ++ *) ;; # Ignore the rest. ++ ++ esac ++ done ++ ++ # Clean up. ++ rm -f a.out a.exe ++else ++ echo "libtool.m4: error: problem compiling CXX test program" ++fi ++ ++$RM -f confest.$objext ++CFLAGS=$_lt_libdeps_save_CFLAGS ++ ++# PORTME: override above test on systems where it is broken ++case $host_os in ++interix[3-9]*) ++ # Interix 3.5 installs completely hosed .la files for C++, so rather than ++ # hack all around it, let's just trust "g++" to DTRT. ++ predep_objects_CXX= ++ postdep_objects_CXX= ++ postdeps_CXX= ++ ;; ++esac ++ ++ ++case " $postdeps_CXX " in ++*" -lc "*) archive_cmds_need_lc_CXX=no ;; ++esac ++ compiler_lib_search_dirs_CXX= ++if test -n "${compiler_lib_search_path_CXX}"; then ++ compiler_lib_search_dirs_CXX=`echo " ${compiler_lib_search_path_CXX}" | $SED -e 's! -L! 
!g' -e 's!^ !!'` ++fi ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ lt_prog_compiler_wl_CXX= ++lt_prog_compiler_pic_CXX= ++lt_prog_compiler_static_CXX= ++ ++ ++ # C++ specific cases for pic, static, wl, etc. ++ if test yes = "$GXX"; then ++ lt_prog_compiler_wl_CXX='-Wl,' ++ lt_prog_compiler_static_CXX='-static' ++ ++ case $host_os in ++ aix*) ++ # All AIX code is PIC. ++ if test ia64 = "$host_cpu"; then ++ # AIX 5 now supports IA64 processor ++ lt_prog_compiler_static_CXX='-Bstatic' ++ fi ++ lt_prog_compiler_pic_CXX='-fPIC' ++ ;; ++ ++ amigaos*) ++ case $host_cpu in ++ powerpc) ++ # see comment about AmigaOS4 .so support ++ lt_prog_compiler_pic_CXX='-fPIC' ++ ;; ++ m68k) ++ # FIXME: we need at least 68020 code to build shared libraries, but ++ # adding the '-m68020' flag to GCC prevents building anything better, ++ # like '-m68040'. ++ lt_prog_compiler_pic_CXX='-m68020 -resident32 -malways-restore-a4' ++ ;; ++ esac ++ ;; ++ ++ beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) ++ # PIC is the default for these OSes. ++ ;; ++ mingw* | cygwin* | os2* | pw32* | cegcc*) ++ # This hack is so that the source file can tell whether it is being ++ # built for inclusion in a dll (and should export symbols for example). ++ # Although the cygwin gcc ignores -fPIC, still need this for old-style ++ # (--disable-auto-import) libraries ++ lt_prog_compiler_pic_CXX='-DDLL_EXPORT' ++ case $host_os in ++ os2*) ++ lt_prog_compiler_static_CXX='$wl-static' ++ ;; ++ esac ++ ;; ++ darwin* | rhapsody*) ++ # PIC is the default on this platform ++ # Common symbols not allowed in MH_DYLIB files ++ lt_prog_compiler_pic_CXX='-fno-common' ++ ;; ++ *djgpp*) ++ # DJGPP does not support shared libraries at all ++ lt_prog_compiler_pic_CXX= ++ ;; ++ haiku*) ++ # PIC is the default for Haiku. ++ # The "-static" flag exists, but is broken. 
++ lt_prog_compiler_static_CXX= ++ ;; ++ interix[3-9]*) ++ # Interix 3.x gcc -fpic/-fPIC options generate broken code. ++ # Instead, we relocate shared libraries at runtime. ++ ;; ++ sysv4*MP*) ++ if test -d /usr/nec; then ++ lt_prog_compiler_pic_CXX=-Kconform_pic ++ fi ++ ;; ++ hpux*) ++ # PIC is the default for 64-bit PA HP-UX, but not for 32-bit ++ # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag ++ # sets the default TLS model and affects inlining. ++ case $host_cpu in ++ hppa*64*) ++ ;; ++ *) ++ lt_prog_compiler_pic_CXX='-fPIC' ++ ;; ++ esac ++ ;; ++ *qnx* | *nto*) ++ # QNX uses GNU C++, but need to define -shared option too, otherwise ++ # it will coredump. ++ lt_prog_compiler_pic_CXX='-fPIC -shared' ++ ;; ++ *) ++ lt_prog_compiler_pic_CXX='-fPIC' ++ ;; ++ esac ++ else ++ case $host_os in ++ aix[4-9]*) ++ # All AIX code is PIC. ++ if test ia64 = "$host_cpu"; then ++ # AIX 5 now supports IA64 processor ++ lt_prog_compiler_static_CXX='-Bstatic' ++ else ++ lt_prog_compiler_static_CXX='-bnso -bI:/lib/syscalls.exp' ++ fi ++ ;; ++ chorus*) ++ case $cc_basename in ++ cxch68*) ++ # Green Hills C++ Compiler ++ # _LT_TAGVAR(lt_prog_compiler_static, CXX)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a" ++ ;; ++ esac ++ ;; ++ mingw* | cygwin* | os2* | pw32* | cegcc*) ++ # This hack is so that the source file can tell whether it is being ++ # built for inclusion in a dll (and should export symbols for example). 
++ lt_prog_compiler_pic_CXX='-DDLL_EXPORT' ++ ;; ++ dgux*) ++ case $cc_basename in ++ ec++*) ++ lt_prog_compiler_pic_CXX='-KPIC' ++ ;; ++ ghcx*) ++ # Green Hills C++ Compiler ++ lt_prog_compiler_pic_CXX='-pic' ++ ;; ++ *) ++ ;; ++ esac ++ ;; ++ freebsd* | dragonfly* | midnightbsd*) ++ # FreeBSD uses GNU C++ ++ ;; ++ hpux9* | hpux10* | hpux11*) ++ case $cc_basename in ++ CC*) ++ lt_prog_compiler_wl_CXX='-Wl,' ++ lt_prog_compiler_static_CXX='$wl-a ${wl}archive' ++ if test ia64 != "$host_cpu"; then ++ lt_prog_compiler_pic_CXX='+Z' ++ fi ++ ;; ++ aCC*) ++ lt_prog_compiler_wl_CXX='-Wl,' ++ lt_prog_compiler_static_CXX='$wl-a ${wl}archive' ++ case $host_cpu in ++ hppa*64*|ia64*) ++ # +Z the default ++ ;; ++ *) ++ lt_prog_compiler_pic_CXX='+Z' ++ ;; ++ esac ++ ;; ++ *) ++ ;; ++ esac ++ ;; ++ interix*) ++ # This is c89, which is MS Visual C++ (no shared libs) ++ # Anyone wants to do a port? ++ ;; ++ irix5* | irix6* | nonstopux*) ++ case $cc_basename in ++ CC*) ++ lt_prog_compiler_wl_CXX='-Wl,' ++ lt_prog_compiler_static_CXX='-non_shared' ++ # CC pic flag -KPIC is the default. ++ ;; ++ *) ++ ;; ++ esac ++ ;; ++ linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) ++ case $cc_basename in ++ KCC*) ++ # KAI C++ Compiler ++ lt_prog_compiler_wl_CXX='--backend -Wl,' ++ lt_prog_compiler_pic_CXX='-fPIC' ++ ;; ++ ecpc* ) ++ # old Intel C++ for x86_64, which still supported -KPIC. ++ lt_prog_compiler_wl_CXX='-Wl,' ++ lt_prog_compiler_pic_CXX='-KPIC' ++ lt_prog_compiler_static_CXX='-static' ++ ;; ++ icpc* ) ++ # Intel C++, used to be incompatible with GCC. ++ # ICC 10 doesn't accept -KPIC any more. ++ lt_prog_compiler_wl_CXX='-Wl,' ++ lt_prog_compiler_pic_CXX='-fPIC' ++ lt_prog_compiler_static_CXX='-static' ++ ;; ++ pgCC* | pgcpp*) ++ # Portland Group C++ compiler ++ lt_prog_compiler_wl_CXX='-Wl,' ++ lt_prog_compiler_pic_CXX='-fpic' ++ lt_prog_compiler_static_CXX='-Bstatic' ++ ;; ++ cxx*) ++ # Compaq C++ ++ # Make sure the PIC flag is empty. 
It appears that all Alpha ++ # Linux and Compaq Tru64 Unix objects are PIC. ++ lt_prog_compiler_pic_CXX= ++ lt_prog_compiler_static_CXX='-non_shared' ++ ;; ++ xlc* | xlC* | bgxl[cC]* | mpixl[cC]*) ++ # IBM XL 8.0, 9.0 on PPC and BlueGene ++ lt_prog_compiler_wl_CXX='-Wl,' ++ lt_prog_compiler_pic_CXX='-qpic' ++ lt_prog_compiler_static_CXX='-qstaticlink' ++ ;; ++ *) ++ case `$CC -V 2>&1 | $SED 5q` in ++ *Sun\ C*) ++ # Sun C++ 5.9 ++ lt_prog_compiler_pic_CXX='-KPIC' ++ lt_prog_compiler_static_CXX='-Bstatic' ++ lt_prog_compiler_wl_CXX='-Qoption ld ' ++ ;; ++ esac ++ ;; ++ esac ++ ;; ++ lynxos*) ++ ;; ++ m88k*) ++ ;; ++ mvs*) ++ case $cc_basename in ++ cxx*) ++ lt_prog_compiler_pic_CXX='-W c,exportall' ++ ;; ++ *) ++ ;; ++ esac ++ ;; ++ netbsd*) ++ ;; ++ *qnx* | *nto*) ++ # QNX uses GNU C++, but need to define -shared option too, otherwise ++ # it will coredump. ++ lt_prog_compiler_pic_CXX='-fPIC -shared' ++ ;; ++ osf3* | osf4* | osf5*) ++ case $cc_basename in ++ KCC*) ++ lt_prog_compiler_wl_CXX='--backend -Wl,' ++ ;; ++ RCC*) ++ # Rational C++ 2.4.1 ++ lt_prog_compiler_pic_CXX='-pic' ++ ;; ++ cxx*) ++ # Digital/Compaq C++ ++ lt_prog_compiler_wl_CXX='-Wl,' ++ # Make sure the PIC flag is empty. It appears that all Alpha ++ # Linux and Compaq Tru64 Unix objects are PIC. 
++ lt_prog_compiler_pic_CXX= ++ lt_prog_compiler_static_CXX='-non_shared' ++ ;; ++ *) ++ ;; ++ esac ++ ;; ++ psos*) ++ ;; ++ solaris*) ++ case $cc_basename in ++ CC* | sunCC*) ++ # Sun C++ 4.2, 5.x and Centerline C++ ++ lt_prog_compiler_pic_CXX='-KPIC' ++ lt_prog_compiler_static_CXX='-Bstatic' ++ lt_prog_compiler_wl_CXX='-Qoption ld ' ++ ;; ++ gcx*) ++ # Green Hills C++ Compiler ++ lt_prog_compiler_pic_CXX='-PIC' ++ ;; ++ *) ++ ;; ++ esac ++ ;; ++ sunos4*) ++ case $cc_basename in ++ CC*) ++ # Sun C++ 4.x ++ lt_prog_compiler_pic_CXX='-pic' ++ lt_prog_compiler_static_CXX='-Bstatic' ++ ;; ++ lcc*) ++ # Lucid ++ lt_prog_compiler_pic_CXX='-pic' ++ ;; ++ *) ++ ;; ++ esac ++ ;; ++ sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) ++ case $cc_basename in ++ CC*) ++ lt_prog_compiler_wl_CXX='-Wl,' ++ lt_prog_compiler_pic_CXX='-KPIC' ++ lt_prog_compiler_static_CXX='-Bstatic' ++ ;; ++ esac ++ ;; ++ tandem*) ++ case $cc_basename in ++ NCC*) ++ # NonStop-UX NCC 3.20 ++ lt_prog_compiler_pic_CXX='-KPIC' ++ ;; ++ *) ++ ;; ++ esac ++ ;; ++ vxworks*) ++ ;; ++ *) ++ lt_prog_compiler_can_build_shared_CXX=no ++ ;; ++ esac ++ fi ++ ++case $host_os in ++ # For platforms that do not support PIC, -DPIC is meaningless: ++ *djgpp*) ++ lt_prog_compiler_pic_CXX= ++ ;; ++ *) ++ lt_prog_compiler_pic_CXX="$lt_prog_compiler_pic_CXX -DPIC" ++ ;; ++esac ++ ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 ++printf %s "checking for $compiler option to produce PIC... " >&6; } ++if test ${lt_cv_prog_compiler_pic_CXX+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ lt_cv_prog_compiler_pic_CXX=$lt_prog_compiler_pic_CXX ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_CXX" >&5 ++printf "%s\n" "$lt_cv_prog_compiler_pic_CXX" >&6; } ++lt_prog_compiler_pic_CXX=$lt_cv_prog_compiler_pic_CXX ++ ++# ++# Check to make sure the PIC flag actually works. 
++# ++if test -n "$lt_prog_compiler_pic_CXX"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works" >&5 ++printf %s "checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works... " >&6; } ++if test ${lt_cv_prog_compiler_pic_works_CXX+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ lt_cv_prog_compiler_pic_works_CXX=no ++ ac_outfile=conftest.$ac_objext ++ echo "$lt_simple_compile_test_code" > conftest.$ac_ext ++ lt_compiler_flag="$lt_prog_compiler_pic_CXX -DPIC" ## exclude from sc_useless_quotes_in_assignment ++ # Insert the option either (1) after the last *FLAGS variable, or ++ # (2) before a word containing "conftest.", or (3) at the end. ++ # Note that $ac_compile itself does not contain backslashes and begins ++ # with a dollar sign (not a hyphen), so the echo should work correctly. ++ # The option is referenced via a variable to avoid confusing sed. ++ lt_compile=`echo "$ac_compile" | $SED \ ++ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ ++ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ ++ -e 's:$: $lt_compiler_flag:'` ++ (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) ++ (eval "$lt_compile" 2>conftest.err) ++ ac_status=$? ++ cat conftest.err >&5 ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ if (exit $ac_status) && test -s "$ac_outfile"; then ++ # The compiler can only warn and ignore the option if not recognized ++ # So say no if there are warnings other than the usual output. ++ $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp ++ $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 ++ if test ! 
-s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then ++ lt_cv_prog_compiler_pic_works_CXX=yes ++ fi ++ fi ++ $RM conftest* ++ ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works_CXX" >&5 ++printf "%s\n" "$lt_cv_prog_compiler_pic_works_CXX" >&6; } ++ ++if test yes = "$lt_cv_prog_compiler_pic_works_CXX"; then ++ case $lt_prog_compiler_pic_CXX in ++ "" | " "*) ;; ++ *) lt_prog_compiler_pic_CXX=" $lt_prog_compiler_pic_CXX" ;; ++ esac ++else ++ lt_prog_compiler_pic_CXX= ++ lt_prog_compiler_can_build_shared_CXX=no ++fi ++ ++fi ++ ++ ++ ++ ++ ++# ++# Check to make sure the static flag actually works. ++# ++wl=$lt_prog_compiler_wl_CXX eval lt_tmp_static_flag=\"$lt_prog_compiler_static_CXX\" ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5 ++printf %s "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; } ++if test ${lt_cv_prog_compiler_static_works_CXX+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ lt_cv_prog_compiler_static_works_CXX=no ++ save_LDFLAGS=$LDFLAGS ++ LDFLAGS="$LDFLAGS $lt_tmp_static_flag" ++ echo "$lt_simple_link_test_code" > conftest.$ac_ext ++ if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then ++ # The linker can only warn and ignore the option if not recognized ++ # So say no if there are warnings ++ if test -s conftest.err; then ++ # Append any errors to the config.log. 
++ cat conftest.err 1>&5 ++ $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp ++ $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 ++ if diff conftest.exp conftest.er2 >/dev/null; then ++ lt_cv_prog_compiler_static_works_CXX=yes ++ fi ++ else ++ lt_cv_prog_compiler_static_works_CXX=yes ++ fi ++ fi ++ $RM -r conftest* ++ LDFLAGS=$save_LDFLAGS ++ ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works_CXX" >&5 ++printf "%s\n" "$lt_cv_prog_compiler_static_works_CXX" >&6; } ++ ++if test yes = "$lt_cv_prog_compiler_static_works_CXX"; then ++ : ++else ++ lt_prog_compiler_static_CXX= ++fi ++ ++ ++ ++ ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 ++printf %s "checking if $compiler supports -c -o file.$ac_objext... " >&6; } ++if test ${lt_cv_prog_compiler_c_o_CXX+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ lt_cv_prog_compiler_c_o_CXX=no ++ $RM -r conftest 2>/dev/null ++ mkdir conftest ++ cd conftest ++ mkdir out ++ echo "$lt_simple_compile_test_code" > conftest.$ac_ext ++ ++ lt_compiler_flag="-o out/conftest2.$ac_objext" ++ # Insert the option either (1) after the last *FLAGS variable, or ++ # (2) before a word containing "conftest.", or (3) at the end. ++ # Note that $ac_compile itself does not contain backslashes and begins ++ # with a dollar sign (not a hyphen), so the echo should work correctly. ++ lt_compile=`echo "$ac_compile" | $SED \ ++ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ ++ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ ++ -e 's:$: $lt_compiler_flag:'` ++ (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) ++ (eval "$lt_compile" 2>out/conftest.err) ++ ac_status=$? ++ cat out/conftest.err >&5 ++ echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 ++ if (exit $ac_status) && test -s out/conftest2.$ac_objext ++ then ++ # The compiler can only warn and ignore the option if not recognized ++ # So say no if there are warnings ++ $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp ++ $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 ++ if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then ++ lt_cv_prog_compiler_c_o_CXX=yes ++ fi ++ fi ++ chmod u+w . 2>&5 ++ $RM conftest* ++ # SGI C++ compiler will create directory out/ii_files/ for ++ # template instantiation ++ test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files ++ $RM out/* && rmdir out ++ cd .. ++ $RM -r conftest ++ $RM conftest* ++ ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o_CXX" >&5 ++printf "%s\n" "$lt_cv_prog_compiler_c_o_CXX" >&6; } ++ ++ ++ ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 ++printf %s "checking if $compiler supports -c -o file.$ac_objext... " >&6; } ++if test ${lt_cv_prog_compiler_c_o_CXX+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ lt_cv_prog_compiler_c_o_CXX=no ++ $RM -r conftest 2>/dev/null ++ mkdir conftest ++ cd conftest ++ mkdir out ++ echo "$lt_simple_compile_test_code" > conftest.$ac_ext ++ ++ lt_compiler_flag="-o out/conftest2.$ac_objext" ++ # Insert the option either (1) after the last *FLAGS variable, or ++ # (2) before a word containing "conftest.", or (3) at the end. ++ # Note that $ac_compile itself does not contain backslashes and begins ++ # with a dollar sign (not a hyphen), so the echo should work correctly. ++ lt_compile=`echo "$ac_compile" | $SED \ ++ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ ++ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ ++ -e 's:$: $lt_compiler_flag:'` ++ (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) ++ (eval "$lt_compile" 2>out/conftest.err) ++ ac_status=$? 
++ cat out/conftest.err >&5 ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ if (exit $ac_status) && test -s out/conftest2.$ac_objext ++ then ++ # The compiler can only warn and ignore the option if not recognized ++ # So say no if there are warnings ++ $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp ++ $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 ++ if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then ++ lt_cv_prog_compiler_c_o_CXX=yes ++ fi ++ fi ++ chmod u+w . 2>&5 ++ $RM conftest* ++ # SGI C++ compiler will create directory out/ii_files/ for ++ # template instantiation ++ test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files ++ $RM out/* && rmdir out ++ cd .. ++ $RM -r conftest ++ $RM conftest* ++ ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o_CXX" >&5 ++printf "%s\n" "$lt_cv_prog_compiler_c_o_CXX" >&6; } ++ ++ ++ ++ ++hard_links=nottested ++if test no = "$lt_cv_prog_compiler_c_o_CXX" && test no != "$need_locks"; then ++ # do not overwrite the value of need_locks provided by the user ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5 ++printf %s "checking if we can lock with hard links... 
" >&6; } ++ hard_links=yes ++ $RM conftest* ++ ln conftest.a conftest.b 2>/dev/null && hard_links=no ++ touch conftest.a ++ ln conftest.a conftest.b 2>&5 || hard_links=no ++ ln conftest.a conftest.b 2>/dev/null && hard_links=no ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5 ++printf "%s\n" "$hard_links" >&6; } ++ if test no = "$hard_links"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&5 ++printf "%s\n" "$as_me: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&2;} ++ need_locks=warn ++ fi ++else ++ need_locks=no ++fi ++ ++ ++ ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 ++printf %s "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; } ++ ++ export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' ++ exclude_expsyms_CXX='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' ++ case $host_os in ++ aix[4-9]*) ++ # If we're using GNU nm, then we don't want the "-C" option. ++ # -C means demangle to GNU nm, but means don't demangle to AIX nm. ++ # Without the "-l" option, or with the "-B" option, AIX nm treats ++ # weak defined symbols like other global defined symbols, whereas ++ # GNU nm marks them as "W". ++ # While the 'weak' keyword is ignored in the Export File, we need ++ # it in the Import File for the 'aix-soname' feature, so we have ++ # to replace the "-B" option with "-P" for AIX nm. 
++ if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then ++ export_symbols_cmds_CXX='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { if (\$ 2 == "W") { print \$ 3 " weak" } else { print \$ 3 } } }'\'' | sort -u > $export_symbols' ++ else ++ export_symbols_cmds_CXX='`func_echo_all $NM | $SED -e '\''s/B\([^B]*\)$/P\1/'\''` -PCpgl $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "L") || (\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) && (substr(\$ 1,1,1) != ".")) { if ((\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) { print \$ 1 " weak" } else { print \$ 1 } } }'\'' | sort -u > $export_symbols' ++ fi ++ ;; ++ pw32*) ++ export_symbols_cmds_CXX=$ltdll_cmds ++ ;; ++ cygwin* | mingw* | cegcc*) ++ case $cc_basename in ++ cl* | icl*) ++ exclude_expsyms_CXX='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' ++ ;; ++ *) ++ export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' ++ exclude_expsyms_CXX='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' ++ ;; ++ esac ++ ;; ++ *) ++ export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' ++ ;; ++ esac ++ ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs_CXX" >&5 ++printf "%s\n" "$ld_shlibs_CXX" >&6; } ++test no = "$ld_shlibs_CXX" && can_build_shared=no ++ ++with_gnu_ld_CXX=$with_gnu_ld ++ ++ ++ ++ ++ ++ ++# ++# Do we need to explicitly link libc? 
++# ++case "x$archive_cmds_need_lc_CXX" in ++x|xyes) ++ # Assume -lc should be added ++ archive_cmds_need_lc_CXX=yes ++ ++ if test yes,yes = "$GCC,$enable_shared"; then ++ case $archive_cmds_CXX in ++ *'~'*) ++ # FIXME: we may have to deal with multi-command sequences. ++ ;; ++ '$CC '*) ++ # Test whether the compiler implicitly links with -lc since on some ++ # systems, -lgcc has to come before -lc. If gcc already passes -lc ++ # to ld, don't add -lc before -lgcc. ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5 ++printf %s "checking whether -lc should be explicitly linked in... " >&6; } ++if test ${lt_cv_archive_cmds_need_lc_CXX+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ $RM conftest* ++ echo "$lt_simple_compile_test_code" > conftest.$ac_ext ++ ++ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 ++ (eval $ac_compile) 2>&5 ++ ac_status=$? ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ test $ac_status = 0; } 2>conftest.err; then ++ soname=conftest ++ lib=conftest ++ libobjs=conftest.$ac_objext ++ deplibs= ++ wl=$lt_prog_compiler_wl_CXX ++ pic_flag=$lt_prog_compiler_pic_CXX ++ compiler_flags=-v ++ linker_flags=-v ++ verstring= ++ output_objdir=. ++ libname=conftest ++ lt_save_allow_undefined_flag=$allow_undefined_flag_CXX ++ allow_undefined_flag_CXX= ++ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds_CXX 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5 ++ (eval $archive_cmds_CXX 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5 ++ ac_status=$? ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 ++ test $ac_status = 0; } ++ then ++ lt_cv_archive_cmds_need_lc_CXX=no ++ else ++ lt_cv_archive_cmds_need_lc_CXX=yes ++ fi ++ allow_undefined_flag_CXX=$lt_save_allow_undefined_flag ++ else ++ cat conftest.err 1>&5 ++ fi ++ $RM conftest* ++ ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc_CXX" >&5 ++printf "%s\n" "$lt_cv_archive_cmds_need_lc_CXX" >&6; } ++ archive_cmds_need_lc_CXX=$lt_cv_archive_cmds_need_lc_CXX ++ ;; ++ esac ++ fi ++ ;; ++esac ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5 ++printf %s "checking dynamic linker characteristics... " >&6; } ++ ++library_names_spec= ++libname_spec='lib$name' ++soname_spec= ++shrext_cmds=.so ++postinstall_cmds= ++postuninstall_cmds= ++finish_cmds= ++finish_eval= ++shlibpath_var= ++shlibpath_overrides_runpath=unknown ++version_type=none ++dynamic_linker="$host_os ld.so" ++sys_lib_dlsearch_path_spec="/lib /usr/lib" ++need_lib_prefix=unknown ++hardcode_into_libs=no ++ ++# when you set need_version to no, make sure it does not cause -set_version ++# flags to be left without arguments ++need_version=unknown ++ ++ ++ ++case $host_os in ++aix3*) ++ version_type=linux # correct to gnu/linux during the next big refactor ++ library_names_spec='$libname$release$shared_ext$versuffix $libname.a' ++ shlibpath_var=LIBPATH ++ ++ # AIX 3 has no versioning support, so we append a major version to the name. 
++ soname_spec='$libname$release$shared_ext$major' ++ ;; ++ ++aix[4-9]*) ++ version_type=linux # correct to gnu/linux during the next big refactor ++ need_lib_prefix=no ++ need_version=no ++ hardcode_into_libs=yes ++ if test ia64 = "$host_cpu"; then ++ # AIX 5 supports IA64 ++ library_names_spec='$libname$release$shared_ext$major $libname$release$shared_ext$versuffix $libname$shared_ext' ++ shlibpath_var=LD_LIBRARY_PATH ++ else ++ # With GCC up to 2.95.x, collect2 would create an import file ++ # for dependence libraries. The import file would start with ++ # the line '#! .'. This would cause the generated library to ++ # depend on '.', always an invalid library. This was fixed in ++ # development snapshots of GCC prior to 3.0. ++ case $host_os in ++ aix4 | aix4.[01] | aix4.[01].*) ++ if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' ++ echo ' yes ' ++ echo '#endif'; } | $CC -E - | $GREP yes > /dev/null; then ++ : ++ else ++ can_build_shared=no ++ fi ++ ;; ++ esac ++ # Using Import Files as archive members, it is possible to support ++ # filename-based versioning of shared library archives on AIX. While ++ # this would work for both with and without runtime linking, it will ++ # prevent static linking of such archives. So we do filename-based ++ # shared library versioning with .so extension only, which is used ++ # when both runtime linking and shared linking is enabled. ++ # Unfortunately, runtime linking may impact performance, so we do ++ # not want this to be the default eventually. Also, we use the ++ # versioned .so libs for executables only if there is the -brtl ++ # linker flag in LDFLAGS as well, or --with-aix-soname=svr4 only. 
++ # To allow for filename-based versioning support, we need to create ++ # libNAME.so.V as an archive file, containing: ++ # *) an Import File, referring to the versioned filename of the ++ # archive as well as the shared archive member, telling the ++ # bitwidth (32 or 64) of that shared object, and providing the ++ # list of exported symbols of that shared object, eventually ++ # decorated with the 'weak' keyword ++ # *) the shared object with the F_LOADONLY flag set, to really avoid ++ # it being seen by the linker. ++ # At run time we better use the real file rather than another symlink, ++ # but for link time we create the symlink libNAME.so -> libNAME.so.V ++ ++ case $with_aix_soname,$aix_use_runtimelinking in ++ # AIX (on Power*) has no versioning support, so currently we cannot hardcode correct ++ # soname into executable. Probably we can add versioning support to ++ # collect2, so additional links can be useful in future. ++ aix,yes) # traditional libtool ++ dynamic_linker='AIX unversionable lib.so' ++ # If using run time linking (on AIX 4.2 or later) use lib.so ++ # instead of lib.a to let people know that these are not ++ # typical AIX shared libraries. ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ ;; ++ aix,no) # traditional AIX only ++ dynamic_linker='AIX lib.a(lib.so.V)' ++ # We preserve .a as extension for shared libraries through AIX4.2 ++ # and later when we are not doing run time linking. ++ library_names_spec='$libname$release.a $libname.a' ++ soname_spec='$libname$release$shared_ext$major' ++ ;; ++ svr4,*) # full svr4 only ++ dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o)" ++ library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' ++ # We do not specify a path in Import Files, so LIBPATH fires. 
++ shlibpath_overrides_runpath=yes ++ ;; ++ *,yes) # both, prefer svr4 ++ dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o), lib.a(lib.so.V)" ++ library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' ++ # unpreferred sharedlib libNAME.a needs extra handling ++ postinstall_cmds='test -n "$linkname" || linkname="$realname"~func_stripname "" ".so" "$linkname"~$install_shared_prog "$dir/$func_stripname_result.$libext" "$destdir/$func_stripname_result.$libext"~test -z "$tstripme" || test -z "$striplib" || $striplib "$destdir/$func_stripname_result.$libext"' ++ postuninstall_cmds='for n in $library_names $old_library; do :; done~func_stripname "" ".so" "$n"~test "$func_stripname_result" = "$n" || func_append rmfiles " $odir/$func_stripname_result.$libext"' ++ # We do not specify a path in Import Files, so LIBPATH fires. ++ shlibpath_overrides_runpath=yes ++ ;; ++ *,no) # both, prefer aix ++ dynamic_linker="AIX lib.a(lib.so.V), lib.so.V($shared_archive_member_spec.o)" ++ library_names_spec='$libname$release.a $libname.a' ++ soname_spec='$libname$release$shared_ext$major' ++ # unpreferred sharedlib libNAME.so.V and symlink libNAME.so need extra handling ++ postinstall_cmds='test -z "$dlname" || $install_shared_prog $dir/$dlname $destdir/$dlname~test -z "$tstripme" || test -z "$striplib" || $striplib $destdir/$dlname~test -n "$linkname" || linkname=$realname~func_stripname "" ".a" "$linkname"~(cd "$destdir" && $LN_S -f $dlname $func_stripname_result.so)' ++ postuninstall_cmds='test -z "$dlname" || func_append rmfiles " $odir/$dlname"~for n in $old_library $library_names; do :; done~func_stripname "" ".a" "$n"~func_append rmfiles " $odir/$func_stripname_result.so"' ++ ;; ++ esac ++ shlibpath_var=LIBPATH ++ fi ++ ;; ++ ++amigaos*) ++ case $host_cpu in ++ powerpc) ++ # Since July 2007 AmigaOS4 officially supports .so libraries. ++ # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. 
++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ ;; ++ m68k) ++ library_names_spec='$libname.ixlibrary $libname.a' ++ # Create ${libname}_ixlibrary.a entries in /sys/libs. ++ finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' ++ ;; ++ esac ++ ;; ++ ++beos*) ++ library_names_spec='$libname$shared_ext' ++ dynamic_linker="$host_os ld.so" ++ shlibpath_var=LIBRARY_PATH ++ ;; ++ ++bsdi[45]*) ++ version_type=linux # correct to gnu/linux during the next big refactor ++ need_version=no ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' ++ shlibpath_var=LD_LIBRARY_PATH ++ sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" ++ sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" ++ # the default ld.so.conf also contains /usr/contrib/lib and ++ # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow ++ # libtool to hard-code these into programs ++ ;; ++ ++cygwin* | mingw* | pw32* | cegcc*) ++ version_type=windows ++ shrext_cmds=.dll ++ need_version=no ++ need_lib_prefix=no ++ ++ case $GCC,$cc_basename in ++ yes,*) ++ # gcc ++ library_names_spec='$libname.dll.a' ++ # DLL is installed to $(libdir)/../bin by postinstall_cmds ++ postinstall_cmds='base_file=`basename \$file`~ ++ dlpath=`$SHELL 2>&1 -c '\''. 
$dir/'\''\$base_file'\''i; echo \$dlname'\''`~ ++ dldir=$destdir/`dirname \$dlpath`~ ++ test -d \$dldir || mkdir -p \$dldir~ ++ $install_prog $dir/$dlname \$dldir/$dlname~ ++ chmod a+x \$dldir/$dlname~ ++ if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then ++ eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; ++ fi' ++ postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ ++ dlpath=$dir/\$dldll~ ++ $RM \$dlpath' ++ shlibpath_overrides_runpath=yes ++ ++ case $host_os in ++ cygwin*) ++ # Cygwin DLLs use 'cyg' prefix rather than 'lib' ++ soname_spec='`echo $libname | $SED -e 's/^lib/cyg/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' ++ ++ ;; ++ mingw* | cegcc*) ++ # MinGW DLLs use traditional 'lib' prefix ++ soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' ++ ;; ++ pw32*) ++ # pw32 DLLs use 'pw' prefix rather than 'lib' ++ library_names_spec='`echo $libname | $SED -e 's/^lib/pw/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' ++ ;; ++ esac ++ dynamic_linker='Win32 ld.exe' ++ ;; ++ ++ *,cl* | *,icl*) ++ # Native MSVC or ICC ++ libname_spec='$name' ++ soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' ++ library_names_spec='$libname.dll.lib' ++ ++ case $build_os in ++ mingw*) ++ sys_lib_search_path_spec= ++ lt_save_ifs=$IFS ++ IFS=';' ++ for lt_path in $LIB ++ do ++ IFS=$lt_save_ifs ++ # Let DOS variable expansion print the short 8.3 style file name. ++ lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` ++ sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" ++ done ++ IFS=$lt_save_ifs ++ # Convert to MSYS style. ++ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` ++ ;; ++ cygwin*) ++ # Convert to unix form, then to dos form, then back to unix form ++ # but this time dos style (no spaces!) 
so that the unix form looks ++ # like /cygdrive/c/PROGRA~1:/cygdr... ++ sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` ++ sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` ++ sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` ++ ;; ++ *) ++ sys_lib_search_path_spec=$LIB ++ if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then ++ # It is most probably a Windows format PATH. ++ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` ++ else ++ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` ++ fi ++ # FIXME: find the short name or the path components, as spaces are ++ # common. (e.g. "Program Files" -> "PROGRA~1") ++ ;; ++ esac ++ ++ # DLL is installed to $(libdir)/../bin by postinstall_cmds ++ postinstall_cmds='base_file=`basename \$file`~ ++ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ ++ dldir=$destdir/`dirname \$dlpath`~ ++ test -d \$dldir || mkdir -p \$dldir~ ++ $install_prog $dir/$dlname \$dldir/$dlname' ++ postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ ++ dlpath=$dir/\$dldll~ ++ $RM \$dlpath' ++ shlibpath_overrides_runpath=yes ++ dynamic_linker='Win32 link.exe' ++ ;; ++ ++ *) ++ # Assume MSVC and ICC wrapper ++ library_names_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext $libname.lib' ++ dynamic_linker='Win32 ld.exe' ++ ;; ++ esac ++ # FIXME: first we should search . 
and the directory the executable is in ++ shlibpath_var=PATH ++ ;; ++ ++darwin* | rhapsody*) ++ dynamic_linker="$host_os dyld" ++ version_type=darwin ++ need_lib_prefix=no ++ need_version=no ++ library_names_spec='$libname$release$major$shared_ext $libname$shared_ext' ++ soname_spec='$libname$release$major$shared_ext' ++ shlibpath_overrides_runpath=yes ++ shlibpath_var=DYLD_LIBRARY_PATH ++ shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' ++ ++ sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' ++ ;; ++ ++dgux*) ++ version_type=linux # correct to gnu/linux during the next big refactor ++ need_lib_prefix=no ++ need_version=no ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ shlibpath_var=LD_LIBRARY_PATH ++ ;; ++ ++freebsd* | dragonfly* | midnightbsd*) ++ # DragonFly does not have aout. When/if they implement a new ++ # versioning mechanism, adjust this. 
++ if test -x /usr/bin/objformat; then ++ objformat=`/usr/bin/objformat` ++ else ++ case $host_os in ++ freebsd[23].*) objformat=aout ;; ++ *) objformat=elf ;; ++ esac ++ fi ++ version_type=freebsd-$objformat ++ case $version_type in ++ freebsd-elf*) ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ need_version=no ++ need_lib_prefix=no ++ ;; ++ freebsd-*) ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' ++ need_version=yes ++ ;; ++ esac ++ shlibpath_var=LD_LIBRARY_PATH ++ case $host_os in ++ freebsd2.*) ++ shlibpath_overrides_runpath=yes ++ ;; ++ freebsd3.[01]* | freebsdelf3.[01]*) ++ shlibpath_overrides_runpath=yes ++ hardcode_into_libs=yes ++ ;; ++ freebsd3.[2-9]* | freebsdelf3.[2-9]* | \ ++ freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1) ++ shlibpath_overrides_runpath=no ++ hardcode_into_libs=yes ++ ;; ++ *) # from 4.6 on, and DragonFly ++ shlibpath_overrides_runpath=yes ++ hardcode_into_libs=yes ++ ;; ++ esac ++ ;; ++ ++haiku*) ++ version_type=linux # correct to gnu/linux during the next big refactor ++ need_lib_prefix=no ++ need_version=no ++ dynamic_linker="$host_os runtime_loader" ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ shlibpath_var=LIBRARY_PATH ++ shlibpath_overrides_runpath=no ++ sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' ++ hardcode_into_libs=yes ++ ;; ++ ++hpux9* | hpux10* | hpux11*) ++ # Give a soname corresponding to the major version so that dld.sl refuses to ++ # link against other versions. 
++ version_type=sunos ++ need_lib_prefix=no ++ need_version=no ++ case $host_cpu in ++ ia64*) ++ shrext_cmds='.so' ++ hardcode_into_libs=yes ++ dynamic_linker="$host_os dld.so" ++ shlibpath_var=LD_LIBRARY_PATH ++ shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ if test 32 = "$HPUX_IA64_MODE"; then ++ sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" ++ sys_lib_dlsearch_path_spec=/usr/lib/hpux32 ++ else ++ sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" ++ sys_lib_dlsearch_path_spec=/usr/lib/hpux64 ++ fi ++ ;; ++ hppa*64*) ++ shrext_cmds='.sl' ++ hardcode_into_libs=yes ++ dynamic_linker="$host_os dld.sl" ++ shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH ++ shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" ++ sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ++ ;; ++ *) ++ shrext_cmds='.sl' ++ dynamic_linker="$host_os dld.sl" ++ shlibpath_var=SHLIB_PATH ++ shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ ;; ++ esac ++ # HP-UX runs *really* slowly unless shared libraries are mode 555, ... 
++ postinstall_cmds='chmod 555 $lib' ++ # or fails outright, so override atomically: ++ install_override_mode=555 ++ ;; ++ ++interix[3-9]*) ++ version_type=linux # correct to gnu/linux during the next big refactor ++ need_lib_prefix=no ++ need_version=no ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' ++ shlibpath_var=LD_LIBRARY_PATH ++ shlibpath_overrides_runpath=no ++ hardcode_into_libs=yes ++ ;; ++ ++irix5* | irix6* | nonstopux*) ++ case $host_os in ++ nonstopux*) version_type=nonstopux ;; ++ *) ++ if test yes = "$lt_cv_prog_gnu_ld"; then ++ version_type=linux # correct to gnu/linux during the next big refactor ++ else ++ version_type=irix ++ fi ;; ++ esac ++ need_lib_prefix=no ++ need_version=no ++ soname_spec='$libname$release$shared_ext$major' ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$release$shared_ext $libname$shared_ext' ++ case $host_os in ++ irix5* | nonstopux*) ++ libsuff= shlibsuff= ++ ;; ++ *) ++ case $LD in # libtool.m4 will add one of these switches to LD ++ *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") ++ libsuff= shlibsuff= libmagic=32-bit;; ++ *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") ++ libsuff=32 shlibsuff=N32 libmagic=N32;; ++ *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") ++ libsuff=64 shlibsuff=64 libmagic=64-bit;; ++ *) libsuff= shlibsuff= libmagic=never-match;; ++ esac ++ ;; ++ esac ++ shlibpath_var=LD_LIBRARY${shlibsuff}_PATH ++ shlibpath_overrides_runpath=no ++ sys_lib_search_path_spec="/usr/lib$libsuff /lib$libsuff /usr/local/lib$libsuff" ++ sys_lib_dlsearch_path_spec="/usr/lib$libsuff /lib$libsuff" ++ hardcode_into_libs=yes ++ ;; ++ ++# No shared lib support for Linux oldld, aout, or coff. 
++linux*oldld* | linux*aout* | linux*coff*) ++ dynamic_linker=no ++ ;; ++ ++linux*android*) ++ version_type=none # Android doesn't support versioned libraries. ++ need_lib_prefix=no ++ need_version=no ++ library_names_spec='$libname$release$shared_ext' ++ soname_spec='$libname$release$shared_ext' ++ finish_cmds= ++ shlibpath_var=LD_LIBRARY_PATH ++ shlibpath_overrides_runpath=yes ++ ++ # This implies no fast_install, which is unacceptable. ++ # Some rework will be needed to allow for fast_install ++ # before this can be enabled. ++ hardcode_into_libs=yes ++ ++ dynamic_linker='Android linker' ++ # Don't embed -rpath directories since the linker doesn't support them. ++ hardcode_libdir_flag_spec_CXX='-L$libdir' ++ ;; ++ ++# This must be glibc/ELF. ++linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) ++ version_type=linux # correct to gnu/linux during the next big refactor ++ need_lib_prefix=no ++ need_version=no ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' ++ shlibpath_var=LD_LIBRARY_PATH ++ shlibpath_overrides_runpath=no ++ ++ # Some binutils ld are patched to set DT_RUNPATH ++ if test ${lt_cv_shlibpath_overrides_runpath+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ lt_cv_shlibpath_overrides_runpath=no ++ save_LDFLAGS=$LDFLAGS ++ save_libdir=$libdir ++ eval "libdir=/foo; wl=\"$lt_prog_compiler_wl_CXX\"; \ ++ LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec_CXX\"" ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. 
*/ ++ ++int ++main (void) ++{ ++ ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_cxx_try_link "$LINENO" ++then : ++ if ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null ++then : ++ lt_cv_shlibpath_overrides_runpath=yes ++fi ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam \ ++ conftest$ac_exeext conftest.$ac_ext ++ LDFLAGS=$save_LDFLAGS ++ libdir=$save_libdir ++ ++fi ++ ++ shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath ++ ++ # This implies no fast_install, which is unacceptable. ++ # Some rework will be needed to allow for fast_install ++ # before this can be enabled. ++ hardcode_into_libs=yes ++ ++ # Add ABI-specific directories to the system library path. ++ sys_lib_dlsearch_path_spec="/lib64 /usr/lib64 /lib /usr/lib" ++ ++ # Ideally, we could use ldconfig to report *all* directores which are ++ # searched for libraries, however this is still not possible. Aside from not ++ # being certain /sbin/ldconfig is available, command ++ # 'ldconfig -N -X -v | grep ^/' on 64bit Fedora does not report /usr/lib64, ++ # even though it is searched at run-time. Try to do the best guess by ++ # appending ld.so.conf contents (and includes) to the search path. ++ if test -f /etc/ld.so.conf; then ++ lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '` ++ sys_lib_dlsearch_path_spec="$sys_lib_dlsearch_path_spec $lt_ld_extra" ++ fi ++ ++ # We used to test for /lib/ld.so.1 and disable shared libraries on ++ # powerpc, because MkLinux only supported shared libraries with the ++ # GNU dynamic linker. Since this was broken with cross compilers, ++ # most powerpc-linux boxes support dynamic linking these days and ++ # people can always --disable-shared, the test was removed, and we ++ # assume the GNU/Linux dynamic linker is in use. 
++ dynamic_linker='GNU/Linux ld.so' ++ ;; ++ ++netbsd*) ++ version_type=sunos ++ need_lib_prefix=no ++ need_version=no ++ if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' ++ finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' ++ dynamic_linker='NetBSD (a.out) ld.so' ++ else ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ dynamic_linker='NetBSD ld.elf_so' ++ fi ++ shlibpath_var=LD_LIBRARY_PATH ++ shlibpath_overrides_runpath=yes ++ hardcode_into_libs=yes ++ ;; ++ ++newsos6) ++ version_type=linux # correct to gnu/linux during the next big refactor ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ shlibpath_var=LD_LIBRARY_PATH ++ shlibpath_overrides_runpath=yes ++ ;; ++ ++*nto* | *qnx*) ++ version_type=qnx ++ need_lib_prefix=no ++ need_version=no ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ shlibpath_var=LD_LIBRARY_PATH ++ shlibpath_overrides_runpath=no ++ hardcode_into_libs=yes ++ dynamic_linker='ldqnx.so' ++ ;; ++ ++openbsd* | bitrig*) ++ version_type=sunos ++ sys_lib_dlsearch_path_spec=/usr/lib ++ need_lib_prefix=no ++ if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then ++ need_version=no ++ else ++ need_version=yes ++ fi ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' ++ finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' ++ shlibpath_var=LD_LIBRARY_PATH ++ shlibpath_overrides_runpath=yes ++ ;; ++ ++os2*) ++ libname_spec='$name' ++ version_type=windows ++ shrext_cmds=.dll ++ need_version=no ++ need_lib_prefix=no ++ # OS/2 can only load a DLL with a base name of 8 characters or less. 
++ soname_spec='`test -n "$os2dllname" && libname="$os2dllname"; ++ v=$($ECHO $release$versuffix | tr -d .-); ++ n=$($ECHO $libname | cut -b -$((8 - ${#v})) | tr . _); ++ $ECHO $n$v`$shared_ext' ++ library_names_spec='${libname}_dll.$libext' ++ dynamic_linker='OS/2 ld.exe' ++ shlibpath_var=BEGINLIBPATH ++ sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" ++ sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ++ postinstall_cmds='base_file=`basename \$file`~ ++ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; $ECHO \$dlname'\''`~ ++ dldir=$destdir/`dirname \$dlpath`~ ++ test -d \$dldir || mkdir -p \$dldir~ ++ $install_prog $dir/$dlname \$dldir/$dlname~ ++ chmod a+x \$dldir/$dlname~ ++ if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then ++ eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; ++ fi' ++ postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; $ECHO \$dlname'\''`~ ++ dlpath=$dir/\$dldll~ ++ $RM \$dlpath' ++ ;; ++ ++osf3* | osf4* | osf5*) ++ version_type=osf ++ need_lib_prefix=no ++ need_version=no ++ soname_spec='$libname$release$shared_ext$major' ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ shlibpath_var=LD_LIBRARY_PATH ++ sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" ++ sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ++ ;; ++ ++rdos*) ++ dynamic_linker=no ++ ;; ++ ++solaris*) ++ version_type=linux # correct to gnu/linux during the next big refactor ++ need_lib_prefix=no ++ need_version=no ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ shlibpath_var=LD_LIBRARY_PATH ++ shlibpath_overrides_runpath=yes ++ hardcode_into_libs=yes ++ # ldd complains unless libraries are executable ++ postinstall_cmds='chmod +x $lib' ++ ;; ++ ++sunos4*) ++ version_type=sunos ++ 
library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' ++ finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' ++ shlibpath_var=LD_LIBRARY_PATH ++ shlibpath_overrides_runpath=yes ++ if test yes = "$with_gnu_ld"; then ++ need_lib_prefix=no ++ fi ++ need_version=yes ++ ;; ++ ++sysv4 | sysv4.3*) ++ version_type=linux # correct to gnu/linux during the next big refactor ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ shlibpath_var=LD_LIBRARY_PATH ++ case $host_vendor in ++ sni) ++ shlibpath_overrides_runpath=no ++ need_lib_prefix=no ++ runpath_var=LD_RUN_PATH ++ ;; ++ siemens) ++ need_lib_prefix=no ++ ;; ++ motorola) ++ need_lib_prefix=no ++ need_version=no ++ shlibpath_overrides_runpath=no ++ sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' ++ ;; ++ esac ++ ;; ++ ++sysv4*MP*) ++ if test -d /usr/nec; then ++ version_type=linux # correct to gnu/linux during the next big refactor ++ library_names_spec='$libname$shared_ext.$versuffix $libname$shared_ext.$major $libname$shared_ext' ++ soname_spec='$libname$shared_ext.$major' ++ shlibpath_var=LD_LIBRARY_PATH ++ fi ++ ;; ++ ++sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) ++ version_type=sco ++ need_lib_prefix=no ++ need_version=no ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ shlibpath_var=LD_LIBRARY_PATH ++ shlibpath_overrides_runpath=yes ++ hardcode_into_libs=yes ++ if test yes = "$with_gnu_ld"; then ++ sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' ++ else ++ sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' ++ case $host_os in ++ sco3.2v5*) ++ sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" ++ ;; ++ esac ++ fi ++ sys_lib_dlsearch_path_spec='/usr/lib' ++ ;; ++ ++tpf*) ++ # TPF is a 
cross-target only. Preferred cross-host = GNU/Linux. ++ version_type=linux # correct to gnu/linux during the next big refactor ++ need_lib_prefix=no ++ need_version=no ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ shlibpath_var=LD_LIBRARY_PATH ++ shlibpath_overrides_runpath=no ++ hardcode_into_libs=yes ++ ;; ++ ++uts4*) ++ version_type=linux # correct to gnu/linux during the next big refactor ++ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='$libname$release$shared_ext$major' ++ shlibpath_var=LD_LIBRARY_PATH ++ ;; ++ ++*) ++ dynamic_linker=no ++ ;; ++esac ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5 ++printf "%s\n" "$dynamic_linker" >&6; } ++test no = "$dynamic_linker" && can_build_shared=no ++ ++variables_saved_for_relink="PATH $shlibpath_var $runpath_var" ++if test yes = "$GCC"; then ++ variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" ++fi ++ ++if test set = "${lt_cv_sys_lib_search_path_spec+set}"; then ++ sys_lib_search_path_spec=$lt_cv_sys_lib_search_path_spec ++fi ++ ++if test set = "${lt_cv_sys_lib_dlsearch_path_spec+set}"; then ++ sys_lib_dlsearch_path_spec=$lt_cv_sys_lib_dlsearch_path_spec ++fi ++ ++# remember unaugmented sys_lib_dlsearch_path content for libtool script decls... ++configure_time_dlsearch_path=$sys_lib_dlsearch_path_spec ++ ++# ... 
but it needs LT_SYS_LIBRARY_PATH munging for other configure-time code ++func_munge_path_list sys_lib_dlsearch_path_spec "$LT_SYS_LIBRARY_PATH" ++ ++# to be used as default LT_SYS_LIBRARY_PATH value in generated libtool ++configure_time_lt_sys_library_path=$LT_SYS_LIBRARY_PATH ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5 ++printf %s "checking how to hardcode library paths into programs... " >&6; } ++hardcode_action_CXX= ++if test -n "$hardcode_libdir_flag_spec_CXX" || ++ test -n "$runpath_var_CXX" || ++ test yes = "$hardcode_automatic_CXX"; then ++ ++ # We can hardcode non-existent directories. ++ if test no != "$hardcode_direct_CXX" && ++ # If the only mechanism to avoid hardcoding is shlibpath_var, we ++ # have to relink, otherwise we might link with an installed library ++ # when we should be linking with a yet-to-be-installed one ++ ## test no != "$_LT_TAGVAR(hardcode_shlibpath_var, CXX)" && ++ test no != "$hardcode_minus_L_CXX"; then ++ # Linking always hardcodes the temporary library directory. ++ hardcode_action_CXX=relink ++ else ++ # We can link without hardcoding, and we can hardcode nonexisting dirs. ++ hardcode_action_CXX=immediate ++ fi ++else ++ # We cannot hardcode anything, or else we can only hardcode existing ++ # directories. 
++ hardcode_action_CXX=unsupported ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $hardcode_action_CXX" >&5 ++printf "%s\n" "$hardcode_action_CXX" >&6; } ++ ++if test relink = "$hardcode_action_CXX" || ++ test yes = "$inherit_rpath_CXX"; then ++ # Fast installation is not supported ++ enable_fast_install=no ++elif test yes = "$shlibpath_overrides_runpath" || ++ test no = "$enable_shared"; then ++ # Fast installation is not necessary ++ enable_fast_install=needless ++fi ++ ++ ++ ++ ++ ++ ++ ++ fi # test -n "$compiler" ++ ++ CC=$lt_save_CC ++ CFLAGS=$lt_save_CFLAGS ++ LDCXX=$LD ++ LD=$lt_save_LD ++ GCC=$lt_save_GCC ++ with_gnu_ld=$lt_save_with_gnu_ld ++ lt_cv_path_LDCXX=$lt_cv_path_LD ++ lt_cv_path_LD=$lt_save_path_LD ++ lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld ++ lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld ++fi # test yes != "$_lt_caught_CXX_error" ++ ++ac_ext=c ++ac_cpp='$CPP $CPPFLAGS' ++ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ++ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ++ac_compiler_gnu=$ac_cv_c_compiler_gnu ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ac_config_commands="$ac_config_commands libtool" ++ ++ ++ ++ ++# Only expand once: ++ ++ ++ACX_LT_HOST_FLAGS ++ ++ac_fn_c_find_intX_t "$LINENO" "64" "ac_cv_c_int64_t" ++case $ac_cv_c_int64_t in #( ++ no|yes) ;; #( ++ *) ++ ++printf "%s\n" "#define int64_t $ac_cv_c_int64_t" >>confdefs.h ++;; ++esac ++ ++ac_fn_c_find_uintX_t "$LINENO" "64" "ac_cv_c_uint64_t" ++case $ac_cv_c_uint64_t in #( ++ no|yes) ;; #( ++ *) ++ ++printf "%s\n" "#define _UINT64_T 1" >>confdefs.h ++ ++ ++printf "%s\n" "#define uint64_t $ac_cv_c_uint64_t" >>confdefs.h ++;; ++ esac ++ ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for sys/wait.h that is POSIX.1 compatible" >&5 ++printf %s "checking for sys/wait.h that is POSIX.1 compatible... 
" >&6; } ++if test ${ac_cv_header_sys_wait_h+y} ++then : ++ printf %s "(cached) " >&6 ++else $as_nop ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++#include ++#include ++#ifndef WEXITSTATUS ++# define WEXITSTATUS(stat_val) ((unsigned int) (stat_val) >> 8) ++#endif ++#ifndef WIFEXITED ++# define WIFEXITED(stat_val) (((stat_val) & 255) == 0) ++#endif ++ ++int ++main (void) ++{ ++ int s; ++ wait (&s); ++ s = WIFEXITED (s) ? WEXITSTATUS (s) : 1; ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_c_try_compile "$LINENO" ++then : ++ ac_cv_header_sys_wait_h=yes ++else $as_nop ++ ac_cv_header_sys_wait_h=no ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++fi ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_sys_wait_h" >&5 ++printf "%s\n" "$ac_cv_header_sys_wait_h" >&6; } ++if test $ac_cv_header_sys_wait_h = yes; then ++ ++printf "%s\n" "#define HAVE_SYS_WAIT_H 1" >>confdefs.h ++ ++fi ++ ++ac_config_files="$ac_config_files Makefile" ++ ++ac_config_headers="$ac_config_headers config.h" ++ ++cat >confcache <<\_ACEOF ++# This file is a shell script that caches the results of configure ++# tests run on this system so they can be shared between configure ++# scripts and configure runs, see configure's option --config-cache. ++# It is not useful on other systems. If it contains results you don't ++# want to keep, you may remove or edit it. ++# ++# config.status only pays attention to the cache file if you give it ++# the --recheck option to rerun configure. ++# ++# `ac_cv_env_foo' variables (set or unset) will be overridden when ++# loading this file, other *unset* `ac_cv_foo' will be assigned the ++# following values. ++ ++_ACEOF ++ ++# The following way of writing the cache mishandles newlines in values, ++# but we know of no workaround that is simple, portable, and efficient. ++# So, we kill variables containing newlines. 
++# Ultrix sh set writes to stderr and can't be redirected directly, ++# and sets the high bit in the cache file unless we assign to the vars. ++( ++ for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do ++ eval ac_val=\$$ac_var ++ case $ac_val in #( ++ *${as_nl}*) ++ case $ac_var in #( ++ *_cv_*) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 ++printf "%s\n" "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; ++ esac ++ case $ac_var in #( ++ _ | IFS | as_nl) ;; #( ++ BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( ++ *) { eval $ac_var=; unset $ac_var;} ;; ++ esac ;; ++ esac ++ done ++ ++ (set) 2>&1 | ++ case $as_nl`(ac_space=' '; set) 2>&1` in #( ++ *${as_nl}ac_space=\ *) ++ # `set' does not quote correctly, so add quotes: double-quote ++ # substitution turns \\\\ into \\, and sed turns \\ into \. ++ sed -n \ ++ "s/'/'\\\\''/g; ++ s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" ++ ;; #( ++ *) ++ # `set' quotes correctly as required by POSIX, so do not add quotes. ++ sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" ++ ;; ++ esac | ++ sort ++) | ++ sed ' ++ /^ac_cv_env_/b end ++ t clear ++ :clear ++ s/^\([^=]*\)=\(.*[{}].*\)$/test ${\1+y} || &/ ++ t end ++ s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ ++ :end' >>confcache ++if diff "$cache_file" confcache >/dev/null 2>&1; then :; else ++ if test -w "$cache_file"; then ++ if test "x$cache_file" != "x/dev/null"; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 ++printf "%s\n" "$as_me: updating cache $cache_file" >&6;} ++ if test ! 
-f "$cache_file" || test -h "$cache_file"; then ++ cat confcache >"$cache_file" ++ else ++ case $cache_file in #( ++ */* | ?:*) ++ mv -f confcache "$cache_file"$$ && ++ mv -f "$cache_file"$$ "$cache_file" ;; #( ++ *) ++ mv -f confcache "$cache_file" ;; ++ esac ++ fi ++ fi ++ else ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 ++printf "%s\n" "$as_me: not updating unwritable cache $cache_file" >&6;} ++ fi ++fi ++rm -f confcache ++ ++test "x$prefix" = xNONE && prefix=$ac_default_prefix ++# Let make expand exec_prefix. ++test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' ++ ++DEFS=-DHAVE_CONFIG_H ++ ++ac_libobjs= ++ac_ltlibobjs= ++U= ++for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue ++ # 1. Remove the extension, and $U if already installed. ++ ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' ++ ac_i=`printf "%s\n" "$ac_i" | sed "$ac_script"` ++ # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR ++ # will be set to the directory where LIBOBJS objects are built. ++ as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" ++ as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo' ++done ++LIBOBJS=$ac_libobjs ++ ++LTLIBOBJS=$ac_ltlibobjs ++ ++ ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking that generated files are newer than configure" >&5 ++printf %s "checking that generated files are newer than configure... " >&6; } ++ if test -n "$am_sleep_pid"; then ++ # Hide warnings about reused PIDs. ++ wait $am_sleep_pid 2>/dev/null ++ fi ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: done" >&5 ++printf "%s\n" "done" >&6; } ++ if test -n "$EXEEXT"; then ++ am__EXEEXT_TRUE= ++ am__EXEEXT_FALSE='#' ++else ++ am__EXEEXT_TRUE='#' ++ am__EXEEXT_FALSE= ++fi ++ ++if test -z "${MAINTAINER_MODE_TRUE}" && test -z "${MAINTAINER_MODE_FALSE}"; then ++ as_fn_error $? "conditional \"MAINTAINER_MODE\" was never defined. ++Usually this means the macro was only invoked conditionally." 
"$LINENO" 5 ++fi ++if test -z "${AMDEP_TRUE}" && test -z "${AMDEP_FALSE}"; then ++ as_fn_error $? "conditional \"AMDEP\" was never defined. ++Usually this means the macro was only invoked conditionally." "$LINENO" 5 ++fi ++if test -z "${am__fastdepCC_TRUE}" && test -z "${am__fastdepCC_FALSE}"; then ++ as_fn_error $? "conditional \"am__fastdepCC\" was never defined. ++Usually this means the macro was only invoked conditionally." "$LINENO" 5 ++fi ++if test -z "${am__fastdepCC_TRUE}" && test -z "${am__fastdepCC_FALSE}"; then ++ as_fn_error $? "conditional \"am__fastdepCC\" was never defined. ++Usually this means the macro was only invoked conditionally." "$LINENO" 5 ++fi ++if test -z "${am__fastdepCXX_TRUE}" && test -z "${am__fastdepCXX_FALSE}"; then ++ as_fn_error $? "conditional \"am__fastdepCXX\" was never defined. ++Usually this means the macro was only invoked conditionally." "$LINENO" 5 ++fi ++ ++: "${CONFIG_STATUS=./config.status}" ++ac_write_fail=0 ++ac_clean_files_save=$ac_clean_files ++ac_clean_files="$ac_clean_files $CONFIG_STATUS" ++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 ++printf "%s\n" "$as_me: creating $CONFIG_STATUS" >&6;} ++as_write_fail=0 ++cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 ++#! $SHELL ++# Generated by $as_me. ++# Run this file to recreate the current configuration. ++# Compiler output produced by configure, useful for debugging ++# configure, is in config.log if it exists. ++ ++debug=false ++ac_cs_recheck=false ++ac_cs_silent=false ++ ++SHELL=\${CONFIG_SHELL-$SHELL} ++export SHELL ++_ASEOF ++cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 ++## -------------------- ## ++## M4sh Initialization. ## ++## -------------------- ## ++ ++# Be more Bourne compatible ++DUALCASE=1; export DUALCASE # for MKS sh ++as_nop=: ++if test ${ZSH_VERSION+y} && (emulate sh) >/dev/null 2>&1 ++then : ++ emulate sh ++ NULLCMD=: ++ # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which ++ # is contrary to our usage. 
Disable this feature. ++ alias -g '${1+"$@"}'='"$@"' ++ setopt NO_GLOB_SUBST ++else $as_nop ++ case `(set -o) 2>/dev/null` in #( ++ *posix*) : ++ set -o posix ;; #( ++ *) : ++ ;; ++esac ++fi ++ ++ ++ ++# Reset variables that may have inherited troublesome values from ++# the environment. ++ ++# IFS needs to be set, to space, tab, and newline, in precisely that order. ++# (If _AS_PATH_WALK were called with IFS unset, it would have the ++# side effect of setting IFS to empty, thus disabling word splitting.) ++# Quoting is to prevent editors from complaining about space-tab. ++as_nl=' ++' ++export as_nl ++IFS=" "" $as_nl" ++ ++PS1='$ ' ++PS2='> ' ++PS4='+ ' ++ ++# Ensure predictable behavior from utilities with locale-dependent output. ++LC_ALL=C ++export LC_ALL ++LANGUAGE=C ++export LANGUAGE ++ ++# We cannot yet rely on "unset" to work, but we need these variables ++# to be unset--not just set to an empty or harmless value--now, to ++# avoid bugs in old shells (e.g. pre-3.0 UWIN ksh). This construct ++# also avoids known problems related to "unset" and subshell syntax ++# in other old shells (e.g. bash 2.01 and pdksh 5.2.14). ++for as_var in BASH_ENV ENV MAIL MAILPATH CDPATH ++do eval test \${$as_var+y} \ ++ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : ++done ++ ++# Ensure that fds 0, 1, and 2 are open. ++if (exec 3>&0) 2>/dev/null; then :; else exec 0&1) 2>/dev/null; then :; else exec 1>/dev/null; fi ++if (exec 3>&2) ; then :; else exec 2>/dev/null; fi ++ ++# The user is always right. ++if ${PATH_SEPARATOR+false} :; then ++ PATH_SEPARATOR=: ++ (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { ++ (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || ++ PATH_SEPARATOR=';' ++ } ++fi ++ ++ ++# Find who we are. Look in the path if we contain no directory separator. 
++as_myself= ++case $0 in #(( ++ *[\\/]* ) as_myself=$0 ;; ++ *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ case $as_dir in #((( ++ '') as_dir=./ ;; ++ */) ;; ++ *) as_dir=$as_dir/ ;; ++ esac ++ test -r "$as_dir$0" && as_myself=$as_dir$0 && break ++ done ++IFS=$as_save_IFS ++ ++ ;; ++esac ++# We did not find ourselves, most probably we were run as `sh COMMAND' ++# in which case we are not to be found in the path. ++if test "x$as_myself" = x; then ++ as_myself=$0 ++fi ++if test ! -f "$as_myself"; then ++ printf "%s\n" "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 ++ exit 1 ++fi ++ ++ ++ ++# as_fn_error STATUS ERROR [LINENO LOG_FD] ++# ---------------------------------------- ++# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are ++# provided, also output the error to LOG_FD, referencing LINENO. Then exit the ++# script with STATUS, using 1 if that was 0. ++as_fn_error () ++{ ++ as_status=$1; test $as_status -eq 0 && as_status=1 ++ if test "$4"; then ++ as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack ++ printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 ++ fi ++ printf "%s\n" "$as_me: error: $2" >&2 ++ as_fn_exit $as_status ++} # as_fn_error ++ ++ ++ ++# as_fn_set_status STATUS ++# ----------------------- ++# Set $? to STATUS, without forking. ++as_fn_set_status () ++{ ++ return $1 ++} # as_fn_set_status ++ ++# as_fn_exit STATUS ++# ----------------- ++# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. ++as_fn_exit () ++{ ++ set +e ++ as_fn_set_status $1 ++ exit $1 ++} # as_fn_exit ++ ++# as_fn_unset VAR ++# --------------- ++# Portably unset VAR. ++as_fn_unset () ++{ ++ { eval $1=; unset $1;} ++} ++as_unset=as_fn_unset ++ ++# as_fn_append VAR VALUE ++# ---------------------- ++# Append the text in VALUE to the end of the definition contained in VAR. 
Take ++# advantage of any shell optimizations that allow amortized linear growth over ++# repeated appends, instead of the typical quadratic growth present in naive ++# implementations. ++if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null ++then : ++ eval 'as_fn_append () ++ { ++ eval $1+=\$2 ++ }' ++else $as_nop ++ as_fn_append () ++ { ++ eval $1=\$$1\$2 ++ } ++fi # as_fn_append ++ ++# as_fn_arith ARG... ++# ------------------ ++# Perform arithmetic evaluation on the ARGs, and store the result in the ++# global $as_val. Take advantage of shells that can avoid forks. The arguments ++# must be portable across $(()) and expr. ++if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null ++then : ++ eval 'as_fn_arith () ++ { ++ as_val=$(( $* )) ++ }' ++else $as_nop ++ as_fn_arith () ++ { ++ as_val=`expr "$@" || test $? -eq 1` ++ } ++fi # as_fn_arith ++ ++ ++if expr a : '\(a\)' >/dev/null 2>&1 && ++ test "X`expr 00001 : '.*\(...\)'`" = X001; then ++ as_expr=expr ++else ++ as_expr=false ++fi ++ ++if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then ++ as_basename=basename ++else ++ as_basename=false ++fi ++ ++if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then ++ as_dirname=dirname ++else ++ as_dirname=false ++fi ++ ++as_me=`$as_basename -- "$0" || ++$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ ++ X"$0" : 'X\(//\)$' \| \ ++ X"$0" : 'X\(/\)' \| . 2>/dev/null || ++printf "%s\n" X/"$0" | ++ sed '/^.*\/\([^/][^/]*\)\/*$/{ ++ s//\1/ ++ q ++ } ++ /^X\/\(\/\/\)$/{ ++ s//\1/ ++ q ++ } ++ /^X\/\(\/\).*/{ ++ s//\1/ ++ q ++ } ++ s/.*/./; q'` ++ ++# Avoid depending upon Character Ranges. ++as_cr_letters='abcdefghijklmnopqrstuvwxyz' ++as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' ++as_cr_Letters=$as_cr_letters$as_cr_LETTERS ++as_cr_digits='0123456789' ++as_cr_alnum=$as_cr_Letters$as_cr_digits ++ ++ ++# Determine whether it's possible to make 'echo' print without a newline. 
++# These variables are no longer used directly by Autoconf, but are AC_SUBSTed ++# for compatibility with existing Makefiles. ++ECHO_C= ECHO_N= ECHO_T= ++case `echo -n x` in #((((( ++-n*) ++ case `echo 'xy\c'` in ++ *c*) ECHO_T=' ';; # ECHO_T is single tab character. ++ xy) ECHO_C='\c';; ++ *) echo `echo ksh88 bug on AIX 6.1` > /dev/null ++ ECHO_T=' ';; ++ esac;; ++*) ++ ECHO_N='-n';; ++esac ++ ++# For backward compatibility with old third-party macros, we provide ++# the shell variables $as_echo and $as_echo_n. New code should use ++# AS_ECHO(["message"]) and AS_ECHO_N(["message"]), respectively. ++as_echo='printf %s\n' ++as_echo_n='printf %s' ++ ++rm -f conf$$ conf$$.exe conf$$.file ++if test -d conf$$.dir; then ++ rm -f conf$$.dir/conf$$.file ++else ++ rm -f conf$$.dir ++ mkdir conf$$.dir 2>/dev/null ++fi ++if (echo >conf$$.file) 2>/dev/null; then ++ if ln -s conf$$.file conf$$ 2>/dev/null; then ++ as_ln_s='ln -s' ++ # ... but there are two gotchas: ++ # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. ++ # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. ++ # In both cases, we have to default to `cp -pR'. ++ ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || ++ as_ln_s='cp -pR' ++ elif ln conf$$.file conf$$ 2>/dev/null; then ++ as_ln_s=ln ++ else ++ as_ln_s='cp -pR' ++ fi ++else ++ as_ln_s='cp -pR' ++fi ++rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file ++rmdir conf$$.dir 2>/dev/null ++ ++ ++# as_fn_mkdir_p ++# ------------- ++# Create "$as_dir" as a directory, including parents if necessary. 
++as_fn_mkdir_p () ++{ ++ ++ case $as_dir in #( ++ -*) as_dir=./$as_dir;; ++ esac ++ test -d "$as_dir" || eval $as_mkdir_p || { ++ as_dirs= ++ while :; do ++ case $as_dir in #( ++ *\'*) as_qdir=`printf "%s\n" "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( ++ *) as_qdir=$as_dir;; ++ esac ++ as_dirs="'$as_qdir' $as_dirs" ++ as_dir=`$as_dirname -- "$as_dir" || ++$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ ++ X"$as_dir" : 'X\(//\)[^/]' \| \ ++ X"$as_dir" : 'X\(//\)$' \| \ ++ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || ++printf "%s\n" X"$as_dir" | ++ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ ++ s//\1/ ++ q ++ } ++ /^X\(\/\/\)[^/].*/{ ++ s//\1/ ++ q ++ } ++ /^X\(\/\/\)$/{ ++ s//\1/ ++ q ++ } ++ /^X\(\/\).*/{ ++ s//\1/ ++ q ++ } ++ s/.*/./; q'` ++ test -d "$as_dir" && break ++ done ++ test -z "$as_dirs" || eval "mkdir $as_dirs" ++ } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" ++ ++ ++} # as_fn_mkdir_p ++if mkdir -p . 2>/dev/null; then ++ as_mkdir_p='mkdir -p "$as_dir"' ++else ++ test -d ./-p && rmdir ./-p ++ as_mkdir_p=false ++fi ++ ++ ++# as_fn_executable_p FILE ++# ----------------------- ++# Test if FILE is an executable regular file. ++as_fn_executable_p () ++{ ++ test -f "$1" && test -x "$1" ++} # as_fn_executable_p ++as_test_x='test -x' ++as_executable_p=as_fn_executable_p ++ ++# Sed expression to map a string onto a valid CPP name. ++as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" ++ ++# Sed expression to map a string onto a valid variable name. ++as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" ++ ++ ++exec 6>&1 ++## ----------------------------------- ## ++## Main body of $CONFIG_STATUS script. ## ++## ----------------------------------- ## ++_ASEOF ++test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1 ++ ++cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 ++# Save the log message, to keep $0 and so on meaningful, and to ++# report actual input values of CONFIG_FILES etc. 
instead of their ++# values after options handling. ++ac_log=" ++This file was extended by bolt plugin for ld $as_me 0.1, which was ++generated by GNU Autoconf 2.71. Invocation command line was ++ ++ CONFIG_FILES = $CONFIG_FILES ++ CONFIG_HEADERS = $CONFIG_HEADERS ++ CONFIG_LINKS = $CONFIG_LINKS ++ CONFIG_COMMANDS = $CONFIG_COMMANDS ++ $ $0 $@ ++ ++on `(hostname || uname -n) 2>/dev/null | sed 1q` ++" ++ ++_ACEOF ++ ++case $ac_config_files in *" ++"*) set x $ac_config_files; shift; ac_config_files=$*;; ++esac ++ ++case $ac_config_headers in *" ++"*) set x $ac_config_headers; shift; ac_config_headers=$*;; ++esac ++ ++ ++cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ++# Files that config.status was made for. ++config_files="$ac_config_files" ++config_headers="$ac_config_headers" ++config_commands="$ac_config_commands" ++ ++_ACEOF ++ ++cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 ++ac_cs_usage="\ ++\`$as_me' instantiates files and other configuration actions ++from templates according to the current configuration. Unless the files ++and actions are specified as TAGs, all are instantiated by default. ++ ++Usage: $0 [OPTION]... [TAG]... ++ ++ -h, --help print this help, then exit ++ -V, --version print version number and configuration settings, then exit ++ --config print configuration, then exit ++ -q, --quiet, --silent ++ do not print progress messages ++ -d, --debug don't remove temporary files ++ --recheck update $as_me by reconfiguring in the same conditions ++ --file=FILE[:TEMPLATE] ++ instantiate the configuration file FILE ++ --header=FILE[:TEMPLATE] ++ instantiate the configuration header FILE ++ ++Configuration files: ++$config_files ++ ++Configuration headers: ++$config_headers ++ ++Configuration commands: ++$config_commands ++ ++Report bugs to the package provider." 
++ ++_ACEOF ++ac_cs_config=`printf "%s\n" "$ac_configure_args" | sed "$ac_safe_unquote"` ++ac_cs_config_escaped=`printf "%s\n" "$ac_cs_config" | sed "s/^ //; s/'/'\\\\\\\\''/g"` ++cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ++ac_cs_config='$ac_cs_config_escaped' ++ac_cs_version="\\ ++bolt plugin for ld config.status 0.1 ++configured by $0, generated by GNU Autoconf 2.71, ++ with options \\"\$ac_cs_config\\" ++ ++Copyright (C) 2021 Free Software Foundation, Inc. ++This config.status script is free software; the Free Software Foundation ++gives unlimited permission to copy, distribute and modify it." ++ ++ac_pwd='$ac_pwd' ++srcdir='$srcdir' ++INSTALL='$INSTALL' ++MKDIR_P='$MKDIR_P' ++AWK='$AWK' ++test -n "\$AWK" || AWK=awk ++_ACEOF ++ ++cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 ++# The default lists apply if the user does not specify any file. ++ac_need_defaults=: ++while test $# != 0 ++do ++ case $1 in ++ --*=?*) ++ ac_option=`expr "X$1" : 'X\([^=]*\)='` ++ ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` ++ ac_shift=: ++ ;; ++ --*=) ++ ac_option=`expr "X$1" : 'X\([^=]*\)='` ++ ac_optarg= ++ ac_shift=: ++ ;; ++ *) ++ ac_option=$1 ++ ac_optarg=$2 ++ ac_shift=shift ++ ;; ++ esac ++ ++ case $ac_option in ++ # Handling of the options. ++ -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) ++ ac_cs_recheck=: ;; ++ --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) ++ printf "%s\n" "$ac_cs_version"; exit ;; ++ --config | --confi | --conf | --con | --co | --c ) ++ printf "%s\n" "$ac_cs_config"; exit ;; ++ --debug | --debu | --deb | --de | --d | -d ) ++ debug=: ;; ++ --file | --fil | --fi | --f ) ++ $ac_shift ++ case $ac_optarg in ++ *\'*) ac_optarg=`printf "%s\n" "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; ++ '') as_fn_error $? 
"missing file argument" ;; ++ esac ++ as_fn_append CONFIG_FILES " '$ac_optarg'" ++ ac_need_defaults=false;; ++ --header | --heade | --head | --hea ) ++ $ac_shift ++ case $ac_optarg in ++ *\'*) ac_optarg=`printf "%s\n" "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; ++ esac ++ as_fn_append CONFIG_HEADERS " '$ac_optarg'" ++ ac_need_defaults=false;; ++ --he | --h) ++ # Conflict between --help and --header ++ as_fn_error $? "ambiguous option: \`$1' ++Try \`$0 --help' for more information.";; ++ --help | --hel | -h ) ++ printf "%s\n" "$ac_cs_usage"; exit ;; ++ -q | -quiet | --quiet | --quie | --qui | --qu | --q \ ++ | -silent | --silent | --silen | --sile | --sil | --si | --s) ++ ac_cs_silent=: ;; ++ ++ # This is an error. ++ -*) as_fn_error $? "unrecognized option: \`$1' ++Try \`$0 --help' for more information." ;; ++ ++ *) as_fn_append ac_config_targets " $1" ++ ac_need_defaults=false ;; ++ ++ esac ++ shift ++done ++ ++ac_configure_extra_args= ++ ++if $ac_cs_silent; then ++ exec 6>/dev/null ++ ac_configure_extra_args="$ac_configure_extra_args --silent" ++fi ++ ++_ACEOF ++cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ++if \$ac_cs_recheck; then ++ set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion ++ shift ++ \printf "%s\n" "running CONFIG_SHELL=$SHELL \$*" >&6 ++ CONFIG_SHELL='$SHELL' ++ export CONFIG_SHELL ++ exec "\$@" ++fi ++ ++_ACEOF ++cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 ++exec 5>>config.log ++{ ++ echo ++ sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX ++## Running $as_me. ## ++_ASBOX ++ printf "%s\n" "$ac_log" ++} >&5 ++ ++_ACEOF ++cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ++# ++# INIT-COMMANDS ++# ++AMDEP_TRUE="$AMDEP_TRUE" MAKE="${MAKE-make}" ++ ++ ++# The HP-UX ksh and POSIX shell print the target directory to stdout ++# if CDPATH is set. 
++(unset CDPATH) >/dev/null 2>&1 && unset CDPATH ++ ++sed_quote_subst='$sed_quote_subst' ++double_quote_subst='$double_quote_subst' ++delay_variable_subst='$delay_variable_subst' ++macro_version='`$ECHO "$macro_version" | $SED "$delay_single_quote_subst"`' ++macro_revision='`$ECHO "$macro_revision" | $SED "$delay_single_quote_subst"`' ++enable_shared='`$ECHO "$enable_shared" | $SED "$delay_single_quote_subst"`' ++enable_static='`$ECHO "$enable_static" | $SED "$delay_single_quote_subst"`' ++pic_mode='`$ECHO "$pic_mode" | $SED "$delay_single_quote_subst"`' ++enable_fast_install='`$ECHO "$enable_fast_install" | $SED "$delay_single_quote_subst"`' ++shared_archive_member_spec='`$ECHO "$shared_archive_member_spec" | $SED "$delay_single_quote_subst"`' ++SHELL='`$ECHO "$SHELL" | $SED "$delay_single_quote_subst"`' ++ECHO='`$ECHO "$ECHO" | $SED "$delay_single_quote_subst"`' ++PATH_SEPARATOR='`$ECHO "$PATH_SEPARATOR" | $SED "$delay_single_quote_subst"`' ++host_alias='`$ECHO "$host_alias" | $SED "$delay_single_quote_subst"`' ++host='`$ECHO "$host" | $SED "$delay_single_quote_subst"`' ++host_os='`$ECHO "$host_os" | $SED "$delay_single_quote_subst"`' ++build_alias='`$ECHO "$build_alias" | $SED "$delay_single_quote_subst"`' ++build='`$ECHO "$build" | $SED "$delay_single_quote_subst"`' ++build_os='`$ECHO "$build_os" | $SED "$delay_single_quote_subst"`' ++SED='`$ECHO "$SED" | $SED "$delay_single_quote_subst"`' ++Xsed='`$ECHO "$Xsed" | $SED "$delay_single_quote_subst"`' ++GREP='`$ECHO "$GREP" | $SED "$delay_single_quote_subst"`' ++EGREP='`$ECHO "$EGREP" | $SED "$delay_single_quote_subst"`' ++FGREP='`$ECHO "$FGREP" | $SED "$delay_single_quote_subst"`' ++LD='`$ECHO "$LD" | $SED "$delay_single_quote_subst"`' ++NM='`$ECHO "$NM" | $SED "$delay_single_quote_subst"`' ++LN_S='`$ECHO "$LN_S" | $SED "$delay_single_quote_subst"`' ++max_cmd_len='`$ECHO "$max_cmd_len" | $SED "$delay_single_quote_subst"`' ++ac_objext='`$ECHO "$ac_objext" | $SED "$delay_single_quote_subst"`' ++exeext='`$ECHO 
"$exeext" | $SED "$delay_single_quote_subst"`' ++lt_unset='`$ECHO "$lt_unset" | $SED "$delay_single_quote_subst"`' ++lt_SP2NL='`$ECHO "$lt_SP2NL" | $SED "$delay_single_quote_subst"`' ++lt_NL2SP='`$ECHO "$lt_NL2SP" | $SED "$delay_single_quote_subst"`' ++lt_cv_to_host_file_cmd='`$ECHO "$lt_cv_to_host_file_cmd" | $SED "$delay_single_quote_subst"`' ++lt_cv_to_tool_file_cmd='`$ECHO "$lt_cv_to_tool_file_cmd" | $SED "$delay_single_quote_subst"`' ++reload_flag='`$ECHO "$reload_flag" | $SED "$delay_single_quote_subst"`' ++reload_cmds='`$ECHO "$reload_cmds" | $SED "$delay_single_quote_subst"`' ++FILECMD='`$ECHO "$FILECMD" | $SED "$delay_single_quote_subst"`' ++OBJDUMP='`$ECHO "$OBJDUMP" | $SED "$delay_single_quote_subst"`' ++deplibs_check_method='`$ECHO "$deplibs_check_method" | $SED "$delay_single_quote_subst"`' ++file_magic_cmd='`$ECHO "$file_magic_cmd" | $SED "$delay_single_quote_subst"`' ++file_magic_glob='`$ECHO "$file_magic_glob" | $SED "$delay_single_quote_subst"`' ++want_nocaseglob='`$ECHO "$want_nocaseglob" | $SED "$delay_single_quote_subst"`' ++DLLTOOL='`$ECHO "$DLLTOOL" | $SED "$delay_single_quote_subst"`' ++sharedlib_from_linklib_cmd='`$ECHO "$sharedlib_from_linklib_cmd" | $SED "$delay_single_quote_subst"`' ++AR='`$ECHO "$AR" | $SED "$delay_single_quote_subst"`' ++lt_ar_flags='`$ECHO "$lt_ar_flags" | $SED "$delay_single_quote_subst"`' ++AR_FLAGS='`$ECHO "$AR_FLAGS" | $SED "$delay_single_quote_subst"`' ++archiver_list_spec='`$ECHO "$archiver_list_spec" | $SED "$delay_single_quote_subst"`' ++STRIP='`$ECHO "$STRIP" | $SED "$delay_single_quote_subst"`' ++RANLIB='`$ECHO "$RANLIB" | $SED "$delay_single_quote_subst"`' ++old_postinstall_cmds='`$ECHO "$old_postinstall_cmds" | $SED "$delay_single_quote_subst"`' ++old_postuninstall_cmds='`$ECHO "$old_postuninstall_cmds" | $SED "$delay_single_quote_subst"`' ++old_archive_cmds='`$ECHO "$old_archive_cmds" | $SED "$delay_single_quote_subst"`' ++lock_old_archive_extraction='`$ECHO "$lock_old_archive_extraction" | $SED 
"$delay_single_quote_subst"`' ++CC='`$ECHO "$CC" | $SED "$delay_single_quote_subst"`' ++CFLAGS='`$ECHO "$CFLAGS" | $SED "$delay_single_quote_subst"`' ++compiler='`$ECHO "$compiler" | $SED "$delay_single_quote_subst"`' ++GCC='`$ECHO "$GCC" | $SED "$delay_single_quote_subst"`' ++lt_cv_sys_global_symbol_pipe='`$ECHO "$lt_cv_sys_global_symbol_pipe" | $SED "$delay_single_quote_subst"`' ++lt_cv_sys_global_symbol_to_cdecl='`$ECHO "$lt_cv_sys_global_symbol_to_cdecl" | $SED "$delay_single_quote_subst"`' ++lt_cv_sys_global_symbol_to_import='`$ECHO "$lt_cv_sys_global_symbol_to_import" | $SED "$delay_single_quote_subst"`' ++lt_cv_sys_global_symbol_to_c_name_address='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address" | $SED "$delay_single_quote_subst"`' ++lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address_lib_prefix" | $SED "$delay_single_quote_subst"`' ++lt_cv_nm_interface='`$ECHO "$lt_cv_nm_interface" | $SED "$delay_single_quote_subst"`' ++nm_file_list_spec='`$ECHO "$nm_file_list_spec" | $SED "$delay_single_quote_subst"`' ++lt_sysroot='`$ECHO "$lt_sysroot" | $SED "$delay_single_quote_subst"`' ++lt_cv_truncate_bin='`$ECHO "$lt_cv_truncate_bin" | $SED "$delay_single_quote_subst"`' ++objdir='`$ECHO "$objdir" | $SED "$delay_single_quote_subst"`' ++MAGIC_CMD='`$ECHO "$MAGIC_CMD" | $SED "$delay_single_quote_subst"`' ++lt_prog_compiler_no_builtin_flag='`$ECHO "$lt_prog_compiler_no_builtin_flag" | $SED "$delay_single_quote_subst"`' ++lt_prog_compiler_pic='`$ECHO "$lt_prog_compiler_pic" | $SED "$delay_single_quote_subst"`' ++lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' ++lt_prog_compiler_static='`$ECHO "$lt_prog_compiler_static" | $SED "$delay_single_quote_subst"`' ++lt_cv_prog_compiler_c_o='`$ECHO "$lt_cv_prog_compiler_c_o" | $SED "$delay_single_quote_subst"`' ++need_locks='`$ECHO "$need_locks" | $SED "$delay_single_quote_subst"`' ++MANIFEST_TOOL='`$ECHO "$MANIFEST_TOOL" | $SED 
"$delay_single_quote_subst"`' ++DSYMUTIL='`$ECHO "$DSYMUTIL" | $SED "$delay_single_quote_subst"`' ++NMEDIT='`$ECHO "$NMEDIT" | $SED "$delay_single_quote_subst"`' ++LIPO='`$ECHO "$LIPO" | $SED "$delay_single_quote_subst"`' ++OTOOL='`$ECHO "$OTOOL" | $SED "$delay_single_quote_subst"`' ++OTOOL64='`$ECHO "$OTOOL64" | $SED "$delay_single_quote_subst"`' ++libext='`$ECHO "$libext" | $SED "$delay_single_quote_subst"`' ++shrext_cmds='`$ECHO "$shrext_cmds" | $SED "$delay_single_quote_subst"`' ++extract_expsyms_cmds='`$ECHO "$extract_expsyms_cmds" | $SED "$delay_single_quote_subst"`' ++archive_cmds_need_lc='`$ECHO "$archive_cmds_need_lc" | $SED "$delay_single_quote_subst"`' ++enable_shared_with_static_runtimes='`$ECHO "$enable_shared_with_static_runtimes" | $SED "$delay_single_quote_subst"`' ++export_dynamic_flag_spec='`$ECHO "$export_dynamic_flag_spec" | $SED "$delay_single_quote_subst"`' ++whole_archive_flag_spec='`$ECHO "$whole_archive_flag_spec" | $SED "$delay_single_quote_subst"`' ++compiler_needs_object='`$ECHO "$compiler_needs_object" | $SED "$delay_single_quote_subst"`' ++old_archive_from_new_cmds='`$ECHO "$old_archive_from_new_cmds" | $SED "$delay_single_quote_subst"`' ++old_archive_from_expsyms_cmds='`$ECHO "$old_archive_from_expsyms_cmds" | $SED "$delay_single_quote_subst"`' ++archive_cmds='`$ECHO "$archive_cmds" | $SED "$delay_single_quote_subst"`' ++archive_expsym_cmds='`$ECHO "$archive_expsym_cmds" | $SED "$delay_single_quote_subst"`' ++module_cmds='`$ECHO "$module_cmds" | $SED "$delay_single_quote_subst"`' ++module_expsym_cmds='`$ECHO "$module_expsym_cmds" | $SED "$delay_single_quote_subst"`' ++with_gnu_ld='`$ECHO "$with_gnu_ld" | $SED "$delay_single_quote_subst"`' ++allow_undefined_flag='`$ECHO "$allow_undefined_flag" | $SED "$delay_single_quote_subst"`' ++no_undefined_flag='`$ECHO "$no_undefined_flag" | $SED "$delay_single_quote_subst"`' ++hardcode_libdir_flag_spec='`$ECHO "$hardcode_libdir_flag_spec" | $SED "$delay_single_quote_subst"`' 
++hardcode_libdir_separator='`$ECHO "$hardcode_libdir_separator" | $SED "$delay_single_quote_subst"`' ++hardcode_direct='`$ECHO "$hardcode_direct" | $SED "$delay_single_quote_subst"`' ++hardcode_direct_absolute='`$ECHO "$hardcode_direct_absolute" | $SED "$delay_single_quote_subst"`' ++hardcode_minus_L='`$ECHO "$hardcode_minus_L" | $SED "$delay_single_quote_subst"`' ++hardcode_shlibpath_var='`$ECHO "$hardcode_shlibpath_var" | $SED "$delay_single_quote_subst"`' ++hardcode_automatic='`$ECHO "$hardcode_automatic" | $SED "$delay_single_quote_subst"`' ++inherit_rpath='`$ECHO "$inherit_rpath" | $SED "$delay_single_quote_subst"`' ++link_all_deplibs='`$ECHO "$link_all_deplibs" | $SED "$delay_single_quote_subst"`' ++always_export_symbols='`$ECHO "$always_export_symbols" | $SED "$delay_single_quote_subst"`' ++export_symbols_cmds='`$ECHO "$export_symbols_cmds" | $SED "$delay_single_quote_subst"`' ++exclude_expsyms='`$ECHO "$exclude_expsyms" | $SED "$delay_single_quote_subst"`' ++include_expsyms='`$ECHO "$include_expsyms" | $SED "$delay_single_quote_subst"`' ++prelink_cmds='`$ECHO "$prelink_cmds" | $SED "$delay_single_quote_subst"`' ++postlink_cmds='`$ECHO "$postlink_cmds" | $SED "$delay_single_quote_subst"`' ++file_list_spec='`$ECHO "$file_list_spec" | $SED "$delay_single_quote_subst"`' ++variables_saved_for_relink='`$ECHO "$variables_saved_for_relink" | $SED "$delay_single_quote_subst"`' ++need_lib_prefix='`$ECHO "$need_lib_prefix" | $SED "$delay_single_quote_subst"`' ++need_version='`$ECHO "$need_version" | $SED "$delay_single_quote_subst"`' ++version_type='`$ECHO "$version_type" | $SED "$delay_single_quote_subst"`' ++runpath_var='`$ECHO "$runpath_var" | $SED "$delay_single_quote_subst"`' ++shlibpath_var='`$ECHO "$shlibpath_var" | $SED "$delay_single_quote_subst"`' ++shlibpath_overrides_runpath='`$ECHO "$shlibpath_overrides_runpath" | $SED "$delay_single_quote_subst"`' ++libname_spec='`$ECHO "$libname_spec" | $SED "$delay_single_quote_subst"`' ++library_names_spec='`$ECHO 
"$library_names_spec" | $SED "$delay_single_quote_subst"`' ++soname_spec='`$ECHO "$soname_spec" | $SED "$delay_single_quote_subst"`' ++install_override_mode='`$ECHO "$install_override_mode" | $SED "$delay_single_quote_subst"`' ++postinstall_cmds='`$ECHO "$postinstall_cmds" | $SED "$delay_single_quote_subst"`' ++postuninstall_cmds='`$ECHO "$postuninstall_cmds" | $SED "$delay_single_quote_subst"`' ++finish_cmds='`$ECHO "$finish_cmds" | $SED "$delay_single_quote_subst"`' ++finish_eval='`$ECHO "$finish_eval" | $SED "$delay_single_quote_subst"`' ++hardcode_into_libs='`$ECHO "$hardcode_into_libs" | $SED "$delay_single_quote_subst"`' ++sys_lib_search_path_spec='`$ECHO "$sys_lib_search_path_spec" | $SED "$delay_single_quote_subst"`' ++configure_time_dlsearch_path='`$ECHO "$configure_time_dlsearch_path" | $SED "$delay_single_quote_subst"`' ++configure_time_lt_sys_library_path='`$ECHO "$configure_time_lt_sys_library_path" | $SED "$delay_single_quote_subst"`' ++hardcode_action='`$ECHO "$hardcode_action" | $SED "$delay_single_quote_subst"`' ++enable_dlopen='`$ECHO "$enable_dlopen" | $SED "$delay_single_quote_subst"`' ++enable_dlopen_self='`$ECHO "$enable_dlopen_self" | $SED "$delay_single_quote_subst"`' ++enable_dlopen_self_static='`$ECHO "$enable_dlopen_self_static" | $SED "$delay_single_quote_subst"`' ++old_striplib='`$ECHO "$old_striplib" | $SED "$delay_single_quote_subst"`' ++striplib='`$ECHO "$striplib" | $SED "$delay_single_quote_subst"`' ++compiler_lib_search_dirs='`$ECHO "$compiler_lib_search_dirs" | $SED "$delay_single_quote_subst"`' ++predep_objects='`$ECHO "$predep_objects" | $SED "$delay_single_quote_subst"`' ++postdep_objects='`$ECHO "$postdep_objects" | $SED "$delay_single_quote_subst"`' ++predeps='`$ECHO "$predeps" | $SED "$delay_single_quote_subst"`' ++postdeps='`$ECHO "$postdeps" | $SED "$delay_single_quote_subst"`' ++compiler_lib_search_path='`$ECHO "$compiler_lib_search_path" | $SED "$delay_single_quote_subst"`' ++LD_CXX='`$ECHO "$LD_CXX" | $SED 
"$delay_single_quote_subst"`' ++reload_flag_CXX='`$ECHO "$reload_flag_CXX" | $SED "$delay_single_quote_subst"`' ++reload_cmds_CXX='`$ECHO "$reload_cmds_CXX" | $SED "$delay_single_quote_subst"`' ++old_archive_cmds_CXX='`$ECHO "$old_archive_cmds_CXX" | $SED "$delay_single_quote_subst"`' ++compiler_CXX='`$ECHO "$compiler_CXX" | $SED "$delay_single_quote_subst"`' ++GCC_CXX='`$ECHO "$GCC_CXX" | $SED "$delay_single_quote_subst"`' ++lt_prog_compiler_no_builtin_flag_CXX='`$ECHO "$lt_prog_compiler_no_builtin_flag_CXX" | $SED "$delay_single_quote_subst"`' ++lt_prog_compiler_pic_CXX='`$ECHO "$lt_prog_compiler_pic_CXX" | $SED "$delay_single_quote_subst"`' ++lt_prog_compiler_wl_CXX='`$ECHO "$lt_prog_compiler_wl_CXX" | $SED "$delay_single_quote_subst"`' ++lt_prog_compiler_static_CXX='`$ECHO "$lt_prog_compiler_static_CXX" | $SED "$delay_single_quote_subst"`' ++lt_cv_prog_compiler_c_o_CXX='`$ECHO "$lt_cv_prog_compiler_c_o_CXX" | $SED "$delay_single_quote_subst"`' ++archive_cmds_need_lc_CXX='`$ECHO "$archive_cmds_need_lc_CXX" | $SED "$delay_single_quote_subst"`' ++enable_shared_with_static_runtimes_CXX='`$ECHO "$enable_shared_with_static_runtimes_CXX" | $SED "$delay_single_quote_subst"`' ++export_dynamic_flag_spec_CXX='`$ECHO "$export_dynamic_flag_spec_CXX" | $SED "$delay_single_quote_subst"`' ++whole_archive_flag_spec_CXX='`$ECHO "$whole_archive_flag_spec_CXX" | $SED "$delay_single_quote_subst"`' ++compiler_needs_object_CXX='`$ECHO "$compiler_needs_object_CXX" | $SED "$delay_single_quote_subst"`' ++old_archive_from_new_cmds_CXX='`$ECHO "$old_archive_from_new_cmds_CXX" | $SED "$delay_single_quote_subst"`' ++old_archive_from_expsyms_cmds_CXX='`$ECHO "$old_archive_from_expsyms_cmds_CXX" | $SED "$delay_single_quote_subst"`' ++archive_cmds_CXX='`$ECHO "$archive_cmds_CXX" | $SED "$delay_single_quote_subst"`' ++archive_expsym_cmds_CXX='`$ECHO "$archive_expsym_cmds_CXX" | $SED "$delay_single_quote_subst"`' ++module_cmds_CXX='`$ECHO "$module_cmds_CXX" | $SED "$delay_single_quote_subst"`' 
++module_expsym_cmds_CXX='`$ECHO "$module_expsym_cmds_CXX" | $SED "$delay_single_quote_subst"`' ++with_gnu_ld_CXX='`$ECHO "$with_gnu_ld_CXX" | $SED "$delay_single_quote_subst"`' ++allow_undefined_flag_CXX='`$ECHO "$allow_undefined_flag_CXX" | $SED "$delay_single_quote_subst"`' ++no_undefined_flag_CXX='`$ECHO "$no_undefined_flag_CXX" | $SED "$delay_single_quote_subst"`' ++hardcode_libdir_flag_spec_CXX='`$ECHO "$hardcode_libdir_flag_spec_CXX" | $SED "$delay_single_quote_subst"`' ++hardcode_libdir_separator_CXX='`$ECHO "$hardcode_libdir_separator_CXX" | $SED "$delay_single_quote_subst"`' ++hardcode_direct_CXX='`$ECHO "$hardcode_direct_CXX" | $SED "$delay_single_quote_subst"`' ++hardcode_direct_absolute_CXX='`$ECHO "$hardcode_direct_absolute_CXX" | $SED "$delay_single_quote_subst"`' ++hardcode_minus_L_CXX='`$ECHO "$hardcode_minus_L_CXX" | $SED "$delay_single_quote_subst"`' ++hardcode_shlibpath_var_CXX='`$ECHO "$hardcode_shlibpath_var_CXX" | $SED "$delay_single_quote_subst"`' ++hardcode_automatic_CXX='`$ECHO "$hardcode_automatic_CXX" | $SED "$delay_single_quote_subst"`' ++inherit_rpath_CXX='`$ECHO "$inherit_rpath_CXX" | $SED "$delay_single_quote_subst"`' ++link_all_deplibs_CXX='`$ECHO "$link_all_deplibs_CXX" | $SED "$delay_single_quote_subst"`' ++always_export_symbols_CXX='`$ECHO "$always_export_symbols_CXX" | $SED "$delay_single_quote_subst"`' ++export_symbols_cmds_CXX='`$ECHO "$export_symbols_cmds_CXX" | $SED "$delay_single_quote_subst"`' ++exclude_expsyms_CXX='`$ECHO "$exclude_expsyms_CXX" | $SED "$delay_single_quote_subst"`' ++include_expsyms_CXX='`$ECHO "$include_expsyms_CXX" | $SED "$delay_single_quote_subst"`' ++prelink_cmds_CXX='`$ECHO "$prelink_cmds_CXX" | $SED "$delay_single_quote_subst"`' ++postlink_cmds_CXX='`$ECHO "$postlink_cmds_CXX" | $SED "$delay_single_quote_subst"`' ++file_list_spec_CXX='`$ECHO "$file_list_spec_CXX" | $SED "$delay_single_quote_subst"`' ++hardcode_action_CXX='`$ECHO "$hardcode_action_CXX" | $SED "$delay_single_quote_subst"`' 
++compiler_lib_search_dirs_CXX='`$ECHO "$compiler_lib_search_dirs_CXX" | $SED "$delay_single_quote_subst"`' ++predep_objects_CXX='`$ECHO "$predep_objects_CXX" | $SED "$delay_single_quote_subst"`' ++postdep_objects_CXX='`$ECHO "$postdep_objects_CXX" | $SED "$delay_single_quote_subst"`' ++predeps_CXX='`$ECHO "$predeps_CXX" | $SED "$delay_single_quote_subst"`' ++postdeps_CXX='`$ECHO "$postdeps_CXX" | $SED "$delay_single_quote_subst"`' ++compiler_lib_search_path_CXX='`$ECHO "$compiler_lib_search_path_CXX" | $SED "$delay_single_quote_subst"`' ++ ++LTCC='$LTCC' ++LTCFLAGS='$LTCFLAGS' ++compiler='$compiler_DEFAULT' ++ ++# A function that is used when there is no print builtin or printf. ++func_fallback_echo () ++{ ++ eval 'cat <<_LTECHO_EOF ++\$1 ++_LTECHO_EOF' ++} ++ ++# Quote evaled strings. ++for var in SHELL \ ++ECHO \ ++PATH_SEPARATOR \ ++SED \ ++GREP \ ++EGREP \ ++FGREP \ ++LD \ ++NM \ ++LN_S \ ++lt_SP2NL \ ++lt_NL2SP \ ++reload_flag \ ++FILECMD \ ++OBJDUMP \ ++deplibs_check_method \ ++file_magic_cmd \ ++file_magic_glob \ ++want_nocaseglob \ ++DLLTOOL \ ++sharedlib_from_linklib_cmd \ ++AR \ ++archiver_list_spec \ ++STRIP \ ++RANLIB \ ++CC \ ++CFLAGS \ ++compiler \ ++lt_cv_sys_global_symbol_pipe \ ++lt_cv_sys_global_symbol_to_cdecl \ ++lt_cv_sys_global_symbol_to_import \ ++lt_cv_sys_global_symbol_to_c_name_address \ ++lt_cv_sys_global_symbol_to_c_name_address_lib_prefix \ ++lt_cv_nm_interface \ ++nm_file_list_spec \ ++lt_cv_truncate_bin \ ++lt_prog_compiler_no_builtin_flag \ ++lt_prog_compiler_pic \ ++lt_prog_compiler_wl \ ++lt_prog_compiler_static \ ++lt_cv_prog_compiler_c_o \ ++need_locks \ ++MANIFEST_TOOL \ ++DSYMUTIL \ ++NMEDIT \ ++LIPO \ ++OTOOL \ ++OTOOL64 \ ++shrext_cmds \ ++export_dynamic_flag_spec \ ++whole_archive_flag_spec \ ++compiler_needs_object \ ++with_gnu_ld \ ++allow_undefined_flag \ ++no_undefined_flag \ ++hardcode_libdir_flag_spec \ ++hardcode_libdir_separator \ ++exclude_expsyms \ ++include_expsyms \ ++file_list_spec \ 
++variables_saved_for_relink \ ++libname_spec \ ++library_names_spec \ ++soname_spec \ ++install_override_mode \ ++finish_eval \ ++old_striplib \ ++striplib \ ++compiler_lib_search_dirs \ ++predep_objects \ ++postdep_objects \ ++predeps \ ++postdeps \ ++compiler_lib_search_path \ ++LD_CXX \ ++reload_flag_CXX \ ++compiler_CXX \ ++lt_prog_compiler_no_builtin_flag_CXX \ ++lt_prog_compiler_pic_CXX \ ++lt_prog_compiler_wl_CXX \ ++lt_prog_compiler_static_CXX \ ++lt_cv_prog_compiler_c_o_CXX \ ++export_dynamic_flag_spec_CXX \ ++whole_archive_flag_spec_CXX \ ++compiler_needs_object_CXX \ ++with_gnu_ld_CXX \ ++allow_undefined_flag_CXX \ ++no_undefined_flag_CXX \ ++hardcode_libdir_flag_spec_CXX \ ++hardcode_libdir_separator_CXX \ ++exclude_expsyms_CXX \ ++include_expsyms_CXX \ ++file_list_spec_CXX \ ++compiler_lib_search_dirs_CXX \ ++predep_objects_CXX \ ++postdep_objects_CXX \ ++predeps_CXX \ ++postdeps_CXX \ ++compiler_lib_search_path_CXX; do ++ case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in ++ *[\\\\\\\`\\"\\\$]*) ++ eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED \\"\\\$sed_quote_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes ++ ;; ++ *) ++ eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" ++ ;; ++ esac ++done ++ ++# Double-quote double-evaled strings. 
++for var in reload_cmds \ ++old_postinstall_cmds \ ++old_postuninstall_cmds \ ++old_archive_cmds \ ++extract_expsyms_cmds \ ++old_archive_from_new_cmds \ ++old_archive_from_expsyms_cmds \ ++archive_cmds \ ++archive_expsym_cmds \ ++module_cmds \ ++module_expsym_cmds \ ++export_symbols_cmds \ ++prelink_cmds \ ++postlink_cmds \ ++postinstall_cmds \ ++postuninstall_cmds \ ++finish_cmds \ ++sys_lib_search_path_spec \ ++configure_time_dlsearch_path \ ++configure_time_lt_sys_library_path \ ++reload_cmds_CXX \ ++old_archive_cmds_CXX \ ++old_archive_from_new_cmds_CXX \ ++old_archive_from_expsyms_cmds_CXX \ ++archive_cmds_CXX \ ++archive_expsym_cmds_CXX \ ++module_cmds_CXX \ ++module_expsym_cmds_CXX \ ++export_symbols_cmds_CXX \ ++prelink_cmds_CXX \ ++postlink_cmds_CXX; do ++ case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in ++ *[\\\\\\\`\\"\\\$]*) ++ eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes ++ ;; ++ *) ++ eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" ++ ;; ++ esac ++done ++ ++ac_aux_dir='$ac_aux_dir' ++ ++# See if we are running on zsh, and set the options that allow our ++# commands through without removal of \ escapes INIT. ++if test -n "\${ZSH_VERSION+set}"; then ++ setopt NO_GLOB_SUBST ++fi ++ ++ ++ PACKAGE='$PACKAGE' ++ VERSION='$VERSION' ++ RM='$RM' ++ ofile='$ofile' ++ ++ ++ ++ ++ ++ ++_ACEOF ++ ++cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 ++ ++# Handling of arguments. ++for ac_config_target in $ac_config_targets ++do ++ case $ac_config_target in ++ "depfiles") CONFIG_COMMANDS="$CONFIG_COMMANDS depfiles" ;; ++ "libtool") CONFIG_COMMANDS="$CONFIG_COMMANDS libtool" ;; ++ "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;; ++ "config.h") CONFIG_HEADERS="$CONFIG_HEADERS config.h" ;; ++ ++ *) as_fn_error $? 
"invalid argument: \`$ac_config_target'" "$LINENO" 5;; ++ esac ++done ++ ++ ++# If the user did not use the arguments to specify the items to instantiate, ++# then the envvar interface is used. Set only those that are not. ++# We use the long form for the default assignment because of an extremely ++# bizarre bug on SunOS 4.1.3. ++if $ac_need_defaults; then ++ test ${CONFIG_FILES+y} || CONFIG_FILES=$config_files ++ test ${CONFIG_HEADERS+y} || CONFIG_HEADERS=$config_headers ++ test ${CONFIG_COMMANDS+y} || CONFIG_COMMANDS=$config_commands ++fi ++ ++# Have a temporary directory for convenience. Make it in the build tree ++# simply because there is no reason against having it here, and in addition, ++# creating and moving files from /tmp can sometimes cause problems. ++# Hook for its removal unless debugging. ++# Note that there is a small window in which the directory will not be cleaned: ++# after its creation but before its name has been assigned to `$tmp'. ++$debug || ++{ ++ tmp= ac_tmp= ++ trap 'exit_status=$? ++ : "${ac_tmp:=$tmp}" ++ { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status ++' 0 ++ trap 'as_fn_exit 1' 1 2 13 15 ++} ++# Create a (secure) tmp directory for tmp files. ++ ++{ ++ tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && ++ test -d "$tmp" ++} || ++{ ++ tmp=./conf$$-$RANDOM ++ (umask 077 && mkdir "$tmp") ++} || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5 ++ac_tmp=$tmp ++ ++# Set up the scripts for CONFIG_FILES section. ++# No need to generate them if there are no CONFIG_FILES. ++# This happens for instance with `./config.status config.h'. ++if test -n "$CONFIG_FILES"; then ++ ++ ++ac_cr=`echo X | tr X '\015'` ++# On cygwin, bash can eat \r inside `` if the user requested igncr. ++# But we know of no other shell where ac_cr would be empty at this ++# point, so we can use a bashism as a fallback. 
++if test "x$ac_cr" = x; then ++ eval ac_cr=\$\'\\r\' ++fi ++ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` ++if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then ++ ac_cs_awk_cr='\\r' ++else ++ ac_cs_awk_cr=$ac_cr ++fi ++ ++echo 'BEGIN {' >"$ac_tmp/subs1.awk" && ++_ACEOF ++ ++ ++{ ++ echo "cat >conf$$subs.awk <<_ACEOF" && ++ echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && ++ echo "_ACEOF" ++} >conf$$subs.sh || ++ as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 ++ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'` ++ac_delim='%!_!# ' ++for ac_last_try in false false false false false :; do ++ . ./conf$$subs.sh || ++ as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 ++ ++ ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` ++ if test $ac_delim_n = $ac_delim_num; then ++ break ++ elif $ac_last_try; then ++ as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 ++ else ++ ac_delim="$ac_delim!$ac_delim _$ac_delim!! " ++ fi ++done ++rm -f conf$$subs.sh ++ ++cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ++cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK && ++_ACEOF ++sed -n ' ++h ++s/^/S["/; s/!.*/"]=/ ++p ++g ++s/^[^!]*!// ++:repl ++t repl ++s/'"$ac_delim"'$// ++t delim ++:nl ++h ++s/\(.\{148\}\)..*/\1/ ++t more1 ++s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ ++p ++n ++b repl ++:more1 ++s/["\\]/\\&/g; s/^/"/; s/$/"\\/ ++p ++g ++s/.\{148\}// ++t nl ++:delim ++h ++s/\(.\{148\}\)..*/\1/ ++t more2 ++s/["\\]/\\&/g; s/^/"/; s/$/"/ ++p ++b ++:more2 ++s/["\\]/\\&/g; s/^/"/; s/$/"\\/ ++p ++g ++s/.\{148\}// ++t delim ++' >$CONFIG_STATUS || ac_write_fail=1 ++rm -f conf$$subs.awk ++cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ++_ACAWK ++cat >>"\$ac_tmp/subs1.awk" <<_ACAWK && ++ for (key in S) S_is_set[key] = 1 ++ FS = "" ++ ++} ++{ ++ line = $ 0 ++ nfields = split(line, field, "@") ++ substed = 0 ++ len = length(field[1]) ++ for (i = 2; i < nfields; i++) { ++ key = field[i] ++ keylen = length(key) ++ if (S_is_set[key]) { ++ value = S[key] ++ line = 
substr(line, 1, len) "" value "" substr(line, len + keylen + 3) ++ len += length(value) + length(field[++i]) ++ substed = 1 ++ } else ++ len += 1 + keylen ++ } ++ ++ print line ++} ++ ++_ACAWK ++_ACEOF ++cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 ++if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then ++ sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" ++else ++ cat ++fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \ ++ || as_fn_error $? "could not setup config files machinery" "$LINENO" 5 ++_ACEOF ++ ++# VPATH may cause trouble with some makes, so we remove sole $(srcdir), ++# ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and ++# trailing colons and then remove the whole line if VPATH becomes empty ++# (actually we leave an empty line to preserve line numbers). ++if test "x$srcdir" = x.; then ++ ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{ ++h ++s/// ++s/^/:/ ++s/[ ]*$/:/ ++s/:\$(srcdir):/:/g ++s/:\${srcdir}:/:/g ++s/:@srcdir@:/:/g ++s/^:*// ++s/:*$// ++x ++s/\(=[ ]*\).*/\1/ ++G ++s/\n// ++s/^[^=]*=[ ]*$// ++}' ++fi ++ ++cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 ++fi # test -n "$CONFIG_FILES" ++ ++# Set up the scripts for CONFIG_HEADERS section. ++# No need to generate them if there are no CONFIG_HEADERS. ++# This happens for instance with `./config.status Makefile'. ++if test -n "$CONFIG_HEADERS"; then ++cat >"$ac_tmp/defines.awk" <<\_ACAWK || ++BEGIN { ++_ACEOF ++ ++# Transform confdefs.h into an awk script `defines.awk', embedded as ++# here-document in config.status, that substitutes the proper values into ++# config.h.in to produce config.h. ++ ++# Create a delimiter string that does not exist in confdefs.h, to ease ++# handling of long lines. ++ac_delim='%!_!# ' ++for ac_last_try in false false :; do ++ ac_tt=`sed -n "/$ac_delim/p" confdefs.h` ++ if test -z "$ac_tt"; then ++ break ++ elif $ac_last_try; then ++ as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5 ++ else ++ ac_delim="$ac_delim!$ac_delim _$ac_delim!! 
" ++ fi ++done ++ ++# For the awk script, D is an array of macro values keyed by name, ++# likewise P contains macro parameters if any. Preserve backslash ++# newline sequences. ++ ++ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]* ++sed -n ' ++s/.\{148\}/&'"$ac_delim"'/g ++t rset ++:rset ++s/^[ ]*#[ ]*define[ ][ ]*/ / ++t def ++d ++:def ++s/\\$// ++t bsnl ++s/["\\]/\\&/g ++s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ ++D["\1"]=" \3"/p ++s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p ++d ++:bsnl ++s/["\\]/\\&/g ++s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ ++D["\1"]=" \3\\\\\\n"\\/p ++t cont ++s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p ++t cont ++d ++:cont ++n ++s/.\{148\}/&'"$ac_delim"'/g ++t clear ++:clear ++s/\\$// ++t bsnlc ++s/["\\]/\\&/g; s/^/"/; s/$/"/p ++d ++:bsnlc ++s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p ++b cont ++' >$CONFIG_STATUS || ac_write_fail=1 ++ ++cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ++ for (key in D) D_is_set[key] = 1 ++ FS = "" ++} ++/^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ { ++ line = \$ 0 ++ split(line, arg, " ") ++ if (arg[1] == "#") { ++ defundef = arg[2] ++ mac1 = arg[3] ++ } else { ++ defundef = substr(arg[1], 2) ++ mac1 = arg[2] ++ } ++ split(mac1, mac2, "(") #) ++ macro = mac2[1] ++ prefix = substr(line, 1, index(line, defundef) - 1) ++ if (D_is_set[macro]) { ++ # Preserve the white space surrounding the "#". ++ print prefix "define", macro P[macro] D[macro] ++ next ++ } else { ++ # Replace #undef with comments. This is necessary, for example, ++ # in the case of _POSIX_SOURCE, which is predefined and required ++ # on some systems where configure will not decide to define it. ++ if (defundef == "undef") { ++ print "/*", prefix defundef, macro, "*/" ++ next ++ } ++ } ++} ++{ print } ++_ACAWK ++_ACEOF ++cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 ++ as_fn_error $? 
"could not setup config headers machinery" "$LINENO" 5 ++fi # test -n "$CONFIG_HEADERS" ++ ++ ++eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS :C $CONFIG_COMMANDS" ++shift ++for ac_tag ++do ++ case $ac_tag in ++ :[FHLC]) ac_mode=$ac_tag; continue;; ++ esac ++ case $ac_mode$ac_tag in ++ :[FHL]*:*);; ++ :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;; ++ :[FH]-) ac_tag=-:-;; ++ :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; ++ esac ++ ac_save_IFS=$IFS ++ IFS=: ++ set x $ac_tag ++ IFS=$ac_save_IFS ++ shift ++ ac_file=$1 ++ shift ++ ++ case $ac_mode in ++ :L) ac_source=$1;; ++ :[FH]) ++ ac_file_inputs= ++ for ac_f ++ do ++ case $ac_f in ++ -) ac_f="$ac_tmp/stdin";; ++ *) # Look for the file first in the build tree, then in the source tree ++ # (if the path is not absolute). The absolute path cannot be DOS-style, ++ # because $ac_f cannot contain `:'. ++ test -f "$ac_f" || ++ case $ac_f in ++ [\\/$]*) false;; ++ *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; ++ esac || ++ as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; ++ esac ++ case $ac_f in *\'*) ac_f=`printf "%s\n" "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac ++ as_fn_append ac_file_inputs " '$ac_f'" ++ done ++ ++ # Let's still pretend it is `configure' which instantiates (i.e., don't ++ # use $as_me), people would be surprised to read: ++ # /* config.h. Generated by config.status. */ ++ configure_input='Generated from '` ++ printf "%s\n" "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' ++ `' by configure.' ++ if test x"$ac_file" != x-; then ++ configure_input="$ac_file. $configure_input" ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 ++printf "%s\n" "$as_me: creating $ac_file" >&6;} ++ fi ++ # Neutralize special characters interpreted by sed in replacement strings. 
++ case $configure_input in #( ++ *\&* | *\|* | *\\* ) ++ ac_sed_conf_input=`printf "%s\n" "$configure_input" | ++ sed 's/[\\\\&|]/\\\\&/g'`;; #( ++ *) ac_sed_conf_input=$configure_input;; ++ esac ++ ++ case $ac_tag in ++ *:-:* | *:-) cat >"$ac_tmp/stdin" \ ++ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; ++ esac ++ ;; ++ esac ++ ++ ac_dir=`$as_dirname -- "$ac_file" || ++$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ ++ X"$ac_file" : 'X\(//\)[^/]' \| \ ++ X"$ac_file" : 'X\(//\)$' \| \ ++ X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || ++printf "%s\n" X"$ac_file" | ++ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ ++ s//\1/ ++ q ++ } ++ /^X\(\/\/\)[^/].*/{ ++ s//\1/ ++ q ++ } ++ /^X\(\/\/\)$/{ ++ s//\1/ ++ q ++ } ++ /^X\(\/\).*/{ ++ s//\1/ ++ q ++ } ++ s/.*/./; q'` ++ as_dir="$ac_dir"; as_fn_mkdir_p ++ ac_builddir=. ++ ++case "$ac_dir" in ++.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; ++*) ++ ac_dir_suffix=/`printf "%s\n" "$ac_dir" | sed 's|^\.[\\/]||'` ++ # A ".." for each directory in $ac_dir_suffix. ++ ac_top_builddir_sub=`printf "%s\n" "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` ++ case $ac_top_builddir_sub in ++ "") ac_top_builddir_sub=. ac_top_build_prefix= ;; ++ *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; ++ esac ;; ++esac ++ac_abs_top_builddir=$ac_pwd ++ac_abs_builddir=$ac_pwd$ac_dir_suffix ++# for backward compatibility: ++ac_top_builddir=$ac_top_build_prefix ++ ++case $srcdir in ++ .) # We are building in place. ++ ac_srcdir=. ++ ac_top_srcdir=$ac_top_builddir_sub ++ ac_abs_top_srcdir=$ac_pwd ;; ++ [\\/]* | ?:[\\/]* ) # Absolute name. ++ ac_srcdir=$srcdir$ac_dir_suffix; ++ ac_top_srcdir=$srcdir ++ ac_abs_top_srcdir=$srcdir ;; ++ *) # Relative name. 
++ ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix ++ ac_top_srcdir=$ac_top_build_prefix$srcdir ++ ac_abs_top_srcdir=$ac_pwd/$srcdir ;; ++esac ++ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix ++ ++ ++ case $ac_mode in ++ :F) ++ # ++ # CONFIG_FILE ++ # ++ ++ case $INSTALL in ++ [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;; ++ *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;; ++ esac ++ ac_MKDIR_P=$MKDIR_P ++ case $MKDIR_P in ++ [\\/$]* | ?:[\\/]* ) ;; ++ */*) ac_MKDIR_P=$ac_top_build_prefix$MKDIR_P ;; ++ esac ++_ACEOF ++ ++cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 ++# If the template does not know about datarootdir, expand it. ++# FIXME: This hack should be removed a few years after 2.60. ++ac_datarootdir_hack=; ac_datarootdir_seen= ++ac_sed_dataroot=' ++/datarootdir/ { ++ p ++ q ++} ++/@datadir@/p ++/@docdir@/p ++/@infodir@/p ++/@localedir@/p ++/@mandir@/p' ++case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in ++*datarootdir*) ac_datarootdir_seen=yes;; ++*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 ++printf "%s\n" "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} ++_ACEOF ++cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ++ ac_datarootdir_hack=' ++ s&@datadir@&$datadir&g ++ s&@docdir@&$docdir&g ++ s&@infodir@&$infodir&g ++ s&@localedir@&$localedir&g ++ s&@mandir@&$mandir&g ++ s&\\\${datarootdir}&$datarootdir&g' ;; ++esac ++_ACEOF ++ ++# Neutralize VPATH when `$srcdir' = `.'. ++# Shell code in configure.ac might set extrasub. ++# FIXME: do we really want to maintain this feature? 
++cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ++ac_sed_extra="$ac_vpsub ++$extrasub ++_ACEOF ++cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 ++:t ++/@[a-zA-Z_][a-zA-Z_0-9]*@/!b ++s|@configure_input@|$ac_sed_conf_input|;t t ++s&@top_builddir@&$ac_top_builddir_sub&;t t ++s&@top_build_prefix@&$ac_top_build_prefix&;t t ++s&@srcdir@&$ac_srcdir&;t t ++s&@abs_srcdir@&$ac_abs_srcdir&;t t ++s&@top_srcdir@&$ac_top_srcdir&;t t ++s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t ++s&@builddir@&$ac_builddir&;t t ++s&@abs_builddir@&$ac_abs_builddir&;t t ++s&@abs_top_builddir@&$ac_abs_top_builddir&;t t ++s&@INSTALL@&$ac_INSTALL&;t t ++s&@MKDIR_P@&$ac_MKDIR_P&;t t ++$ac_datarootdir_hack ++" ++eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \ ++ >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ++ ++test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && ++ { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && ++ { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ ++ "$ac_tmp/out"`; test -z "$ac_out"; } && ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' ++which seems to be undefined. Please make sure it is defined" >&5 ++printf "%s\n" "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' ++which seems to be undefined. Please make sure it is defined" >&2;} ++ ++ rm -f "$ac_tmp/stdin" ++ case $ac_file in ++ -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";; ++ *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";; ++ esac \ ++ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ++ ;; ++ :H) ++ # ++ # CONFIG_HEADER ++ # ++ if test x"$ac_file" != x-; then ++ { ++ printf "%s\n" "/* $configure_input */" >&1 \ ++ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" ++ } >"$ac_tmp/config.h" \ ++ || as_fn_error $? 
"could not create $ac_file" "$LINENO" 5 ++ if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 ++printf "%s\n" "$as_me: $ac_file is unchanged" >&6;} ++ else ++ rm -f "$ac_file" ++ mv "$ac_tmp/config.h" "$ac_file" \ ++ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ++ fi ++ else ++ printf "%s\n" "/* $configure_input */" >&1 \ ++ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \ ++ || as_fn_error $? "could not create -" "$LINENO" 5 ++ fi ++# Compute "$ac_file"'s index in $config_headers. ++_am_arg="$ac_file" ++_am_stamp_count=1 ++for _am_header in $config_headers :; do ++ case $_am_header in ++ $_am_arg | $_am_arg:* ) ++ break ;; ++ * ) ++ _am_stamp_count=`expr $_am_stamp_count + 1` ;; ++ esac ++done ++echo "timestamp for $_am_arg" >`$as_dirname -- "$_am_arg" || ++$as_expr X"$_am_arg" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ ++ X"$_am_arg" : 'X\(//\)[^/]' \| \ ++ X"$_am_arg" : 'X\(//\)$' \| \ ++ X"$_am_arg" : 'X\(/\)' \| . 2>/dev/null || ++printf "%s\n" X"$_am_arg" | ++ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ ++ s//\1/ ++ q ++ } ++ /^X\(\/\/\)[^/].*/{ ++ s//\1/ ++ q ++ } ++ /^X\(\/\/\)$/{ ++ s//\1/ ++ q ++ } ++ /^X\(\/\).*/{ ++ s//\1/ ++ q ++ } ++ s/.*/./; q'`/stamp-h$_am_stamp_count ++ ;; ++ ++ :C) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5 ++printf "%s\n" "$as_me: executing $ac_file commands" >&6;} ++ ;; ++ esac ++ ++ ++ case $ac_file$ac_mode in ++ "depfiles":C) test x"$AMDEP_TRUE" != x"" || { ++ # Older Autoconf quotes --file arguments for eval, but not when files ++ # are listed without --file. Let's play safe and only enable the eval ++ # if we detect the quoting. ++ # TODO: see whether this extra hack can be removed once we start ++ # requiring Autoconf 2.70 or later. 
++ case $CONFIG_FILES in #( ++ *\'*) : ++ eval set x "$CONFIG_FILES" ;; #( ++ *) : ++ set x $CONFIG_FILES ;; #( ++ *) : ++ ;; ++esac ++ shift ++ # Used to flag and report bootstrapping failures. ++ am_rc=0 ++ for am_mf ++ do ++ # Strip MF so we end up with the name of the file. ++ am_mf=`printf "%s\n" "$am_mf" | sed -e 's/:.*$//'` ++ # Check whether this is an Automake generated Makefile which includes ++ # dependency-tracking related rules and includes. ++ # Grep'ing the whole file directly is not great: AIX grep has a line ++ # limit of 2048, but all sed's we know have understand at least 4000. ++ sed -n 's,^am--depfiles:.*,X,p' "$am_mf" | grep X >/dev/null 2>&1 \ ++ || continue ++ am_dirpart=`$as_dirname -- "$am_mf" || ++$as_expr X"$am_mf" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ ++ X"$am_mf" : 'X\(//\)[^/]' \| \ ++ X"$am_mf" : 'X\(//\)$' \| \ ++ X"$am_mf" : 'X\(/\)' \| . 2>/dev/null || ++printf "%s\n" X"$am_mf" | ++ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ ++ s//\1/ ++ q ++ } ++ /^X\(\/\/\)[^/].*/{ ++ s//\1/ ++ q ++ } ++ /^X\(\/\/\)$/{ ++ s//\1/ ++ q ++ } ++ /^X\(\/\).*/{ ++ s//\1/ ++ q ++ } ++ s/.*/./; q'` ++ am_filepart=`$as_basename -- "$am_mf" || ++$as_expr X/"$am_mf" : '.*/\([^/][^/]*\)/*$' \| \ ++ X"$am_mf" : 'X\(//\)$' \| \ ++ X"$am_mf" : 'X\(/\)' \| . 2>/dev/null || ++printf "%s\n" X/"$am_mf" | ++ sed '/^.*\/\([^/][^/]*\)\/*$/{ ++ s//\1/ ++ q ++ } ++ /^X\/\(\/\/\)$/{ ++ s//\1/ ++ q ++ } ++ /^X\/\(\/\).*/{ ++ s//\1/ ++ q ++ } ++ s/.*/./; q'` ++ { echo "$as_me:$LINENO: cd "$am_dirpart" \ ++ && sed -e '/# am--include-marker/d' "$am_filepart" \ ++ | $MAKE -f - am--depfiles" >&5 ++ (cd "$am_dirpart" \ ++ && sed -e '/# am--include-marker/d' "$am_filepart" \ ++ | $MAKE -f - am--depfiles) >&5 2>&5 ++ ac_status=$? ++ echo "$as_me:$LINENO: \$? = $ac_status" >&5 ++ (exit $ac_status); } || am_rc=$? 
++ done ++ if test $am_rc -ne 0; then ++ { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 ++printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} ++as_fn_error $? "Something went wrong bootstrapping makefile fragments ++ for automatic dependency tracking. If GNU make was not used, consider ++ re-running the configure script with MAKE=\"gmake\" (or whatever is ++ necessary). You can also try re-running configure with the ++ '--disable-dependency-tracking' option to at least be able to build ++ the package (albeit without support for automatic dependency tracking). ++See \`config.log' for more details" "$LINENO" 5; } ++ fi ++ { am_dirpart=; unset am_dirpart;} ++ { am_filepart=; unset am_filepart;} ++ { am_mf=; unset am_mf;} ++ { am_rc=; unset am_rc;} ++ rm -f conftest-deps.mk ++} ++ ;; ++ "libtool":C) ++ ++ # See if we are running on zsh, and set the options that allow our ++ # commands through without removal of \ escapes. ++ if test -n "${ZSH_VERSION+set}"; then ++ setopt NO_GLOB_SUBST ++ fi ++ ++ cfgfile=${ofile}T ++ trap "$RM \"$cfgfile\"; exit 1" 1 2 15 ++ $RM "$cfgfile" ++ ++ cat <<_LT_EOF >> "$cfgfile" ++#! $SHELL ++# Generated automatically by $as_me ($PACKAGE) $VERSION ++# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: ++# NOTE: Changes made to this file will be lost: look at ltmain.sh. ++ ++# Provide generalized library-building support services. ++# Written by Gordon Matzigkeit, 1996 ++ ++# Copyright (C) 2014 Free Software Foundation, Inc. ++# This is free software; see the source for copying conditions. There is NO ++# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. ++ ++# GNU Libtool is free software; you can redistribute it and/or modify ++# it under the terms of the GNU General Public License as published by ++# the Free Software Foundation; either version 2 of of the License, or ++# (at your option) any later version. 
++# ++# As a special exception to the GNU General Public License, if you ++# distribute this file as part of a program or library that is built ++# using GNU Libtool, you may include this file under the same ++# distribution terms that you use for the rest of that program. ++# ++# GNU Libtool is distributed in the hope that it will be useful, but ++# WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++# GNU General Public License for more details. ++# ++# You should have received a copy of the GNU General Public License ++# along with this program. If not, see . ++ ++ ++# The names of the tagged configurations supported by this script. ++available_tags='CXX ' ++ ++# Configured defaults for sys_lib_dlsearch_path munging. ++: \${LT_SYS_LIBRARY_PATH="$configure_time_lt_sys_library_path"} ++ ++# ### BEGIN LIBTOOL CONFIG ++ ++# Which release of libtool.m4 was used? ++macro_version=$macro_version ++macro_revision=$macro_revision ++ ++# Whether or not to build shared libraries. ++build_libtool_libs=$enable_shared ++ ++# Whether or not to build static libraries. ++build_old_libs=$enable_static ++ ++# What type of objects to build. ++pic_mode=$pic_mode ++ ++# Whether or not to optimize for fast installation. ++fast_install=$enable_fast_install ++ ++# Shared archive member basename,for filename based shared library versioning on AIX. ++shared_archive_member_spec=$shared_archive_member_spec ++ ++# Shell to use when invoking shell scripts. ++SHELL=$lt_SHELL ++ ++# An echo program that protects backslashes. ++ECHO=$lt_ECHO ++ ++# The PATH separator for the build system. ++PATH_SEPARATOR=$lt_PATH_SEPARATOR ++ ++# The host system. ++host_alias=$host_alias ++host=$host ++host_os=$host_os ++ ++# The build system. ++build_alias=$build_alias ++build=$build ++build_os=$build_os ++ ++# A sed program that does not truncate output. 
++SED=$lt_SED ++ ++# Sed that helps us avoid accidentally triggering echo(1) options like -n. ++Xsed="\$SED -e 1s/^X//" ++ ++# A grep program that handles long lines. ++GREP=$lt_GREP ++ ++# An ERE matcher. ++EGREP=$lt_EGREP ++ ++# A literal string matcher. ++FGREP=$lt_FGREP ++ ++# A BSD- or MS-compatible name lister. ++NM=$lt_NM ++ ++# Whether we need soft or hard links. ++LN_S=$lt_LN_S ++ ++# What is the maximum length of a command? ++max_cmd_len=$max_cmd_len ++ ++# Object file suffix (normally "o"). ++objext=$ac_objext ++ ++# Executable file suffix (normally ""). ++exeext=$exeext ++ ++# whether the shell understands "unset". ++lt_unset=$lt_unset ++ ++# turn spaces into newlines. ++SP2NL=$lt_lt_SP2NL ++ ++# turn newlines into spaces. ++NL2SP=$lt_lt_NL2SP ++ ++# convert \$build file names to \$host format. ++to_host_file_cmd=$lt_cv_to_host_file_cmd ++ ++# convert \$build files to toolchain format. ++to_tool_file_cmd=$lt_cv_to_tool_file_cmd ++ ++# A file(cmd) program that detects file types. ++FILECMD=$lt_FILECMD ++ ++# An object symbol dumper. ++OBJDUMP=$lt_OBJDUMP ++ ++# Method to check whether dependent libraries are shared objects. ++deplibs_check_method=$lt_deplibs_check_method ++ ++# Command to use when deplibs_check_method = "file_magic". ++file_magic_cmd=$lt_file_magic_cmd ++ ++# How to find potential files when deplibs_check_method = "file_magic". ++file_magic_glob=$lt_file_magic_glob ++ ++# Find potential files using nocaseglob when deplibs_check_method = "file_magic". ++want_nocaseglob=$lt_want_nocaseglob ++ ++# DLL creation program. ++DLLTOOL=$lt_DLLTOOL ++ ++# Command to associate shared and link libraries. ++sharedlib_from_linklib_cmd=$lt_sharedlib_from_linklib_cmd ++ ++# The archiver. ++AR=$lt_AR ++ ++# Flags to create an archive (by configure). ++lt_ar_flags=$lt_ar_flags ++ ++# Flags to create an archive. ++AR_FLAGS=\${ARFLAGS-"\$lt_ar_flags"} ++ ++# How to feed a file listing to the archiver. 
++archiver_list_spec=$lt_archiver_list_spec ++ ++# A symbol stripping program. ++STRIP=$lt_STRIP ++ ++# Commands used to install an old-style archive. ++RANLIB=$lt_RANLIB ++old_postinstall_cmds=$lt_old_postinstall_cmds ++old_postuninstall_cmds=$lt_old_postuninstall_cmds ++ ++# Whether to use a lock for old archive extraction. ++lock_old_archive_extraction=$lock_old_archive_extraction ++ ++# A C compiler. ++LTCC=$lt_CC ++ ++# LTCC compiler flags. ++LTCFLAGS=$lt_CFLAGS ++ ++# Take the output of nm and produce a listing of raw symbols and C names. ++global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe ++ ++# Transform the output of nm in a proper C declaration. ++global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl ++ ++# Transform the output of nm into a list of symbols to manually relocate. ++global_symbol_to_import=$lt_lt_cv_sys_global_symbol_to_import ++ ++# Transform the output of nm in a C name address pair. ++global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address ++ ++# Transform the output of nm in a C name address pair when lib prefix is needed. ++global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix ++ ++# The name lister interface. ++nm_interface=$lt_lt_cv_nm_interface ++ ++# Specify filename containing input files for \$NM. ++nm_file_list_spec=$lt_nm_file_list_spec ++ ++# The root where to search for dependent libraries,and where our libraries should be installed. ++lt_sysroot=$lt_sysroot ++ ++# Command to truncate a binary pipe. ++lt_truncate_bin=$lt_lt_cv_truncate_bin ++ ++# The name of the directory that contains temporary libtool files. ++objdir=$objdir ++ ++# Used to examine libraries when file_magic_cmd begins with "file". ++MAGIC_CMD=$MAGIC_CMD ++ ++# Must we lock files when doing compilation? ++need_locks=$lt_need_locks ++ ++# Manifest tool. ++MANIFEST_TOOL=$lt_MANIFEST_TOOL ++ ++# Tool to manipulate archived DWARF debug symbol files on Mac OS X. 
++DSYMUTIL=$lt_DSYMUTIL ++ ++# Tool to change global to local symbols on Mac OS X. ++NMEDIT=$lt_NMEDIT ++ ++# Tool to manipulate fat objects and archives on Mac OS X. ++LIPO=$lt_LIPO ++ ++# ldd/readelf like tool for Mach-O binaries on Mac OS X. ++OTOOL=$lt_OTOOL ++ ++# ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4. ++OTOOL64=$lt_OTOOL64 ++ ++# Old archive suffix (normally "a"). ++libext=$libext ++ ++# Shared library suffix (normally ".so"). ++shrext_cmds=$lt_shrext_cmds ++ ++# The commands to extract the exported symbol list from a shared archive. ++extract_expsyms_cmds=$lt_extract_expsyms_cmds ++ ++# Variables whose values should be saved in libtool wrapper scripts and ++# restored at link time. ++variables_saved_for_relink=$lt_variables_saved_for_relink ++ ++# Do we need the "lib" prefix for modules? ++need_lib_prefix=$need_lib_prefix ++ ++# Do we need a version for libraries? ++need_version=$need_version ++ ++# Library versioning type. ++version_type=$version_type ++ ++# Shared library runtime path variable. ++runpath_var=$runpath_var ++ ++# Shared library path variable. ++shlibpath_var=$shlibpath_var ++ ++# Is shlibpath searched before the hard-coded library search path? ++shlibpath_overrides_runpath=$shlibpath_overrides_runpath ++ ++# Format of library name prefix. ++libname_spec=$lt_libname_spec ++ ++# List of archive names. First name is the real one, the rest are links. ++# The last name is the one that the linker finds with -lNAME ++library_names_spec=$lt_library_names_spec ++ ++# The coded name of the library, if different from the real name. ++soname_spec=$lt_soname_spec ++ ++# Permission mode override for installation of shared libraries. ++install_override_mode=$lt_install_override_mode ++ ++# Command to use after installation of a shared archive. ++postinstall_cmds=$lt_postinstall_cmds ++ ++# Command to use after uninstallation of a shared archive. 
++postuninstall_cmds=$lt_postuninstall_cmds ++ ++# Commands used to finish a libtool library installation in a directory. ++finish_cmds=$lt_finish_cmds ++ ++# As "finish_cmds", except a single script fragment to be evaled but ++# not shown. ++finish_eval=$lt_finish_eval ++ ++# Whether we should hardcode library paths into libraries. ++hardcode_into_libs=$hardcode_into_libs ++ ++# Compile-time system search path for libraries. ++sys_lib_search_path_spec=$lt_sys_lib_search_path_spec ++ ++# Detected run-time system search path for libraries. ++sys_lib_dlsearch_path_spec=$lt_configure_time_dlsearch_path ++ ++# Explicit LT_SYS_LIBRARY_PATH set during ./configure time. ++configure_time_lt_sys_library_path=$lt_configure_time_lt_sys_library_path ++ ++# Whether dlopen is supported. ++dlopen_support=$enable_dlopen ++ ++# Whether dlopen of programs is supported. ++dlopen_self=$enable_dlopen_self ++ ++# Whether dlopen of statically linked programs is supported. ++dlopen_self_static=$enable_dlopen_self_static ++ ++# Commands to strip libraries. ++old_striplib=$lt_old_striplib ++striplib=$lt_striplib ++ ++ ++# The linker used to build libraries. ++LD=$lt_LD ++ ++# How to create reloadable object files. ++reload_flag=$lt_reload_flag ++reload_cmds=$lt_reload_cmds ++ ++# Commands used to build an old-style archive. ++old_archive_cmds=$lt_old_archive_cmds ++ ++# A language specific compiler. ++CC=$lt_compiler ++ ++# Is the compiler the GNU compiler? ++with_gcc=$GCC ++ ++# Compiler flag to turn off builtin functions. ++no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag ++ ++# Additional compiler flags for building library objects. ++pic_flag=$lt_lt_prog_compiler_pic ++ ++# How to pass a linker flag through the compiler. ++wl=$lt_lt_prog_compiler_wl ++ ++# Compiler flag to prevent dynamic linking. ++link_static_flag=$lt_lt_prog_compiler_static ++ ++# Does compiler simultaneously support -c and -o options? 
++compiler_c_o=$lt_lt_cv_prog_compiler_c_o ++ ++# Whether or not to add -lc for building shared libraries. ++build_libtool_need_lc=$archive_cmds_need_lc ++ ++# Whether or not to disallow shared libs when runtime libs are static. ++allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes ++ ++# Compiler flag to allow reflexive dlopens. ++export_dynamic_flag_spec=$lt_export_dynamic_flag_spec ++ ++# Compiler flag to generate shared objects directly from archives. ++whole_archive_flag_spec=$lt_whole_archive_flag_spec ++ ++# Whether the compiler copes with passing no objects directly. ++compiler_needs_object=$lt_compiler_needs_object ++ ++# Create an old-style archive from a shared archive. ++old_archive_from_new_cmds=$lt_old_archive_from_new_cmds ++ ++# Create a temporary old-style archive to link instead of a shared archive. ++old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds ++ ++# Commands used to build a shared archive. ++archive_cmds=$lt_archive_cmds ++archive_expsym_cmds=$lt_archive_expsym_cmds ++ ++# Commands used to build a loadable module if different from building ++# a shared archive. ++module_cmds=$lt_module_cmds ++module_expsym_cmds=$lt_module_expsym_cmds ++ ++# Whether we are building with GNU ld or not. ++with_gnu_ld=$lt_with_gnu_ld ++ ++# Flag that allows shared libraries with undefined symbols to be built. ++allow_undefined_flag=$lt_allow_undefined_flag ++ ++# Flag that enforces no undefined symbols. ++no_undefined_flag=$lt_no_undefined_flag ++ ++# Flag to hardcode \$libdir into a binary during linking. ++# This must work even if \$libdir does not exist ++hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec ++ ++# Whether we need a single "-rpath" flag with a separated argument. ++hardcode_libdir_separator=$lt_hardcode_libdir_separator ++ ++# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes ++# DIR into the resulting binary. 
++hardcode_direct=$hardcode_direct ++ ++# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes ++# DIR into the resulting binary and the resulting library dependency is ++# "absolute",i.e impossible to change by setting \$shlibpath_var if the ++# library is relocated. ++hardcode_direct_absolute=$hardcode_direct_absolute ++ ++# Set to "yes" if using the -LDIR flag during linking hardcodes DIR ++# into the resulting binary. ++hardcode_minus_L=$hardcode_minus_L ++ ++# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR ++# into the resulting binary. ++hardcode_shlibpath_var=$hardcode_shlibpath_var ++ ++# Set to "yes" if building a shared library automatically hardcodes DIR ++# into the library and all subsequent libraries and executables linked ++# against it. ++hardcode_automatic=$hardcode_automatic ++ ++# Set to yes if linker adds runtime paths of dependent libraries ++# to runtime path list. ++inherit_rpath=$inherit_rpath ++ ++# Whether libtool must link a program against all its dependency libraries. ++link_all_deplibs=$link_all_deplibs ++ ++# Set to "yes" if exported symbols are required. ++always_export_symbols=$always_export_symbols ++ ++# The commands to list exported symbols. ++export_symbols_cmds=$lt_export_symbols_cmds ++ ++# Symbols that should not be listed in the preloaded symbols. ++exclude_expsyms=$lt_exclude_expsyms ++ ++# Symbols that must always be exported. ++include_expsyms=$lt_include_expsyms ++ ++# Commands necessary for linking programs (against libraries) with templates. ++prelink_cmds=$lt_prelink_cmds ++ ++# Commands necessary for finishing linking programs. ++postlink_cmds=$lt_postlink_cmds ++ ++# Specify filename containing input files. ++file_list_spec=$lt_file_list_spec ++ ++# How to hardcode a shared library path into an executable. ++hardcode_action=$hardcode_action ++ ++# The directories searched by this compiler when creating a shared library. 
++compiler_lib_search_dirs=$lt_compiler_lib_search_dirs ++ ++# Dependencies to place before and after the objects being linked to ++# create a shared library. ++predep_objects=$lt_predep_objects ++postdep_objects=$lt_postdep_objects ++predeps=$lt_predeps ++postdeps=$lt_postdeps ++ ++# The library search path used internally by the compiler when linking ++# a shared library. ++compiler_lib_search_path=$lt_compiler_lib_search_path ++ ++# ### END LIBTOOL CONFIG ++ ++_LT_EOF ++ ++ cat <<'_LT_EOF' >> "$cfgfile" ++ ++# ### BEGIN FUNCTIONS SHARED WITH CONFIGURE ++ ++# func_munge_path_list VARIABLE PATH ++# ----------------------------------- ++# VARIABLE is name of variable containing _space_ separated list of ++# directories to be munged by the contents of PATH, which is string ++# having a format: ++# "DIR[:DIR]:" ++# string "DIR[ DIR]" will be prepended to VARIABLE ++# ":DIR[:DIR]" ++# string "DIR[ DIR]" will be appended to VARIABLE ++# "DIRP[:DIRP]::[DIRA:]DIRA" ++# string "DIRP[ DIRP]" will be prepended to VARIABLE and string ++# "DIRA[ DIRA]" will be appended to VARIABLE ++# "DIR[:DIR]" ++# VARIABLE will be replaced by "DIR[ DIR]" ++func_munge_path_list () ++{ ++ case x$2 in ++ x) ++ ;; ++ *:) ++ eval $1=\"`$ECHO $2 | $SED 's/:/ /g'` \$$1\" ++ ;; ++ x:*) ++ eval $1=\"\$$1 `$ECHO $2 | $SED 's/:/ /g'`\" ++ ;; ++ *::*) ++ eval $1=\"\$$1\ `$ECHO $2 | $SED -e 's/.*:://' -e 's/:/ /g'`\" ++ eval $1=\"`$ECHO $2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \$$1\" ++ ;; ++ *) ++ eval $1=\"`$ECHO $2 | $SED 's/:/ /g'`\" ++ ;; ++ esac ++} ++ ++ ++# Calculate cc_basename. Skip known compiler wrappers and cross-prefix. 
++func_cc_basename () ++{ ++ for cc_temp in $*""; do ++ case $cc_temp in ++ compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; ++ distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; ++ \-*) ;; ++ *) break;; ++ esac ++ done ++ func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` ++} ++ ++ ++# ### END FUNCTIONS SHARED WITH CONFIGURE ++ ++_LT_EOF ++ ++ case $host_os in ++ aix3*) ++ cat <<\_LT_EOF >> "$cfgfile" ++# AIX sometimes has problems with the GCC collect2 program. For some ++# reason, if we set the COLLECT_NAMES environment variable, the problems ++# vanish in a puff of smoke. ++if test set != "${COLLECT_NAMES+set}"; then ++ COLLECT_NAMES= ++ export COLLECT_NAMES ++fi ++_LT_EOF ++ ;; ++ esac ++ ++ ++ ++ltmain=$ac_aux_dir/ltmain.sh ++ ++ ++ # We use sed instead of cat because bash on DJGPP gets confused if ++ # if finds mixed CR/LF and LF-only lines. Since sed operates in ++ # text mode, it properly converts lines to CR/LF. This bash problem ++ # is reportedly fixed, but why not run on old versions too? ++ $SED '$q' "$ltmain" >> "$cfgfile" \ ++ || (rm -f "$cfgfile"; exit 1) ++ ++ mv -f "$cfgfile" "$ofile" || ++ (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") ++ chmod +x "$ofile" ++ ++ ++ cat <<_LT_EOF >> "$ofile" ++ ++# ### BEGIN LIBTOOL TAG CONFIG: CXX ++ ++# The linker used to build libraries. ++LD=$lt_LD_CXX ++ ++# How to create reloadable object files. ++reload_flag=$lt_reload_flag_CXX ++reload_cmds=$lt_reload_cmds_CXX ++ ++# Commands used to build an old-style archive. ++old_archive_cmds=$lt_old_archive_cmds_CXX ++ ++# A language specific compiler. ++CC=$lt_compiler_CXX ++ ++# Is the compiler the GNU compiler? ++with_gcc=$GCC_CXX ++ ++# Compiler flag to turn off builtin functions. ++no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag_CXX ++ ++# Additional compiler flags for building library objects. ++pic_flag=$lt_lt_prog_compiler_pic_CXX ++ ++# How to pass a linker flag through the compiler. 
++wl=$lt_lt_prog_compiler_wl_CXX ++ ++# Compiler flag to prevent dynamic linking. ++link_static_flag=$lt_lt_prog_compiler_static_CXX ++ ++# Does compiler simultaneously support -c and -o options? ++compiler_c_o=$lt_lt_cv_prog_compiler_c_o_CXX ++ ++# Whether or not to add -lc for building shared libraries. ++build_libtool_need_lc=$archive_cmds_need_lc_CXX ++ ++# Whether or not to disallow shared libs when runtime libs are static. ++allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes_CXX ++ ++# Compiler flag to allow reflexive dlopens. ++export_dynamic_flag_spec=$lt_export_dynamic_flag_spec_CXX ++ ++# Compiler flag to generate shared objects directly from archives. ++whole_archive_flag_spec=$lt_whole_archive_flag_spec_CXX ++ ++# Whether the compiler copes with passing no objects directly. ++compiler_needs_object=$lt_compiler_needs_object_CXX ++ ++# Create an old-style archive from a shared archive. ++old_archive_from_new_cmds=$lt_old_archive_from_new_cmds_CXX ++ ++# Create a temporary old-style archive to link instead of a shared archive. ++old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds_CXX ++ ++# Commands used to build a shared archive. ++archive_cmds=$lt_archive_cmds_CXX ++archive_expsym_cmds=$lt_archive_expsym_cmds_CXX ++ ++# Commands used to build a loadable module if different from building ++# a shared archive. ++module_cmds=$lt_module_cmds_CXX ++module_expsym_cmds=$lt_module_expsym_cmds_CXX ++ ++# Whether we are building with GNU ld or not. ++with_gnu_ld=$lt_with_gnu_ld_CXX ++ ++# Flag that allows shared libraries with undefined symbols to be built. ++allow_undefined_flag=$lt_allow_undefined_flag_CXX ++ ++# Flag that enforces no undefined symbols. ++no_undefined_flag=$lt_no_undefined_flag_CXX ++ ++# Flag to hardcode \$libdir into a binary during linking. 
++# This must work even if \$libdir does not exist ++hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec_CXX ++ ++# Whether we need a single "-rpath" flag with a separated argument. ++hardcode_libdir_separator=$lt_hardcode_libdir_separator_CXX ++ ++# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes ++# DIR into the resulting binary. ++hardcode_direct=$hardcode_direct_CXX ++ ++# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes ++# DIR into the resulting binary and the resulting library dependency is ++# "absolute",i.e impossible to change by setting \$shlibpath_var if the ++# library is relocated. ++hardcode_direct_absolute=$hardcode_direct_absolute_CXX ++ ++# Set to "yes" if using the -LDIR flag during linking hardcodes DIR ++# into the resulting binary. ++hardcode_minus_L=$hardcode_minus_L_CXX ++ ++# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR ++# into the resulting binary. ++hardcode_shlibpath_var=$hardcode_shlibpath_var_CXX ++ ++# Set to "yes" if building a shared library automatically hardcodes DIR ++# into the library and all subsequent libraries and executables linked ++# against it. ++hardcode_automatic=$hardcode_automatic_CXX ++ ++# Set to yes if linker adds runtime paths of dependent libraries ++# to runtime path list. ++inherit_rpath=$inherit_rpath_CXX ++ ++# Whether libtool must link a program against all its dependency libraries. ++link_all_deplibs=$link_all_deplibs_CXX ++ ++# Set to "yes" if exported symbols are required. ++always_export_symbols=$always_export_symbols_CXX ++ ++# The commands to list exported symbols. ++export_symbols_cmds=$lt_export_symbols_cmds_CXX ++ ++# Symbols that should not be listed in the preloaded symbols. ++exclude_expsyms=$lt_exclude_expsyms_CXX ++ ++# Symbols that must always be exported. ++include_expsyms=$lt_include_expsyms_CXX ++ ++# Commands necessary for linking programs (against libraries) with templates. 
++prelink_cmds=$lt_prelink_cmds_CXX ++ ++# Commands necessary for finishing linking programs. ++postlink_cmds=$lt_postlink_cmds_CXX ++ ++# Specify filename containing input files. ++file_list_spec=$lt_file_list_spec_CXX ++ ++# How to hardcode a shared library path into an executable. ++hardcode_action=$hardcode_action_CXX ++ ++# The directories searched by this compiler when creating a shared library. ++compiler_lib_search_dirs=$lt_compiler_lib_search_dirs_CXX ++ ++# Dependencies to place before and after the objects being linked to ++# create a shared library. ++predep_objects=$lt_predep_objects_CXX ++postdep_objects=$lt_postdep_objects_CXX ++predeps=$lt_predeps_CXX ++postdeps=$lt_postdeps_CXX ++ ++# The library search path used internally by the compiler when linking ++# a shared library. ++compiler_lib_search_path=$lt_compiler_lib_search_path_CXX ++ ++# ### END LIBTOOL TAG CONFIG: CXX ++_LT_EOF ++ ++ ;; ++ ++ esac ++done # for ac_tag ++ ++ ++as_fn_exit 0 ++_ACEOF ++ac_clean_files=$ac_clean_files_save ++ ++test $ac_write_fail = 0 || ++ as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5 ++ ++ ++# configure is writing to config.log, and then calls config.status. ++# config.status does its own redirection, appending to config.log. ++# Unfortunately, on DOS this fails, as config.log is still kept open ++# by configure, so config.status won't be able to write to it; its ++# output is simply discarded. So we exec the FD to /dev/null, ++# effectively closing config.log, so it can be properly (re)opened and ++# appended to by config.status. When coming back to configure, we ++# need to make the FD available again. ++if test "$no_create" != yes; then ++ ac_cs_success=: ++ ac_config_status_args= ++ test "$silent" = yes && ++ ac_config_status_args="$ac_config_status_args --quiet" ++ exec 5>/dev/null ++ $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false ++ exec 5>>config.log ++ # Use ||, not &&, to avoid exiting from the if with $? 
= 1, which ++ # would make configure fail if this is the last instruction. ++ $ac_cs_success || as_fn_exit 1 ++fi ++if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then ++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 ++printf "%s\n" "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} ++fi ++ ++ ++ +diff --git a/bolt-plugin/configure.ac b/bolt-plugin/configure.ac +new file mode 100644 +index 000000000..111d0ac48 +--- /dev/null ++++ b/bolt-plugin/configure.ac +@@ -0,0 +1,60 @@ ++AC_INIT([bolt plugin for ld],[0.1],[],[bolt-plugin]) ++AC_CANONICAL_TARGET ++GCC_TOPLEV_SUBDIRS ++AM_INIT_AUTOMAKE([foreign no-dist]) ++AM_MAINTAINER_MODE ++AC_ARG_WITH(libiberty, ++ [AS_HELP_STRING([--with-libiberty=PATH], ++ [specify the directory where to find libiberty [../libiberty]])], ++ [], with_libiberty=../libiberty) ++AC_SUBST(with_libiberty) ++AC_USE_SYSTEM_EXTENSIONS ++AC_PROG_CC ++AC_PROG_CXX ++AC_SYS_LARGEFILE ++ACX_PROG_CC_WARNING_OPTS([-Wall], [ac_bolt_plugin_warn_cflags]) ++ ++# Check whether -static-libgcc is supported. ++saved_LDFLAGS="$LDFLAGS" ++LDFLAGS="$LDFLAGS -static-libgcc" ++AC_MSG_CHECKING([for -static-libgcc]) ++AC_LINK_IFELSE([AC_LANG_SOURCE([ ++ int main() {}])], [have_static_libgcc=yes], [have_static_libgcc=no]) ++AC_MSG_RESULT($have_static_libgcc); ++LDFLAGS="$saved_LDFLAGS" ++# Need -Wc to get it through libtool. ++if test "x$have_static_libgcc" = xyes; then ++ ac_bolt_plugin_ldflags="-Wc,-static-libgcc" ++fi ++AC_SUBST(ac_bolt_plugin_ldflags) ++ ++if test x"$host_subdir" = x.; then ++ gcc_build_dir=../gcc ++else ++ gcc_build_dir=../../$host_subdir/gcc ++fi ++AC_SUBST(gcc_build_dir) ++ ++# Used for constructing correct paths for offload compilers. 
++accel_dir_suffix= ++real_target_noncanonical=${target_noncanonical} ++if test x"$enable_as_accelerator_for" != x; then ++ accel_dir_suffix=/accel/${target_noncanonical} ++ real_target_noncanonical=${enable_as_accelerator_for} ++fi ++AC_SUBST(accel_dir_suffix) ++AC_SUBST(real_target_noncanonical) ++ ++# Determine what GCC version number to use in filesystem paths. ++GCC_BASE_VER ++ ++LT_INIT ++ACX_LT_HOST_FLAGS ++AC_SUBST(target_noncanonical) ++AC_TYPE_INT64_T ++AC_TYPE_UINT64_T ++AC_HEADER_SYS_WAIT ++AC_CONFIG_FILES(Makefile) ++AC_CONFIG_HEADERS(config.h) ++AC_OUTPUT ++ +diff --git a/gcc/common.opt b/gcc/common.opt +index e69947fc2..44638fe83 100644 +--- a/gcc/common.opt ++++ b/gcc/common.opt +@@ -2554,6 +2554,22 @@ fauto-bolt= + Common Joined RejectNegative + Specify the feedback data directory required by BOLT-plugin. The default is the current directory. + ++fbolt-use ++Common Var(flag_bolt_use) ++Do BOLT optimization after linkage with BOLT profile read from this option. The default is data.fdata. ++ ++fbolt-use= ++Common Joined RejectNegative Var ++Do BOLT optimization after linkage with BOLT profile read from this option. ++ ++fbolt-target= ++Common Joined RejectNegative Var ++Specify the BOLT optimization target binary. ++ ++fbolt-option= ++Common Joined RejectNegative Var ++Specify BOLT optimization options separated by commas. ++ + frerun-cse-after-loop + Common Var(flag_rerun_cse_after_loop) Optimization + Add a common subexpression elimination pass after loop optimizations. +diff --git a/gcc/opts.cc b/gcc/opts.cc +index 6d57e7d69..2bba88140 100644 +--- a/gcc/opts.cc ++++ b/gcc/opts.cc +@@ -1283,6 +1283,10 @@ finish_options (struct gcc_options *opts, struct gcc_options *opts_set, + if (opts->x_flag_auto_bolt && opts->x_flag_lto) + sorry ("%<-fauto-bolt%> is not supported with LTO"); + ++ /* Currently -fbolt-use is not supported for LTO. 
*/ ++ if (opts->x_flag_bolt_use && opts->x_flag_lto) ++ sorry ("-fbolt-use is not supported with LTO"); ++ + /* Control IPA optimizations based on different -flive-patching level. */ + if (opts->x_flag_live_patching) + control_options_for_live_patching (opts, opts_set, +@@ -3284,9 +3288,28 @@ common_handle_option (struct gcc_options *opts, + break; + + case OPT_fauto_bolt_: ++ opts->x_flag_auto_bolt = true; ++ /* FALLTHRU */ + case OPT_fauto_bolt: +- /* Deferred. */ +- break; ++ if (opts->x_flag_bolt_use) ++ error_at (loc, ++ "-fauto-bolt conflicts with -fbolt-use."); ++ break; ++ ++ case OPT_fbolt_use_: ++ case OPT_fbolt_use: ++ if (opts->x_flag_auto_bolt) ++ error_at (loc, ++ "-fauto-bolt conflicts with -fbolt-use."); ++ break; ++ ++ case OPT_fbolt_target_: ++ /* Deferred. */ ++ break; ++ ++ case OPT_fbolt_option_: ++ /* Defferred */ ++ break; + + case OPT_ftabstop_: + /* It is documented that we silently ignore silly values. */ +-- +2.33.0 + diff --git a/0033-AutoBOLT-Enable-BOLT-linker-plugin-on-aarch64-3-3.patch b/0033-AutoBOLT-Enable-BOLT-linker-plugin-on-aarch64-3-3.patch new file mode 100644 index 0000000000000000000000000000000000000000..e850a4dfbd2ffdccf0ae0b1469b5287d6ae6719f --- /dev/null +++ b/0033-AutoBOLT-Enable-BOLT-linker-plugin-on-aarch64-3-3.patch @@ -0,0 +1,345 @@ +From 94242286383a80e6ab83d824a4d7ea23ea311f75 Mon Sep 17 00:00:00 2001 +From: zhenyu--zhao_admin +Date: Mon, 22 Jan 2024 15:38:24 +0800 +Subject: [PATCH] [AutoBOLT] Enable BOLT linker plugin on aarch64 3/3 + +--- + Makefile.def | 10 ++++++++++ + configure | 27 ++++++++++++++++++++++++++- + configure.ac | 22 +++++++++++++++++++++- + gcc/config.host | 1 + + gcc/config.in | 13 +++++++++++++ + gcc/configure | 10 ++++++++-- + gcc/configure.ac | 4 ++++ + gcc/gcc.cc | 23 +++++++++++++++++++++++ + 8 files changed, 106 insertions(+), 4 deletions(-) + +diff --git a/Makefile.def b/Makefile.def +index 72d585496..0ba868890 100644 +--- a/Makefile.def ++++ b/Makefile.def +@@ -145,6 +145,9 @@ 
host_modules= { module= gnattools; }; + host_modules= { module= lto-plugin; bootstrap=true; + extra_configure_flags='--enable-shared @extra_linker_plugin_flags@ @extra_linker_plugin_configure_flags@'; + extra_make_flags='@extra_linker_plugin_flags@'; }; ++host_modules= { module= bolt-plugin; bootstrap=true; ++ extra_configure_flags='--enable-shared @extra_linker_plugin_flags@ @extra_linker_plugin_configure_flags@'; ++ extra_make_flags='@extra_linker_plugin_flags@'; }; + host_modules= { module= libcc1; extra_configure_flags=--enable-shared; }; + host_modules= { module= gotools; }; + host_modules= { module= libctf; bootstrap=true; }; +@@ -349,6 +352,7 @@ dependencies = { module=configure-gcc; on=all-mpfr; }; + dependencies = { module=configure-gcc; on=all-mpc; }; + dependencies = { module=configure-gcc; on=all-isl; }; + dependencies = { module=configure-gcc; on=all-lto-plugin; }; ++dependencies = { module=configure-gcc; on=all-bolt-plugin; }; + dependencies = { module=configure-gcc; on=all-binutils; }; + dependencies = { module=configure-gcc; on=all-gas; }; + dependencies = { module=configure-gcc; on=all-ld; }; +@@ -374,6 +378,7 @@ dependencies = { module=all-gcc; on=all-libdecnumber; hard=true; }; + dependencies = { module=all-gcc; on=all-libiberty; }; + dependencies = { module=all-gcc; on=all-fixincludes; }; + dependencies = { module=all-gcc; on=all-lto-plugin; }; ++dependencies = { module=all-gcc; on=all-bolt-plugin; }; + dependencies = { module=all-gcc; on=all-libiconv; }; + dependencies = { module=info-gcc; on=all-build-libiberty; }; + dependencies = { module=dvi-gcc; on=all-build-libiberty; }; +@@ -381,8 +386,10 @@ dependencies = { module=pdf-gcc; on=all-build-libiberty; }; + dependencies = { module=html-gcc; on=all-build-libiberty; }; + dependencies = { module=install-gcc ; on=install-fixincludes; }; + dependencies = { module=install-gcc ; on=install-lto-plugin; }; ++dependencies = { module=install-gcc ; on=install-bolt-plugin; }; + dependencies = { 
module=install-strip-gcc ; on=install-strip-fixincludes; }; + dependencies = { module=install-strip-gcc ; on=install-strip-lto-plugin; }; ++dependencies = { module=install-strip-gcc ; on=install-strip-bolt-plugin; }; + + dependencies = { module=configure-libcpp; on=configure-libiberty; hard=true; }; + dependencies = { module=configure-libcpp; on=configure-intl; }; +@@ -401,6 +408,9 @@ dependencies = { module=all-gnattools; on=all-target-libstdc++-v3; }; + dependencies = { module=all-lto-plugin; on=all-libiberty; }; + dependencies = { module=all-lto-plugin; on=all-libiberty-linker-plugin; }; + ++dependencies = { module=all-bolt-plugin; on=all-libiberty; }; ++dependencies = { module=all-bolt-plugin; on=all-libiberty-linker-plugin; }; ++ + dependencies = { module=configure-libcc1; on=configure-gcc; }; + dependencies = { module=all-libcc1; on=all-gcc; }; + +diff --git a/configure b/configure +index 5dcaab14a..aff62c464 100755 +--- a/configure ++++ b/configure +@@ -826,6 +826,7 @@ with_isl + with_isl_include + with_isl_lib + enable_isl_version_check ++enable_bolt + enable_lto + enable_linker_plugin_configure_flags + enable_linker_plugin_flags +@@ -1550,6 +1551,7 @@ Optional Features: + enable the PGO build + --disable-isl-version-check + disable check for isl version ++ --enable-bolt enable bolt optimization support + --enable-lto enable link time optimization support + --enable-linker-plugin-configure-flags=FLAGS + additional flags for configuring linker plugins +@@ -8564,6 +8566,15 @@ fi + + + ++# Check for BOLT support. ++# Check whether --enable-bolt was given. ++if test "${enable_bolt+set}" = set; then : ++ enableval=$enable_bolt; enable_bolt=$enableval ++else ++ enable_bolt=no; default_enable_bolt=no ++fi ++ ++ + # Check for LTO support. + # Check whether --enable-lto was given. + if test "${enable_lto+set}" = set; then : +@@ -8593,6 +8604,16 @@ if test $target_elf = yes; then : + # ELF platforms build the lto-plugin always. 
+ build_lto_plugin=yes + ++ # ELF platforms can build the bolt-plugin. ++ # NOT BUILD BOLT BY DEFAULT. ++ case $target in ++ aarch64*-*-linux*) ++ if test $enable_bolt = yes; then : ++ build_bolt_plugin=yes ++ fi ++ ;; ++ esac ++ + else + if test x"$default_enable_lto" = x"yes" ; then + case $target in +@@ -8780,6 +8801,10 @@ if test -d ${srcdir}/gcc; then + fi + fi + ++ if test "${build_bolt_plugin}" = "yes" ; then ++ configdirs="$configdirs bolt-plugin" ++ fi ++ + # If we're building an offloading compiler, add the LTO front end. + if test x"$enable_as_accelerator_for" != x ; then + case ,${enable_languages}, in +@@ -9202,7 +9227,7 @@ fi + extra_host_libiberty_configure_flags= + extra_host_zlib_configure_flags= + case " $configdirs " in +- *" lto-plugin "* | *" libcc1 "*) ++ *" lto-plugin "* | *" libcc1 "* | *" bolt-plugin "*) + # When these are to be built as shared libraries, the same applies to + # libiberty. + extra_host_libiberty_configure_flags=--enable-shared +diff --git a/configure.ac b/configure.ac +index 85977482a..f310d75ca 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -1863,6 +1863,12 @@ fi + AC_SUBST(isllibs) + AC_SUBST(islinc) + ++# Check for BOLT support. ++AC_ARG_ENABLE(bolt, ++[AS_HELP_STRING([--enable-bolt], [enable bolt optimization support])], ++enable_bolt=$enableval, ++enable_bolt=no; default_enable_bolt=no) ++ + # Check for LTO support. + AC_ARG_ENABLE(lto, + [AS_HELP_STRING([--enable-lto], [enable link time optimization support])], +@@ -1871,6 +1877,16 @@ enable_lto=yes; default_enable_lto=yes) + + ACX_ELF_TARGET_IFELSE([# ELF platforms build the lto-plugin always. + build_lto_plugin=yes ++ ++ # ELF platforms can build the bolt-plugin. ++ # NOT BUILD BOLT BY DEFAULT. 
++ case $target in ++ aarch64*-*-linux*) ++ if test $enable_bolt = yes; then : ++ build_bolt_plugin=yes ++ fi ++ ;; ++ esac + ],[if test x"$default_enable_lto" = x"yes" ; then + case $target in + *-apple-darwin[[912]]* | *-cygwin* | *-mingw* | *djgpp*) ;; +@@ -2049,6 +2065,10 @@ if test -d ${srcdir}/gcc; then + fi + fi + ++ if test "${build_bolt_plugin}" = "yes" ; then ++ configdirs="$configdirs bolt-plugin" ++ fi ++ + # If we're building an offloading compiler, add the LTO front end. + if test x"$enable_as_accelerator_for" != x ; then + case ,${enable_languages}, in +@@ -2457,7 +2477,7 @@ fi + extra_host_libiberty_configure_flags= + extra_host_zlib_configure_flags= + case " $configdirs " in +- *" lto-plugin "* | *" libcc1 "*) ++ *" lto-plugin "* | *" libcc1 "* | *" bolt-plugin "*) + # When these are to be built as shared libraries, the same applies to + # libiberty. + extra_host_libiberty_configure_flags=--enable-shared +diff --git a/gcc/config.host b/gcc/config.host +index 4ca300f11..bf7dcb4cc 100644 +--- a/gcc/config.host ++++ b/gcc/config.host +@@ -75,6 +75,7 @@ out_host_hook_obj=host-default.o + host_can_use_collect2=yes + use_long_long_for_widest_fast_int=no + host_lto_plugin_soname=liblto_plugin.so ++host_bolt_plugin_soname=libbolt_plugin.so + + # Unsupported hosts list. Generally, only include hosts known to fail here, + # since we allow hosts not listed to be supported generically. +diff --git a/gcc/config.in b/gcc/config.in +index 64c27c9cf..6bb25b25b 100644 +--- a/gcc/config.in ++++ b/gcc/config.in +@@ -24,6 +24,13 @@ + #endif + + ++/* Define to the name of the BOLT plugin DSO that must be passed to the ++ linker's -plugin=LIB option. */ ++#ifndef USED_FOR_TARGET ++#undef BOLTPLUGINSONAME ++#endif ++ ++ + /* Define to the root for URLs about GCC changes. */ + #ifndef USED_FOR_TARGET + #undef CHANGES_ROOT_URL +@@ -2208,6 +2215,12 @@ + #endif + + ++/* Define which stat syscall is able to handle 64bit indodes. 
*/ ++#ifndef USED_FOR_TARGET ++#undef HOST_STAT_FOR_64BIT_INODES ++#endif ++ ++ + /* Define as const if the declaration of iconv() needs const. */ + #ifndef USED_FOR_TARGET + #undef ICONV_CONST +diff --git a/gcc/configure b/gcc/configure +index 98bbf0f85..30f386789 100755 +--- a/gcc/configure ++++ b/gcc/configure +@@ -13578,6 +13578,12 @@ case $use_collect2 in + esac + + ++cat >>confdefs.h <<_ACEOF ++#define BOLTPLUGINSONAME "${host_bolt_plugin_soname}" ++_ACEOF ++ ++ ++ + cat >>confdefs.h <<_ACEOF + #define LTOPLUGINSONAME "${host_lto_plugin_soname}" + _ACEOF +@@ -19668,7 +19674,7 @@ else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +-#line 19671 "configure" ++#line 19677 "configure" + #include "confdefs.h" + + #if HAVE_DLFCN_H +@@ -19774,7 +19780,7 @@ else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +-#line 19777 "configure" ++#line 19783 "configure" + #include "confdefs.h" + + #if HAVE_DLFCN_H +diff --git a/gcc/configure.ac b/gcc/configure.ac +index c74f4b555..dd6cd60f8 100644 +--- a/gcc/configure.ac ++++ b/gcc/configure.ac +@@ -2531,6 +2531,10 @@ case $use_collect2 in + ;; + esac + ++AC_DEFINE_UNQUOTED(BOLTPLUGINSONAME,"${host_bolt_plugin_soname}", ++[Define to the name of the BOLT plugin DSO that must be ++ passed to the linker's -plugin=LIB option.]) ++ + AC_DEFINE_UNQUOTED(LTOPLUGINSONAME,"${host_lto_plugin_soname}", + [Define to the name of the LTO plugin DSO that must be + passed to the linker's -plugin=LIB option.]) +diff --git a/gcc/gcc.cc b/gcc/gcc.cc +index fbcc9d033..b0d03430e 100644 +--- a/gcc/gcc.cc ++++ b/gcc/gcc.cc +@@ -1156,6 +1156,8 @@ proper position among the other output files. 
*/ + %{!fsyntax-only:%{!c:%{!M:%{!MM:%{!E:%{!S:\ + %(linker) " \ + LINK_PLUGIN_SPEC \ ++ "%{fauto-bolt|fauto-bolt=*|fbolt-use|fbolt-use=*: \ ++ -plugin %(linker_auto_bolt_plugin_file) }"\ + "%{flto|flto=*:% +Date: Sat, 23 Mar 2024 22:56:09 +0800 +Subject: [PATCH] [Autofdo]Enable discrimibator and MCF algorithm on Autofdo + +--- + gcc/auto-profile.cc | 171 +++++++++++++++++++++++++++++++++++++++++++- + gcc/cfghooks.cc | 7 ++ + gcc/opts.cc | 5 +- + gcc/tree-inline.cc | 14 ++++ + 4 files changed, 193 insertions(+), 4 deletions(-) + +diff --git a/gcc/auto-profile.cc b/gcc/auto-profile.cc +index 2b34b80b8..f45f0ec66 100644 +--- a/gcc/auto-profile.cc ++++ b/gcc/auto-profile.cc +@@ -466,6 +466,17 @@ string_table::get_index (const char *name) const + if (name == NULL) + return -1; + string_index_map::const_iterator iter = map_.find (name); ++ /* Function name may be duplicate. Try to distinguish by the ++ #file_name#function_name defined by the autofdo tool chain. */ ++ if (iter == map_.end ()) ++ { ++ char* file_name = get_original_name (lbasename (dump_base_name)); ++ char* file_func_name ++ = concat ("#", file_name, "#", name, NULL); ++ iter = map_.find (file_func_name); ++ free (file_name); ++ free (file_func_name); ++ } + if (iter == map_.end ()) + return -1; + +@@ -654,7 +665,7 @@ function_instance::read_function_instance (function_instance_stack *stack, + + for (unsigned i = 0; i < num_pos_counts; i++) + { +- unsigned offset = gcov_read_unsigned () & 0xffff0000; ++ unsigned offset = gcov_read_unsigned (); + unsigned num_targets = gcov_read_unsigned (); + gcov_type count = gcov_read_counter (); + s->pos_counts[offset].count = count; +@@ -733,6 +744,10 @@ autofdo_source_profile::get_count_info (gimple *stmt, count_info *info) const + function_instance *s = get_function_instance_by_inline_stack (stack); + if (s == NULL) + return false; ++ if (s->get_count_info (stack[0].second + stmt->bb->discriminator, info)) ++ { ++ return true; ++ } + return s->get_count_info 
(stack[0].second, info); + } + +@@ -1395,6 +1410,66 @@ afdo_propagate (bb_set *annotated_bb) + } + } + ++/* Process the following scene when the branch probability ++ inversion when do function afdo_propagate (). E.g. ++ BB_NUM (sample count) ++ BB1 (1000) ++ / \ ++ BB2 (10) BB3 (0) ++ \ / ++ BB4 ++ In afdo_propagate ().count of BB3 is calculated by ++ COUNT (BB3) = 990 (990 = COUNT (BB1) - COUNT (BB2) = 1000 - 10) ++ In fact, BB3 may be colder than BB2 by sample count. ++ This function allocate source BB count to wach succ BB by sample ++ rate, E.g. ++ BB2_COUNT = BB1_COUNT * (BB2_COUNT / (BB2_COUNT + BB3_COUNT)) */ ++ ++static void ++afdo_preprocess_bb_count () ++{ ++ basic_block bb; ++ FOR_ALL_BB_FN (bb, cfun) ++ { ++ if (bb->count.ipa_p () && EDGE_COUNT (bb->succs) > 1 ++ && bb->count > profile_count::zero ().afdo ()) ++ { ++ basic_block bb1 = EDGE_SUCC (bb, 0)->dest; ++ basic_block bb2 = EDGE_SUCC (bb, 1)->dest; ++ if (single_succ_edge (bb1) && single_succ_edge (bb2) ++ && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest) ++ { ++ gcov_type max_count = 0; ++ gcov_type total_count = 0; ++ edge e; ++ edge_iterator ei; ++ FOR_EACH_EDGE (e, ei, bb->succs) ++ { ++ if (!e->dest->count.ipa_p ()) ++ { ++ continue; ++ } ++ max_count = MAX (max_count, e->dest->count.to_gcov_type ()); ++ total_count += e->dest->count.to_gcov_type (); ++ } ++ /* Only bb_count > max_count * 2, branch probability will ++ inversion. */ ++ if (max_count > 0 && bb->count.to_gcov_type () > max_count * 2) ++ { ++ FOR_EACH_EDGE (e, ei, bb->succs) ++ { ++ gcov_type target_count = bb->count.to_gcov_type () ++ * e->dest->count.to_gcov_type ()/ total_count; ++ e->dest->count ++ = profile_count::from_gcov_type ++ (target_count).afdo (); ++ } ++ } ++ } ++ } ++ } ++} ++ + /* Propagate counts on control flow graph and calculate branch + probabilities. 
*/ + +@@ -1420,6 +1495,7 @@ afdo_calculate_branch_prob (bb_set *annotated_bb) + } + + afdo_find_equiv_class (annotated_bb); ++ afdo_preprocess_bb_count (); + afdo_propagate (annotated_bb); + + FOR_EACH_BB_FN (bb, cfun) +@@ -1523,6 +1599,83 @@ afdo_vpt_for_early_inline (stmt_set *promoted_stmts) + return false; + } + ++/* Preparation before executing MCF algorithm. */ ++ ++static void ++afdo_init_mcf () ++{ ++ basic_block bb; ++ edge e; ++ edge_iterator ei; ++ ++ if (dump_file) ++ { ++ fprintf (dump_file, "\n init calling mcf_smooth_cfg (). \n"); ++ } ++ ++ /* Step1: when use mcf, BB id must be continous, ++ so we need compact_blocks (). */ ++ compact_blocks (); ++ ++ /* Step2: allocate memory for MCF input data. */ ++ bb_gcov_counts.safe_grow_cleared (cfun->cfg->x_last_basic_block); ++ edge_gcov_counts = new hash_map; ++ ++ /* Step3: init MCF input data from cfg. */ ++ FOR_ALL_BB_FN (bb, cfun) ++ { ++ /* Init BB count for MCF. */ ++ bb_gcov_count (bb) = bb->count.to_gcov_type (); ++ ++ gcov_type total_count = 0; ++ FOR_EACH_EDGE (e, ei, bb->succs) ++ { ++ total_count += e->dest->count.to_gcov_type (); ++ } ++ ++ /* If there is no sample in each successor blocks, source ++ BB samples are allocated to each edge by branch static prob. */ ++ ++ FOR_EACH_EDGE (e, ei, bb->succs) ++ { ++ if (total_count == 0) ++ { ++ edge_gcov_count (e) = e->src->count.to_gcov_type () ++ * e->probability.to_reg_br_prob_base () / REG_BR_PROB_BASE; ++ } ++ else ++ { ++ edge_gcov_count (e) = e->src->count.to_gcov_type () ++ * e->dest->count.to_gcov_type () / total_count; ++ } ++ } ++ } ++} ++ ++ ++/* Free the resources used by MCF and reset BB count from MCF result. ++ branch probability has been updated in mcf_smooth_cfg (). */ ++ ++static void ++afdo_process_after_mcf () ++{ ++ basic_block bb; ++ /* Reset BB count from MCF result. 
*/ ++ FOR_EACH_BB_FN (bb, cfun) ++ { ++ if (bb_gcov_count (bb)) ++ { ++ bb->count ++ = profile_count::from_gcov_type (bb_gcov_count (bb)).afdo (); ++ } ++ } ++ ++ /* Clean up MCF resource. */ ++ bb_gcov_counts.release (); ++ delete edge_gcov_counts; ++ edge_gcov_counts = NULL; ++} ++ + /* Annotate auto profile to the control flow graph. Do not annotate value + profile for stmts in PROMOTED_STMTS. */ + +@@ -1574,8 +1727,20 @@ afdo_annotate_cfg (const stmt_set &promoted_stmts) + afdo_source_profile->mark_annotated (cfun->function_end_locus); + if (max_count > profile_count::zero ()) + { +- /* Calculate, propagate count and probability information on CFG. */ +- afdo_calculate_branch_prob (&annotated_bb); ++ /* 1 means -fprofile-correction is enbaled manually, and MCF ++ algorithm will be used to calculate count and probability. ++ Otherwise, use the default calculate algorithm. */ ++ if (flag_profile_correction == 1) ++ { ++ afdo_init_mcf (); ++ mcf_smooth_cfg (); ++ afdo_process_after_mcf (); ++ } ++ else ++ { ++ /* Calculate, propagate count and probability information on CFG. */ ++ afdo_calculate_branch_prob (&annotated_bb); ++ } + } + update_max_bb_count (); + profile_status_for_fn (cfun) = PROFILE_READ; +diff --git a/gcc/cfghooks.cc b/gcc/cfghooks.cc +index c0b7bdcd9..323663010 100644 +--- a/gcc/cfghooks.cc ++++ b/gcc/cfghooks.cc +@@ -542,6 +542,9 @@ split_block_1 (basic_block bb, void *i) + return NULL; + + new_bb->count = bb->count; ++ /* Copy discriminator from original bb for distinguishes among ++ several basic blocks that share a common locus, allowing for ++ more accurate autofdo. 
*/ + new_bb->discriminator = bb->discriminator; + + if (dom_info_available_p (CDI_DOMINATORS)) +@@ -1113,6 +1116,10 @@ duplicate_block (basic_block bb, edge e, basic_block after, copy_bb_data *id) + move_block_after (new_bb, after); + + new_bb->flags = (bb->flags & ~BB_DUPLICATED); ++ /* Copy discriminator from original bb for distinguishes among ++ several basic blocks that share a common locus, allowing for ++ more accurate autofdo. */ ++ new_bb->discriminator = bb->discriminator; + FOR_EACH_EDGE (s, ei, bb->succs) + { + /* Since we are creating edges from a new block to successors +diff --git a/gcc/opts.cc b/gcc/opts.cc +index 2bba88140..4b4925331 100644 +--- a/gcc/opts.cc ++++ b/gcc/opts.cc +@@ -3014,7 +3014,10 @@ common_handle_option (struct gcc_options *opts, + /* FALLTHRU */ + case OPT_fauto_profile: + enable_fdo_optimizations (opts, opts_set, value); +- SET_OPTION_IF_UNSET (opts, opts_set, flag_profile_correction, value); ++ /* 2 is special and means flag_profile_correction trun on by ++ -fauto-profile. */ ++ SET_OPTION_IF_UNSET (opts, opts_set, flag_profile_correction, ++ (value ? 2 : 0)); + break; + + case OPT_fipa_struct_reorg_: +diff --git a/gcc/tree-inline.cc b/gcc/tree-inline.cc +index f892cee3f..f50dbbc52 100644 +--- a/gcc/tree-inline.cc ++++ b/gcc/tree-inline.cc +@@ -2038,6 +2038,10 @@ copy_bb (copy_body_data *id, basic_block bb, + basic_block_info automatically. */ + copy_basic_block = create_basic_block (NULL, (basic_block) prev->aux); + copy_basic_block->count = bb->count.apply_scale (num, den); ++ /* Copy discriminator from original bb for distinguishes among ++ several basic blocks that share a common locus, allowing for ++ more accurate autofdo. 
*/ ++ copy_basic_block->discriminator = bb->discriminator; + + copy_gsi = gsi_start_bb (copy_basic_block); + +@@ -3058,6 +3062,16 @@ copy_cfg_body (copy_body_data * id, + den += e->count (); + ENTRY_BLOCK_PTR_FOR_FN (cfun)->count = den; + } ++ /* When autofdo uses PMU as the sampling unit, the number of ++ ENTRY_BLOCK_PTR_FOR_FN cannot be obtained directly and will ++ be zero. It using for adjust_for_ipa_scaling will cause the ++ inlined BB count incorrectly overestimated. So set den equal ++ to num, which is the source inline BB count to avoid ++ overestimated. */ ++ if (den == profile_count::zero ().afdo ()) ++ { ++ den = num; ++ } + + profile_count::adjust_for_ipa_scaling (&num, &den); + +-- +2.33.0 + diff --git a/0035-Add-insn-defs-and-correct-costs-for-cmlt-generation.patch b/0035-Add-insn-defs-and-correct-costs-for-cmlt-generation.patch new file mode 100644 index 0000000000000000000000000000000000000000..c40886ab41742bb3eb4d4ff9065405ba6fff5065 --- /dev/null +++ b/0035-Add-insn-defs-and-correct-costs-for-cmlt-generation.patch @@ -0,0 +1,194 @@ +From aa39a66f6029fe16a656d7c6339908b953fb1e04 Mon Sep 17 00:00:00 2001 +From: Diachkov Ilia WX1215920 +Date: Thu, 22 Feb 2024 11:27:43 +0300 +Subject: [PATCH 01/18] Add insn defs and correct costs for cmlt generation + +--- + gcc/config/aarch64/aarch64-simd.md | 48 +++++++++++++++++++++++++++++ + gcc/config/aarch64/aarch64.cc | 15 +++++++++ + gcc/config/aarch64/aarch64.opt | 4 +++ + gcc/config/aarch64/iterators.md | 3 +- + gcc/config/aarch64/predicates.md | 25 +++++++++++++++ + gcc/testsuite/gcc.dg/combine-cmlt.c | 20 ++++++++++++ + 6 files changed, 114 insertions(+), 1 deletion(-) + create mode 100755 gcc/testsuite/gcc.dg/combine-cmlt.c + +diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md +index ee7f0b89c..82f73805f 100644 +--- a/gcc/config/aarch64/aarch64-simd.md ++++ b/gcc/config/aarch64/aarch64-simd.md +@@ -6454,6 +6454,54 @@ + [(set_attr "type" "neon_compare, neon_compare_zero")] 
+ ) + ++;; Use cmlt to replace vector arithmetic operations like this (SImode example): ++;; B = (((A >> 15) & 0x00010001) << 16) - ((A >> 15) & 0x00010001) ++;; TODO: maybe extend to scalar operations or other cm** instructions. ++ ++(define_insn "*aarch64_cmlt_as_arith" ++ [(set (match_operand: 0 "register_operand" "=w") ++ (minus: ++ (ashift: ++ (and: ++ (lshiftrt: ++ (match_operand:VDQHSD 1 "register_operand" "w") ++ (match_operand:VDQHSD 2 "half_size_minus_one_operand")) ++ (match_operand:VDQHSD 3 "cmlt_arith_mask_operand")) ++ (match_operand:VDQHSD 4 "half_size_operand")) ++ (and: ++ (lshiftrt: ++ (match_dup 1) ++ (match_dup 2)) ++ (match_dup 3))))] ++ "TARGET_SIMD && flag_cmlt_arith" ++ "cmlt\t%0., %1., #0" ++ [(set_attr "type" "neon_compare_zero")] ++) ++ ++;; The helper definition that allows combiner to use the previous pattern. ++ ++(define_insn_and_split "*arch64_cmlt_tmp" ++ [(set (match_operand: 0 "register_operand" "=w") ++ (and: ++ (lshiftrt: ++ (match_operand:VDQHSD 1 "register_operand" "w") ++ (match_operand:VDQHSD 2 "half_size_minus_one_operand")) ++ (match_operand:VDQHSD 3 "cmlt_arith_mask_operand")))] ++ "TARGET_SIMD && flag_cmlt_arith" ++ "#" ++ "&& reload_completed" ++ [(set (match_operand: 0 "register_operand") ++ (lshiftrt: ++ (match_operand:VDQHSD 1 "register_operand") ++ (match_operand:VDQHSD 2 "half_size_minus_one_operand"))) ++ (set (match_dup 0) ++ (and: ++ (match_dup 0) ++ (match_operand:VDQHSD 3 "cmlt_arith_mask_operand")))] ++ "" ++ [(set_attr "type" "neon_compare_zero")] ++) ++ + (define_insn_and_split "aarch64_cmdi" + [(set (match_operand:DI 0 "register_operand" "=w,w,r") + (neg:DI +diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc +index a3da4ca30..04072ca25 100644 +--- a/gcc/config/aarch64/aarch64.cc ++++ b/gcc/config/aarch64/aarch64.cc +@@ -14064,6 +14064,21 @@ cost_minus: + return true; + } + ++ /* Detect aarch64_cmlt_as_arith instruction. Now only this pattern ++ matches the condition. 
The costs of cmlt and sub instructions ++ are comparable, so we are not increasing the cost here. */ ++ if (flag_cmlt_arith && GET_CODE (op0) == ASHIFT ++ && GET_CODE (op1) == AND) ++ { ++ rtx op0_subop0 = XEXP (op0, 0); ++ if (rtx_equal_p (op0_subop0, op1)) ++ { ++ rtx lshrt_op = XEXP (op0_subop0, 0); ++ if (GET_CODE (lshrt_op) == LSHIFTRT) ++ return true; ++ } ++ } ++ + /* Look for SUB (extended register). */ + if (is_a (mode) + && aarch64_rtx_arith_op_extract_p (op1)) +diff --git a/gcc/config/aarch64/aarch64.opt b/gcc/config/aarch64/aarch64.opt +index a64b927e9..101664c7c 100644 +--- a/gcc/config/aarch64/aarch64.opt ++++ b/gcc/config/aarch64/aarch64.opt +@@ -262,6 +262,10 @@ Use an immediate to offset from the stack protector guard register, sp_el0. + This option is for use with fstack-protector-strong and not for use in + user-land code. + ++mcmlt-arith ++Target Var(flag_cmlt_arith) Optimization Init(0) ++Use SIMD cmlt instruction to perform some arithmetic/logic calculations. ++ + TargetVariable + long aarch64_stack_protector_guard_offset = 0 + +diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md +index 26a840d7f..967e6b0b1 100644 +--- a/gcc/config/aarch64/iterators.md ++++ b/gcc/config/aarch64/iterators.md +@@ -1485,7 +1485,8 @@ + (V2DI "2s")]) + + ;; Register suffix narrowed modes for VQN. +-(define_mode_attr V2ntype [(V8HI "16b") (V4SI "8h") ++(define_mode_attr V2ntype [(V4HI "8b") (V2SI "4h") ++ (V8HI "16b") (V4SI "8h") + (V2DI "4s")]) + + ;; Widened modes of vector modes. 
+diff --git a/gcc/config/aarch64/predicates.md b/gcc/config/aarch64/predicates.md +index c308015ac..07c14aacb 100644 +--- a/gcc/config/aarch64/predicates.md ++++ b/gcc/config/aarch64/predicates.md +@@ -49,6 +49,31 @@ + return CONST_INT_P (op) && IN_RANGE (INTVAL (op), 1, 3); + }) + ++(define_predicate "half_size_minus_one_operand" ++ (match_code "const_vector") ++{ ++ op = unwrap_const_vec_duplicate (op); ++ unsigned int size = GET_MODE_UNIT_BITSIZE (mode) / 2; ++ return CONST_INT_P (op) && (UINTVAL (op) == size - 1); ++}) ++ ++(define_predicate "half_size_operand" ++ (match_code "const_vector") ++{ ++ op = unwrap_const_vec_duplicate (op); ++ unsigned int size = GET_MODE_UNIT_BITSIZE (mode) / 2; ++ return CONST_INT_P (op) && (UINTVAL (op) == size); ++}) ++ ++(define_predicate "cmlt_arith_mask_operand" ++ (match_code "const_vector") ++{ ++ op = unwrap_const_vec_duplicate (op); ++ unsigned int size = GET_MODE_UNIT_BITSIZE (mode) / 2; ++ unsigned long long mask = ((unsigned long long) 1 << size) | 1; ++ return CONST_INT_P (op) && (UINTVAL (op) == mask); ++}) ++ + (define_predicate "subreg_lowpart_operator" + (ior (match_code "truncate") + (and (match_code "subreg") +diff --git a/gcc/testsuite/gcc.dg/combine-cmlt.c b/gcc/testsuite/gcc.dg/combine-cmlt.c +new file mode 100755 +index 000000000..b4c9a37ff +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/combine-cmlt.c +@@ -0,0 +1,20 @@ ++/* { dg-do compile { target aarch64-*-* } } */ ++/* { dg-options "-O3 -mcmlt-arith" } */ ++ ++/* The test checks usage of cmlt insns for arithmetic/logic calculations ++ * in foo (). It's inspired by sources of x264 codec. 
*/ ++ ++typedef unsigned short int uint16_t; ++typedef unsigned int uint32_t; ++ ++void foo( uint32_t *a, uint32_t *b) ++{ ++ for (unsigned i = 0; i < 4; i++) ++ { ++ uint32_t s = ((a[i]>>((8 * sizeof(uint16_t))-1)) ++ &(((uint32_t)1<<(8 * sizeof(uint16_t)))+1))*((uint16_t)-1); ++ b[i] = (a[i]+s)^s; ++ } ++} ++ ++/* { dg-final { scan-assembler-times {cmlt\t} 1 } } */ +-- +2.33.0 + diff --git a/0036-rtl-ifcvt-introduce-rtl-ifcvt-enchancements.patch b/0036-rtl-ifcvt-introduce-rtl-ifcvt-enchancements.patch new file mode 100644 index 0000000000000000000000000000000000000000..813eba9323f9b19aff134995289462e26eb04dfa --- /dev/null +++ b/0036-rtl-ifcvt-introduce-rtl-ifcvt-enchancements.patch @@ -0,0 +1,560 @@ +From 4cae948c1c00ad7a59f0f234f809fbd9a0208eb4 Mon Sep 17 00:00:00 2001 +From: vchernon +Date: Wed, 28 Feb 2024 23:05:12 +0800 +Subject: [PATCH 02/18] [rtl-ifcvt] introduce rtl ifcvt enchancements new + option: -fifcvt-allow-complicated-cmps: allows ifcvt to deal + with complicated cmps like + + cmp reg1 (reg2 + reg3) + + can increase compilation time + new param: + -param=ifcvt-allow-register-renaming=[0,1,2] + 1 : allows ifcvt to rename registers in then and else bb + 2 : allows to rename registers in condition and else/then bb + can increase compilation time and register pressure +--- + gcc/common.opt | 4 + + gcc/ifcvt.cc | 291 +++++++++++++++--- + gcc/params.opt | 4 + + .../gcc.c-torture/execute/ifcvt-renaming-1.c | 35 +++ + gcc/testsuite/gcc.dg/ifcvt-6.c | 27 ++ + 5 files changed, 311 insertions(+), 50 deletions(-) + create mode 100644 gcc/testsuite/gcc.c-torture/execute/ifcvt-renaming-1.c + create mode 100644 gcc/testsuite/gcc.dg/ifcvt-6.c + +diff --git a/gcc/common.opt b/gcc/common.opt +index c7c6bc256..aa00fb7b0 100644 +--- a/gcc/common.opt ++++ b/gcc/common.opt +@@ -3691,4 +3691,8 @@ fipa-ra + Common Var(flag_ipa_ra) Optimization + Use caller save register across calls if possible. 
+ ++fifcvt-allow-complicated-cmps ++Common Var(flag_ifcvt_allow_complicated_cmps) Optimization ++Allow RTL if-conversion pass to deal with complicated cmps (can increase compilation time). ++ + ; This comment is to ensure we retain the blank line above. +diff --git a/gcc/ifcvt.cc b/gcc/ifcvt.cc +index 2c1eba312..584db7b55 100644 +--- a/gcc/ifcvt.cc ++++ b/gcc/ifcvt.cc +@@ -886,7 +886,9 @@ noce_emit_store_flag (struct noce_if_info *if_info, rtx x, int reversep, + } + + /* Don't even try if the comparison operands or the mode of X are weird. */ +- if (cond_complex || !SCALAR_INT_MODE_P (GET_MODE (x))) ++ if (!flag_ifcvt_allow_complicated_cmps ++ && (cond_complex ++ || !SCALAR_INT_MODE_P (GET_MODE (x)))) + return NULL_RTX; + + return emit_store_flag (x, code, XEXP (cond, 0), +@@ -1965,7 +1967,8 @@ insn_valid_noce_process_p (rtx_insn *insn, rtx cc) + /* Currently support only simple single sets in test_bb. */ + if (!sset + || !noce_operand_ok (SET_DEST (sset)) +- || contains_ccmode_rtx_p (SET_DEST (sset)) ++ || (!flag_ifcvt_allow_complicated_cmps ++ && contains_ccmode_rtx_p (SET_DEST (sset))) + || !noce_operand_ok (SET_SRC (sset))) + return false; + +@@ -1979,13 +1982,17 @@ insn_valid_noce_process_p (rtx_insn *insn, rtx cc) + in this function. */ + + static bool +-bbs_ok_for_cmove_arith (basic_block bb_a, basic_block bb_b, rtx to_rename) ++bbs_ok_for_cmove_arith (basic_block bb_a, ++ basic_block bb_b, ++ rtx to_rename, ++ bitmap conflict_regs) + { + rtx_insn *a_insn; + bitmap bba_sets = BITMAP_ALLOC (®_obstack); +- ++ bitmap intersections = BITMAP_ALLOC (®_obstack); + df_ref def; + df_ref use; ++ rtx_insn *last_a = last_active_insn (bb_a, FALSE); + + FOR_BB_INSNS (bb_a, a_insn) + { +@@ -1995,18 +2002,15 @@ bbs_ok_for_cmove_arith (basic_block bb_a, basic_block bb_b, rtx to_rename) + rtx sset_a = single_set (a_insn); + + if (!sset_a) +- { +- BITMAP_FREE (bba_sets); +- return false; +- } ++ goto end_cmove_arith_check_and_fail; + /* Record all registers that BB_A sets. 
*/ + FOR_EACH_INSN_DEF (def, a_insn) +- if (!(to_rename && DF_REF_REG (def) == to_rename)) ++ if (!(to_rename && DF_REF_REG (def) == to_rename && a_insn == last_a)) + bitmap_set_bit (bba_sets, DF_REF_REGNO (def)); + } + ++ bitmap_and (intersections, df_get_live_in (bb_b), bba_sets); + rtx_insn *b_insn; +- + FOR_BB_INSNS (bb_b, b_insn) + { + if (!active_insn_p (b_insn)) +@@ -2015,10 +2019,7 @@ bbs_ok_for_cmove_arith (basic_block bb_a, basic_block bb_b, rtx to_rename) + rtx sset_b = single_set (b_insn); + + if (!sset_b) +- { +- BITMAP_FREE (bba_sets); +- return false; +- } ++ goto end_cmove_arith_check_and_fail; + + /* Make sure this is a REG and not some instance + of ZERO_EXTRACT or SUBREG or other dangerous stuff. +@@ -2030,25 +2031,34 @@ bbs_ok_for_cmove_arith (basic_block bb_a, basic_block bb_b, rtx to_rename) + if (MEM_P (SET_DEST (sset_b))) + gcc_assert (rtx_equal_p (SET_DEST (sset_b), to_rename)); + else if (!REG_P (SET_DEST (sset_b))) +- { +- BITMAP_FREE (bba_sets); +- return false; +- } ++ goto end_cmove_arith_check_and_fail; + +- /* If the insn uses a reg set in BB_A return false. */ ++ /* If the insn uses a reg set in BB_A return false ++ or try to collect register list for renaming. */ + FOR_EACH_INSN_USE (use, b_insn) + { +- if (bitmap_bit_p (bba_sets, DF_REF_REGNO (use))) ++ if (bitmap_bit_p (intersections, DF_REF_REGNO (use))) + { +- BITMAP_FREE (bba_sets); +- return false; ++ if (param_ifcvt_allow_register_renaming < 1) ++ goto end_cmove_arith_check_and_fail; ++ ++ /* Those regs should be renamed. We can't rename CC reg, but ++ possibly we can provide combined comparison in the future. 
*/
++	      if (GET_MODE_CLASS (GET_MODE (DF_REF_REG (use))) == MODE_CC)
++		goto end_cmove_arith_check_and_fail;
++	      bitmap_set_bit (conflict_regs, DF_REF_REGNO (use));
+ 	    }
+ 	}
+-
+     }
+ 
+   BITMAP_FREE (bba_sets);
++  BITMAP_FREE (intersections);
+   return true;
++
++end_cmove_arith_check_and_fail:
++  BITMAP_FREE (bba_sets);
++  BITMAP_FREE (intersections);
++  return false;
+ }
+ 
+ /* Emit copies of all the active instructions in BB except the last.
+@@ -2103,6 +2113,142 @@ noce_emit_bb (rtx last_insn, basic_block bb, bool simple)
+   return true;
+ }
+ 
++/* This function tries to rename regs that intersect with considered bb
++   inside condition expression. Condition expression will be moved down
++   if the optimization will be applied, so it is essential to be sure that
++   all intersected registers will be renamed otherwise transformation
++   can't be applied. Function returns true if renaming was successful
++   and optimization can proceed further. */
++
++static bool
++noce_rename_regs_in_cond (struct noce_if_info *if_info, bitmap cond_rename_regs)
++{
++  bool success = true;
++  if (bitmap_empty_p (cond_rename_regs))
++    return true;
++  if (param_ifcvt_allow_register_renaming < 2)
++    return false;
++  df_ref use;
++  rtx_insn *cmp_insn = if_info->cond_earliest;
++  /* Jump instruction as a condition is currently unsupported. */
++  if (JUMP_P (cmp_insn))
++    return false;
++  rtx_insn *before_cmp = PREV_INSN (cmp_insn);
++  start_sequence ();
++  rtx_insn *copy_of_cmp = as_a <rtx_insn *> (copy_rtx (cmp_insn));
++  basic_block cmp_block = BLOCK_FOR_INSN (cmp_insn);
++  FOR_EACH_INSN_USE (use, cmp_insn)
++    {
++      if (bitmap_bit_p (cond_rename_regs, DF_REF_REGNO (use)))
++	{
++	  rtx use_reg = DF_REF_REG (use);
++	  rtx tmp = gen_reg_rtx (GET_MODE (use_reg));
++	  if (!validate_replace_rtx (use_reg, tmp, copy_of_cmp))
++	    {
++	      end_sequence ();
++	      return false;
++	    }
++	  noce_emit_move_insn (tmp, use_reg);
++	}
++    }
++
++  emit_insn (PATTERN (copy_of_cmp));
++  rtx_insn *seq = get_insns ();
++  unshare_all_rtl_in_chain (seq);
++  end_sequence ();
++
++  emit_insn_after_setloc (seq, before_cmp, INSN_LOCATION (cmp_insn));
++  delete_insn_and_edges (cmp_insn);
++  rtx_insn *insn;
++  FOR_BB_INSNS (cmp_block, insn)
++    df_insn_rescan (insn);
++
++  if_info->cond = noce_get_condition (if_info->jump,
++				      &copy_of_cmp,
++				      if_info->then_else_reversed);
++  if_info->cond_earliest = copy_of_cmp;
++  if_info->rev_cond = NULL_RTX;
++
++  return success;
++}
++
++/* This function tries to rename regs that intersect with considered bb.
++   return true if the renaming was successful and optimization can
++   proceed further, false otherwise. */
++static bool
++noce_rename_regs_in_bb (basic_block test_bb, bitmap rename_regs)
++{
++  if (bitmap_empty_p (rename_regs))
++    return true;
++  rtx_insn *insn;
++  rtx_insn *last_insn = last_active_insn (test_bb, FALSE);
++  bool res = true;
++  start_sequence ();
++  FOR_BB_INSNS (test_bb, insn)
++    {
++      if (!active_insn_p (insn))
++	continue;
++      /* Only ssets are supported for now. */
++      rtx sset = single_set (insn);
++      gcc_assert (sset);
++      rtx x = SET_DEST (sset);
++      if (!REG_P (x) || !bitmap_bit_p (rename_regs, REGNO (x)))
++	continue;
++      /* Do not need to rename dest in the last instruction
++	 it will be renamed anyway. */
++      if (insn == last_insn)
++	continue;
++      machine_mode mode = GET_MODE (x);
++      rtx tmp = gen_reg_rtx (mode);
++      if (!validate_replace_rtx_part (x, tmp, &SET_DEST (sset), insn))
++	{
++	  gcc_assert (insn != last_insn);
++	  /* We can generate additional move for such case,
++	     but it will increase register pressure.
++	     For now just stop transformation. */
++	  rtx result_rtx = SET_DEST (single_set (last_insn));
++	  if (REG_P (result_rtx) && (x != result_rtx))
++	    {
++	      res = false;
++	      break;
++	    }
++	  if (!validate_replace_rtx (x, tmp, insn))
++	    gcc_unreachable ();
++	  noce_emit_move_insn (tmp,x);
++	}
++      set_used_flags (insn);
++      rtx_insn *rename_candidate;
++      for (rename_candidate = NEXT_INSN (insn);
++	   rename_candidate && rename_candidate!= NEXT_INSN (BB_END (test_bb));
++	   rename_candidate = NEXT_INSN (rename_candidate))
++	{
++	  if (!reg_overlap_mentioned_p (x, rename_candidate))
++	    continue;
++
++	  int replace_res = TRUE;
++	  if (rename_candidate == last_insn)
++	    {
++	      validate_replace_src_group (x, tmp, rename_candidate);
++	      replace_res = apply_change_group ();
++	    }
++	  else
++	    replace_res = validate_replace_rtx (x, tmp, rename_candidate);
++	  gcc_assert (replace_res);
++	  set_used_flags (rename_candidate);
++	}
++      set_used_flags (x);
++      set_used_flags (tmp);
++    }
++  rtx_insn *seq = get_insns ();
++  unshare_all_rtl_in_chain (seq);
++  end_sequence ();
++  emit_insn_before_setloc (seq, first_active_insn (test_bb),
++			   INSN_LOCATION (first_active_insn (test_bb)));
++  FOR_BB_INSNS (test_bb, insn)
++    df_insn_rescan (insn);
++  return res;
++}
++
+/* Try more complex cases involving conditional_move. 
*/ + + static int +@@ -2185,11 +2331,30 @@ noce_try_cmove_arith (struct noce_if_info *if_info) + std::swap (then_bb, else_bb); + } + } +- ++ bitmap else_bb_rename_regs = BITMAP_ALLOC (®_obstack); ++ bitmap then_bb_rename_regs = BITMAP_ALLOC (®_obstack); + if (then_bb && else_bb +- && (!bbs_ok_for_cmove_arith (then_bb, else_bb, if_info->orig_x) +- || !bbs_ok_for_cmove_arith (else_bb, then_bb, if_info->orig_x))) +- return FALSE; ++ && (!bbs_ok_for_cmove_arith (then_bb, else_bb, ++ if_info->orig_x, ++ then_bb_rename_regs) ++ || !bbs_ok_for_cmove_arith (else_bb, then_bb, ++ if_info->orig_x, ++ else_bb_rename_regs))) ++ { ++ BITMAP_FREE (then_bb_rename_regs); ++ BITMAP_FREE (else_bb_rename_regs); ++ return FALSE; ++ } ++ bool prepass_renaming = noce_rename_regs_in_bb (then_bb, ++ then_bb_rename_regs) ++ && noce_rename_regs_in_bb (else_bb, ++ else_bb_rename_regs); ++ ++ BITMAP_FREE (then_bb_rename_regs); ++ BITMAP_FREE (else_bb_rename_regs); ++ ++ if (!prepass_renaming) ++ return FALSE; + + start_sequence (); + +@@ -3072,7 +3237,8 @@ noce_operand_ok (const_rtx op) + + static bool + bb_valid_for_noce_process_p (basic_block test_bb, rtx cond, +- unsigned int *cost, bool *simple_p) ++ unsigned int *cost, bool *simple_p, ++ bitmap cond_rename_regs) + { + if (!test_bb) + return false; +@@ -3112,8 +3278,9 @@ bb_valid_for_noce_process_p (basic_block test_bb, rtx cond, + rtx_insn *prev_last_insn = PREV_INSN (last_insn); + gcc_assert (prev_last_insn); + +- /* For now, disallow setting x multiple times in test_bb. 
*/ +- if (REG_P (x) && reg_set_between_p (x, first_insn, prev_last_insn)) ++ if (REG_P (x) ++ && reg_set_between_p (x, first_insn, prev_last_insn) ++ && param_ifcvt_allow_register_renaming < 1) + return false; + + bitmap test_bb_temps = BITMAP_ALLOC (®_obstack); +@@ -3125,25 +3292,35 @@ bb_valid_for_noce_process_p (basic_block test_bb, rtx cond, + rtx_insn *insn; + FOR_BB_INSNS (test_bb, insn) + { +- if (insn != last_insn) +- { +- if (!active_insn_p (insn)) +- continue; ++ if (insn == last_insn) ++ continue; ++ if (!active_insn_p (insn)) ++ continue; + +- if (!insn_valid_noce_process_p (insn, cc)) +- goto free_bitmap_and_fail; ++ if (!insn_valid_noce_process_p (insn, cc)) ++ goto free_bitmap_and_fail; + +- rtx sset = single_set (insn); +- gcc_assert (sset); ++ rtx sset = single_set (insn); ++ gcc_assert (sset); + +- if (contains_mem_rtx_p (SET_SRC (sset)) +- || !REG_P (SET_DEST (sset)) +- || reg_overlap_mentioned_p (SET_DEST (sset), cond)) +- goto free_bitmap_and_fail; ++ if (contains_mem_rtx_p (SET_SRC (sset)) ++ || !REG_P (SET_DEST (sset))) ++ goto free_bitmap_and_fail; + +- potential_cost += pattern_cost (sset, speed_p); +- bitmap_set_bit (test_bb_temps, REGNO (SET_DEST (sset))); ++ if (reg_overlap_mentioned_p (SET_DEST (sset), cond)) ++ { ++ if (param_ifcvt_allow_register_renaming < 1) ++ goto free_bitmap_and_fail; ++ rtx sset_dest = SET_DEST (sset); ++ if (REG_P (sset_dest) ++ && (GET_MODE_CLASS (GET_MODE (sset_dest)) != MODE_CC)) ++ bitmap_set_bit (cond_rename_regs, REGNO (sset_dest)); ++ else ++ goto free_bitmap_and_fail; + } ++ potential_cost += pattern_cost (sset, speed_p); ++ if (SET_DEST (sset) != SET_DEST (last_set)) ++ bitmap_set_bit (test_bb_temps, REGNO (SET_DEST (sset))); + } + + /* If any of the intermediate results in test_bb are live after test_bb +@@ -3777,15 +3954,29 @@ noce_process_if_block (struct noce_if_info *if_info) + + bool speed_p = optimize_bb_for_speed_p (test_bb); + unsigned int then_cost = 0, else_cost = 0; ++ bitmap 
cond_rename_regs = BITMAP_ALLOC (®_obstack); + if (!bb_valid_for_noce_process_p (then_bb, cond, &then_cost, +- &if_info->then_simple)) +- return false; ++ &if_info->then_simple, cond_rename_regs)) ++ { ++ BITMAP_FREE (cond_rename_regs); ++ return false; ++ } + + if (else_bb + && !bb_valid_for_noce_process_p (else_bb, cond, &else_cost, +- &if_info->else_simple)) +- return false; ++ &if_info->else_simple, cond_rename_regs)) ++ { ++ BITMAP_FREE (cond_rename_regs); ++ return false; ++ } + ++ if (!noce_rename_regs_in_cond (if_info, cond_rename_regs)) ++ { ++ BITMAP_FREE (cond_rename_regs); ++ return false; ++ } ++ BITMAP_FREE (cond_rename_regs); ++ cond = if_info->cond; + if (speed_p) + if_info->original_cost += average_cost (then_cost, else_cost, + find_edge (test_bb, then_bb)); +@@ -5823,12 +6014,13 @@ if_convert (bool after_combine) + { + basic_block bb; + int pass; +- + if (optimize == 1) + { + df_live_add_problem (); + df_live_set_all_dirty (); + } ++ free_dominance_info (CDI_DOMINATORS); ++ cleanup_cfg (CLEANUP_EXPENSIVE); + + /* Record whether we are after combine pass. */ + ifcvt_after_combine = after_combine; +@@ -5933,7 +6125,6 @@ rest_of_handle_if_conversion (void) + dump_reg_info (dump_file); + dump_flow_info (dump_file, dump_flags); + } +- cleanup_cfg (CLEANUP_EXPENSIVE); + if_convert (false); + if (num_updated_if_blocks) + /* Get rid of any dead CC-related instructions. */ +diff --git a/gcc/params.opt b/gcc/params.opt +index d2196dc68..ba87f820b 100644 +--- a/gcc/params.opt ++++ b/gcc/params.opt +@@ -669,6 +669,10 @@ Maximum permissible cost for the sequence that would be generated by the RTL if- + Common Joined UInteger Var(param_max_rtl_if_conversion_unpredictable_cost) Init(40) IntegerRange(0, 200) Param Optimization + Maximum permissible cost for the sequence that would be generated by the RTL if-conversion pass for a branch that is considered unpredictable. 
+ ++-param=ifcvt-allow-register-renaming= ++Common Joined UInteger Var(param_ifcvt_allow_register_renaming) IntegerRange(0, 2) Param Optimization ++Allow RTL if-conversion pass to aggressively rename registers in basic blocks. Sometimes additional moves will be created. ++ + -param=max-sched-extend-regions-iters= + Common Joined UInteger Var(param_max_sched_extend_regions_iters) Param Optimization + The maximum number of iterations through CFG to extend regions. +diff --git a/gcc/testsuite/gcc.c-torture/execute/ifcvt-renaming-1.c b/gcc/testsuite/gcc.c-torture/execute/ifcvt-renaming-1.c +new file mode 100644 +index 000000000..65c4d4140 +--- /dev/null ++++ b/gcc/testsuite/gcc.c-torture/execute/ifcvt-renaming-1.c +@@ -0,0 +1,35 @@ ++ ++extern void abort(void); ++ ++__attribute__ ((noinline)) ++int foo (int x, int y, int z, int a, int b) ++{ ++ if (a < 2) { ++ if (a == 0) { ++ if (x - y < 0) ++ x = x - y + z; ++ else ++ x = x - y; ++ } ++ else { ++ if (x + y >= z) ++ x = x + y - z; ++ else ++ x = x + y; ++ } ++ } ++ return x; ++} ++ ++int main(void) { ++ if (foo (5,10,7,0,1) != 2) // x - y + z = -5 + 7 = 2 ++ abort (); ++ if (foo (50,10,7,0,1) != 40) // x - y = 40 ++ abort (); ++ if (foo (5,10,7,1,1) != 8) // x + y - z = 5 + 10 - 7 = 8 ++ abort (); ++ if (foo (5,10,70,1,1) != 15) // x + y = 15 ++ abort (); ++ return 0; ++} ++ +diff --git a/gcc/testsuite/gcc.dg/ifcvt-6.c b/gcc/testsuite/gcc.dg/ifcvt-6.c +new file mode 100644 +index 000000000..be9a67b3f +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/ifcvt-6.c +@@ -0,0 +1,27 @@ ++/* { dg-do compile { target { aarch64*-*-* } } } */ ++/* { dg-options "-fdump-rtl-ce1 -O2 --param max-rtl-if-conversion-unpredictable-cost=100 --param max-rtl-if-conversion-predictable-cost=100 --param=ifcvt-allow-register-renaming=2 -fifcvt-allow-complicated-cmps" } */ ++ ++typedef unsigned int uint16_t; ++ ++uint16_t ++foo (uint16_t x, uint16_t y, uint16_t z, uint16_t a, ++ uint16_t b, uint16_t c, uint16_t d) { ++ int i = 1; ++ int j = 1; ++ if (a 
> b) { ++ j = x; ++ if (b > c) ++ i = y; ++ else ++ i = z; ++ } ++ else { ++ j = y; ++ if (c > d) ++ i = z; ++ } ++ return i * j; ++} ++ ++/* { dg-final { scan-rtl-dump "7 true changes made" "ce1" } } */ ++ +-- +2.33.0 + diff --git a/0037-Perform-early-if-conversion-of-simple-arithmetic.patch b/0037-Perform-early-if-conversion-of-simple-arithmetic.patch new file mode 100644 index 0000000000000000000000000000000000000000..14de678e3eb6cf0242eb59aaeecc2dd340c34c39 --- /dev/null +++ b/0037-Perform-early-if-conversion-of-simple-arithmetic.patch @@ -0,0 +1,109 @@ +From 310eade1450995b55d9f8120561022fbf164b2ec Mon Sep 17 00:00:00 2001 +From: Pronin Alexander 00812787 +Date: Thu, 12 Jan 2023 14:52:49 +0300 +Subject: [PATCH 03/18] Perform early if-conversion of simple arithmetic + +--- + gcc/common.opt | 4 ++++ + gcc/match.pd | 25 +++++++++++++++++++ + gcc/testsuite/gcc.dg/ifcvt-gimple.c | 37 +++++++++++++++++++++++++++++ + 3 files changed, 66 insertions(+) + create mode 100644 gcc/testsuite/gcc.dg/ifcvt-gimple.c + +diff --git a/gcc/common.opt b/gcc/common.opt +index aa00fb7b0..dac477c04 100644 +--- a/gcc/common.opt ++++ b/gcc/common.opt +@@ -1821,6 +1821,10 @@ fif-conversion2 + Common Var(flag_if_conversion2) Optimization + Perform conversion of conditional jumps to conditional execution. + ++fif-conversion-gimple ++Common Var(flag_if_conversion_gimple) Optimization ++Perform conversion of conditional jumps to branchless equivalents during gimple transformations. ++ + fstack-reuse= + Common Joined RejectNegative Enum(stack_reuse_level) Var(flag_stack_reuse) Init(SR_ALL) Optimization + -fstack-reuse=[all|named_vars|none] Set stack reuse level for local variables. 
+diff --git a/gcc/match.pd b/gcc/match.pd +index 6f24d5079..3cbaf2a5b 100644 +--- a/gcc/match.pd ++++ b/gcc/match.pd +@@ -4278,6 +4278,31 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT) + ) + ) + ) ++ ++(if (flag_if_conversion_gimple) ++ (for simple_op (plus minus bit_and bit_ior bit_xor) ++ (simplify ++ (cond @0 (simple_op @1 INTEGER_CST@2) @1) ++ (switch ++ /* a = cond ? a + 1 : a -> a = a + ((int) cond) */ ++ (if (integer_onep (@2)) ++ (simple_op @1 (convert (convert:boolean_type_node @0)))) ++ /* a = cond ? a + powerof2cst : a -> ++ a = a + ((int) cond) << log2 (powerof2cst) */ ++ (if (INTEGRAL_TYPE_P (type) && integer_pow2p (@2)) ++ (with ++ { ++ tree shift = build_int_cst (integer_type_node, tree_log2 (@2)); ++ } ++ (simple_op @1 (lshift (convert (convert:boolean_type_node @0)) ++ { shift; }) ++ ) ++ ) ++ ) ++ ) ++ ) ++ ) ++) + #endif + + #if GIMPLE +diff --git a/gcc/testsuite/gcc.dg/ifcvt-gimple.c b/gcc/testsuite/gcc.dg/ifcvt-gimple.c +new file mode 100644 +index 000000000..0f7c87e5c +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/ifcvt-gimple.c +@@ -0,0 +1,37 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -fif-conversion-gimple -fdump-tree-optimized" } */ ++ ++int test_int (int optimizable_int) { ++ if (optimizable_int > 5) ++ ++optimizable_int; ++ return optimizable_int; ++} ++ ++int test_int_pow2 (int optimizable_int_pow2) { ++ if (optimizable_int_pow2 <= 4) ++ optimizable_int_pow2 += 1024; ++ return optimizable_int_pow2; ++} ++ ++int test_int_non_pow2 (int not_optimizable_int_non_pow2) { ++ if (not_optimizable_int_non_pow2 == 1) ++ not_optimizable_int_non_pow2 += 513; ++ return not_optimizable_int_non_pow2; ++} ++ ++float test_float (float not_optimizable_float) { ++ if (not_optimizable_float > 5) ++ not_optimizable_float += 1; ++ return not_optimizable_float; ++} ++ ++/* Expecting if-else block in test_float and test_int_non_pow2 only. 
*/ ++/* { dg-final { scan-tree-dump-not "if \\(optimizable" "optimized" } } */ ++/* { dg-final { scan-tree-dump "if \\(not_optimizable_int_non_pow2" "optimized" } } */ ++/* { dg-final { scan-tree-dump "if \\(not_optimizable_float" "optimized" } } */ ++/* { dg-final { scan-tree-dump-times "if " 2 "optimized" } } */ ++/* { dg-final { scan-tree-dump-times "else" 2 "optimized" } } */ ++ ++/* Expecting shifted result only for optimizable_int_pow2. */ ++/* { dg-final { scan-tree-dump-times " << " 1 "optimized" } } */ ++/* { dg-final { scan-tree-dump " << 10;" "optimized" } } */ +-- +2.33.0 + diff --git a/0038-Add-option-to-allow-matching-uaddsub-overflow-for-wi.patch b/0038-Add-option-to-allow-matching-uaddsub-overflow-for-wi.patch new file mode 100644 index 0000000000000000000000000000000000000000..9b2be003020a7f8af73007a10dbdccc38d7935a9 --- /dev/null +++ b/0038-Add-option-to-allow-matching-uaddsub-overflow-for-wi.patch @@ -0,0 +1,252 @@ +From 6684509e81e4341675c73a7dc853180229a8abcb Mon Sep 17 00:00:00 2001 +From: Pronin Alexander 00812787 +Date: Tue, 24 Jan 2023 16:43:40 +0300 +Subject: [PATCH 04/18] Add option to allow matching uaddsub overflow for widen + ops too. + +--- + gcc/common.opt | 5 ++ + gcc/testsuite/gcc.dg/uaddsub.c | 143 +++++++++++++++++++++++++++++++++ + gcc/tree-ssa-math-opts.cc | 43 ++++++++-- + 3 files changed, 184 insertions(+), 7 deletions(-) + create mode 100644 gcc/testsuite/gcc.dg/uaddsub.c + +diff --git a/gcc/common.opt b/gcc/common.opt +index dac477c04..39c90604e 100644 +--- a/gcc/common.opt ++++ b/gcc/common.opt +@@ -3106,6 +3106,11 @@ freciprocal-math + Common Var(flag_reciprocal_math) SetByCombined Optimization + Same as -fassociative-math for expressions which include division. + ++fuaddsub-overflow-match-all ++Common Var(flag_uaddsub_overflow_match_all) ++Match unsigned add/sub overflow even if the target does not support ++the corresponding instruction. 
++ + ; Nonzero means that unsafe floating-point math optimizations are allowed + ; for the sake of speed. IEEE compliance is not guaranteed, and operations + ; are allowed to assume that their arguments and results are "normal" +diff --git a/gcc/testsuite/gcc.dg/uaddsub.c b/gcc/testsuite/gcc.dg/uaddsub.c +new file mode 100644 +index 000000000..96c26d308 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/uaddsub.c +@@ -0,0 +1,143 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -fuaddsub-overflow-match-all -fdump-tree-optimized" } */ ++#include ++ ++typedef unsigned __int128 uint128_t; ++typedef struct uint256_t ++{ ++ uint128_t lo; ++ uint128_t hi; ++} uint256_t; ++ ++uint16_t add16 (uint8_t a, uint8_t b) ++{ ++ uint8_t tmp = a + b; ++ uint8_t overflow = 0; ++ if (tmp < a) ++ overflow = 1; ++ ++ uint16_t res = overflow; ++ res <<= 8; ++ res += tmp; ++ return res; ++} ++ ++uint32_t add32 (uint16_t a, uint16_t b) ++{ ++ uint16_t tmp = a + b; ++ uint16_t overflow = 0; ++ if (tmp < a) ++ overflow = 1; ++ ++ uint32_t res = overflow; ++ res <<= 16; ++ res += tmp; ++ return res; ++} ++ ++uint64_t add64 (uint32_t a, uint32_t b) ++{ ++ uint32_t tmp = a + b; ++ uint32_t overflow = 0; ++ if (tmp < a) ++ overflow = 1; ++ ++ uint64_t res = overflow; ++ res <<= 32; ++ res += tmp; ++ return res; ++} ++ ++uint128_t add128 (uint64_t a, uint64_t b) ++{ ++ uint64_t tmp = a + b; ++ uint64_t overflow = 0; ++ if (tmp < a) ++ overflow = 1; ++ ++ uint128_t res = overflow; ++ res <<= 64; ++ res += tmp; ++ return res; ++} ++ ++uint256_t add256 (uint128_t a, uint128_t b) ++{ ++ uint128_t tmp = a + b; ++ uint128_t overflow = 0; ++ if (tmp < a) ++ overflow = 1; ++ ++ uint256_t res; ++ res.hi = overflow; ++ res.lo = tmp; ++ return res; ++} ++ ++uint16_t sub16 (uint8_t a, uint8_t b) ++{ ++ uint8_t tmp = a - b; ++ uint8_t overflow = 0; ++ if (tmp > a) ++ overflow = -1; ++ ++ uint16_t res = overflow; ++ res <<= 8; ++ res += tmp; ++ return res; ++} ++ ++uint32_t sub32 (uint16_t a, uint16_t b) ++{ ++ 
uint16_t tmp = a - b; ++ uint16_t overflow = 0; ++ if (tmp > a) ++ overflow = -1; ++ ++ uint32_t res = overflow; ++ res <<= 16; ++ res += tmp; ++ return res; ++} ++ ++uint64_t sub64 (uint32_t a, uint32_t b) ++{ ++ uint32_t tmp = a - b; ++ uint32_t overflow = 0; ++ if (tmp > a) ++ overflow = -1; ++ ++ uint64_t res = overflow; ++ res <<= 32; ++ res += tmp; ++ return res; ++} ++ ++uint128_t sub128 (uint64_t a, uint64_t b) ++{ ++ uint64_t tmp = a - b; ++ uint64_t overflow = 0; ++ if (tmp > a) ++ overflow = -1; ++ ++ uint128_t res = overflow; ++ res <<= 64; ++ res += tmp; ++ return res; ++} ++ ++uint256_t sub256 (uint128_t a, uint128_t b) ++{ ++ uint128_t tmp = a - b; ++ uint128_t overflow = 0; ++ if (tmp > a) ++ overflow = -1; ++ ++ uint256_t res; ++ res.hi = overflow; ++ res.lo = tmp; ++ return res; ++} ++ ++/* { dg-final { scan-tree-dump-times "= .ADD_OVERFLOW \\(a_\[0-9\]+\\(D\\), b_\[0-9\]+\\(D\\)\\)" 5 "optimized" } } */ ++/* { dg-final { scan-tree-dump-times "= .SUB_OVERFLOW \\(a_\[0-9\]+\\(D\\), b_\[0-9\]+\\(D\\)\\)" 5 "optimized" } } */ +diff --git a/gcc/tree-ssa-math-opts.cc b/gcc/tree-ssa-math-opts.cc +index 232e903b0..55d6ee8ae 100644 +--- a/gcc/tree-ssa-math-opts.cc ++++ b/gcc/tree-ssa-math-opts.cc +@@ -3468,6 +3468,27 @@ convert_mult_to_fma (gimple *mul_stmt, tree op1, tree op2, + } + } + ++/* Check if the corresponding operation has wider equivalent on the target. */ ++ ++static bool ++wider_optab_check_p (optab op, machine_mode mode, int unsignedp) ++{ ++ machine_mode wider_mode; ++ FOR_EACH_WIDER_MODE (wider_mode, mode) ++ { ++ machine_mode next_mode; ++ if (optab_handler (op, wider_mode) != CODE_FOR_nothing ++ || (op == smul_optab ++ && GET_MODE_WIDER_MODE (wider_mode).exists (&next_mode) ++ && (find_widening_optab_handler ((unsignedp ++ ? umul_widen_optab ++ : smul_widen_optab), ++ next_mode, mode)))) ++ return true; ++ } ++ ++ return false; ++} + + /* Helper function of match_arith_overflow. 
For MUL_OVERFLOW, if we have + a check for non-zero like: +@@ -3903,15 +3924,22 @@ match_arith_overflow (gimple_stmt_iterator *gsi, gimple *stmt, + || code == MINUS_EXPR + || code == MULT_EXPR + || code == BIT_NOT_EXPR); ++ int unsignedp = TYPE_UNSIGNED (type); + if (!INTEGRAL_TYPE_P (type) +- || !TYPE_UNSIGNED (type) +- || has_zero_uses (lhs) +- || (code != PLUS_EXPR +- && code != MULT_EXPR +- && optab_handler (code == MINUS_EXPR ? usubv4_optab : uaddv4_optab, +- TYPE_MODE (type)) == CODE_FOR_nothing)) ++ || !unsignedp ++ || has_zero_uses (lhs)) + return false; + ++ if (code == PLUS_EXPR || code == MINUS_EXPR) ++ { ++ machine_mode mode = TYPE_MODE (type); ++ optab op = code == PLUS_EXPR ? uaddv4_optab : usubv4_optab; ++ if (optab_handler (op, mode) == CODE_FOR_nothing ++ && (!flag_uaddsub_overflow_match_all ++ || !wider_optab_check_p (op, mode, unsignedp))) ++ return false; ++ } ++ + tree rhs1 = gimple_assign_rhs1 (stmt); + tree rhs2 = gimple_assign_rhs2 (stmt); + FOR_EACH_IMM_USE_FAST (use_p, iter, lhs) +@@ -3986,7 +4014,8 @@ match_arith_overflow (gimple_stmt_iterator *gsi, gimple *stmt, + || (code != MULT_EXPR && (code == BIT_NOT_EXPR ? use_seen : !use_seen)) + || (code == PLUS_EXPR + && optab_handler (uaddv4_optab, +- TYPE_MODE (type)) == CODE_FOR_nothing) ++ TYPE_MODE (type)) == CODE_FOR_nothing ++ && !flag_uaddsub_overflow_match_all) + || (code == MULT_EXPR + && optab_handler (cast_stmt ? 
mulv4_optab : umulv4_optab, + TYPE_MODE (type)) == CODE_FOR_nothing)) +-- +2.33.0 + diff --git a/0039-Match-double-sized-mul-pattern.patch b/0039-Match-double-sized-mul-pattern.patch new file mode 100644 index 0000000000000000000000000000000000000000..9d4e56f24732f19ce3d77e9c6bea04549b45c099 --- /dev/null +++ b/0039-Match-double-sized-mul-pattern.patch @@ -0,0 +1,488 @@ +From e7b22f97f960b62e555dfd6f2e3ae43973fcbb3e Mon Sep 17 00:00:00 2001 +From: Pronin Alexander 00812787 +Date: Wed, 25 Jan 2023 15:04:07 +0300 +Subject: [PATCH 05/18] Match double sized mul pattern + +--- + gcc/match.pd | 136 +++++++++++++++++++++ + gcc/testsuite/gcc.dg/double_sized_mul-1.c | 141 ++++++++++++++++++++++ + gcc/testsuite/gcc.dg/double_sized_mul-2.c | 62 ++++++++++ + gcc/tree-ssa-math-opts.cc | 80 ++++++++++++ + 4 files changed, 419 insertions(+) + create mode 100644 gcc/testsuite/gcc.dg/double_sized_mul-1.c + create mode 100644 gcc/testsuite/gcc.dg/double_sized_mul-2.c + +diff --git a/gcc/match.pd b/gcc/match.pd +index 3cbaf2a5b..61866cb90 100644 +--- a/gcc/match.pd ++++ b/gcc/match.pd +@@ -7895,3 +7895,139 @@ and, + == TYPE_UNSIGNED (TREE_TYPE (@3)))) + && single_use (@4) + && single_use (@5)))) ++ ++/* Match multiplication with double sized result. ++ ++ Consider the following calculations: ++ arg0 * arg1 = (2^(bit_size/2) * arg0_hi + arg0_lo) ++ * (2^(bit_size/2) * arg1_hi + arg1_lo) ++ arg0 * arg1 = 2^bit_size * arg0_hi * arg1_hi ++ + 2^(bit_size/2) * (arg0_hi * arg1_lo + arg0_lo * arg1_hi) ++ + arg0_lo * arg1_lo ++ ++ The products of high and low parts fits in bit_size values, thus they are ++ placed in high and low parts of result respectively. ++ ++ The sum of the mixed products may overflow, so we need a detection for that. ++ Also it has a bit_size/2 offset, thus it intersects with both high and low ++ parts of result. Overflow detection constant is bit_size/2 due to this. 
++ ++ With this info: ++ arg0 * arg1 = 2^bit_size * arg0_hi * arg1_hi ++ + 2^(bit_size/2) * middle ++ + 2^bit_size * possible_middle_overflow ++ + arg0_lo * arg1_lo ++ arg0 * arg1 = 2^bit_size * (arg0_hi * arg1_hi + possible_middle_overflow) ++ + 2^(bit_size/2) * (2^(bit_size/2) * middle_hi + middle_lo) ++ + arg0_lo * arg1_lo ++ arg0 * arg1 = 2^bit_size * (arg0_hi * arg1_hi + middle_hi ++ + possible_middle_overflow) ++ + 2^(bit_size/2) * middle_lo ++ + arg0_lo * arg1_lo ++ ++ The last sum can produce overflow for the high result part. With this: ++ arg0 * arg1 = 2^bit_size * (arg0_hi * arg1_hi + possible_middle_overflow ++ + possible_res_lo_overflow + middle_hi) ++ + res_lo ++ = res_hi + res_lo ++ ++ This formula is quite big to fit into one match pattern with all of the ++ combinations of terms inside it. There are many helpers for better code ++ readability. ++ ++ The simplification basis is res_hi: assuming that res_lo only is not ++ real practical case for such calculations. ++ ++ Overflow handling is done via matching complex calculations: ++ the realpart and imagpart are quite handy here. */ ++/* Match low and high parts of the argument. */ ++(match (double_size_mul_arg_lo @0 @1) ++ (bit_and @0 INTEGER_CST@1) ++ (if (wi::to_wide (@1) ++ == wi::mask (TYPE_PRECISION (type) / 2, false, TYPE_PRECISION (type))))) ++(match (double_size_mul_arg_hi @0 @1) ++ (rshift @0 INTEGER_CST@1) ++ (if (wi::to_wide (@1) == TYPE_PRECISION (type) / 2))) ++ ++/* Match various argument parts products. */ ++(match (double_size_mul_lolo @0 @1) ++ (mult@4 (double_size_mul_arg_lo @0 @2) (double_size_mul_arg_lo @1 @3)) ++ (if (single_use (@4)))) ++(match (double_size_mul_hihi @0 @1) ++ (mult@4 (double_size_mul_arg_hi @0 @2) (double_size_mul_arg_hi @1 @3)) ++ (if (single_use (@4)))) ++(match (double_size_mul_lohi @0 @1) ++ (mult:c@4 (double_size_mul_arg_lo @0 @2) (double_size_mul_arg_hi @1 @3)) ++ (if (single_use (@4)))) ++ ++/* Match complex middle sum. 
*/ ++(match (double_size_mul_middle_complex @0 @1) ++ (IFN_ADD_OVERFLOW@2 (double_size_mul_lohi @0 @1) (double_size_mul_lohi @1 @0)) ++ (if (num_imm_uses (@2) == 2))) ++ ++/* Match real middle results. */ ++(match (double_size_mul_middle @0 @1) ++ (realpart@2 (double_size_mul_middle_complex @0 @1)) ++ (if (num_imm_uses (@2) == 2))) ++(match (double_size_mul_middleres_lo @0 @1) ++ (lshift@3 (double_size_mul_middle @0 @1) INTEGER_CST@2) ++ (if (wi::to_wide (@2) == TYPE_PRECISION (type) / 2 ++ && single_use (@3)))) ++(match (double_size_mul_middleres_hi @0 @1) ++ (rshift@3 (double_size_mul_middle @0 @1) INTEGER_CST@2) ++ (if (wi::to_wide (@2) == TYPE_PRECISION (type) / 2 ++ && single_use (@3)))) ++ ++/* Match low result part. */ ++/* Number of uses may be < 2 in case when we are interested in ++ high part only. */ ++(match (double_size_mul_res_lo_complex @0 @1) ++ (IFN_ADD_OVERFLOW:c@2 ++ (double_size_mul_lolo:c @0 @1) (double_size_mul_middleres_lo @0 @1)) ++ (if (num_imm_uses (@2) <= 2))) ++(match (double_size_mul_res_lo @0 @1) ++ (realpart (double_size_mul_res_lo_complex @0 @1))) ++ ++/* Match overflow terms. */ ++(match (double_size_mul_overflow_check_lo @0 @1 @5) ++ (convert@4 (ne@3 ++ (imagpart@2 (double_size_mul_res_lo_complex@5 @0 @1)) integer_zerop)) ++ (if (single_use (@2) && single_use (@3) && single_use (@4)))) ++(match (double_size_mul_overflow_check_hi @0 @1) ++ (lshift@6 (convert@5 (ne@4 ++ (imagpart@3 (double_size_mul_middle_complex @0 @1)) integer_zerop)) ++ INTEGER_CST@2) ++ (if (wi::to_wide (@2) == TYPE_PRECISION (type) / 2 ++ && single_use (@3) && single_use (@4) && single_use (@5) ++ && single_use (@6)))) ++ ++/* Match all possible permutations for high result part calculations. 
*/ ++(for op1 (double_size_mul_hihi ++ double_size_mul_overflow_check_hi ++ double_size_mul_middleres_hi) ++ op2 (double_size_mul_overflow_check_hi ++ double_size_mul_middleres_hi ++ double_size_mul_hihi) ++ op3 (double_size_mul_middleres_hi ++ double_size_mul_hihi ++ double_size_mul_overflow_check_hi) ++ (match (double_size_mul_candidate @0 @1 @2 @3) ++ (plus:c@2 ++ (plus:c@4 (double_size_mul_overflow_check_lo @0 @1 @3) (op1:c @0 @1)) ++ (plus:c@5 (op2:c @0 @1) (op3:c @0 @1))) ++ (if (single_use (@4) && single_use (@5)))) ++ (match (double_size_mul_candidate @0 @1 @2 @3) ++ (plus:c@2 (double_size_mul_overflow_check_lo @0 @1 @3) ++ (plus:c@4 (op1:c @0 @1) ++ (plus:c@5 (op2:c @0 @1) (op3:c @0 @1)))) ++ (if (single_use (@4) && single_use (@5)))) ++ (match (double_size_mul_candidate @0 @1 @2 @3) ++ (plus:c@2 (op1:c @0 @1) ++ (plus:c@4 (double_size_mul_overflow_check_lo @0 @1 @3) ++ (plus:c@5 (op2:c @0 @1) (op3:c @0 @1)))) ++ (if (single_use (@4) && single_use (@5)))) ++ (match (double_size_mul_candidate @0 @1 @2 @3) ++ (plus:c@2 (op1:c @0 @1) ++ (plus:c@4 (op2:c @0 @1) ++ (plus:c@5 (double_size_mul_overflow_check_lo @0 @1 @3) (op3:c @0 @1)))) ++ (if (single_use (@4) && single_use (@5))))) +diff --git a/gcc/testsuite/gcc.dg/double_sized_mul-1.c b/gcc/testsuite/gcc.dg/double_sized_mul-1.c +new file mode 100644 +index 000000000..4d475cc8a +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/double_sized_mul-1.c +@@ -0,0 +1,141 @@ ++/* { dg-do compile } */ ++/* fif-conversion-gimple and fuaddsub-overflow-match-all are required for ++ proper overflow detection in some cases. 
*/ ++/* { dg-options "-O2 -fif-conversion-gimple -fuaddsub-overflow-match-all -fdump-tree-widening_mul-stats" } */ ++#include ++ ++typedef unsigned __int128 uint128_t; ++ ++uint16_t mul16 (uint8_t a, uint8_t b) ++{ ++ uint8_t a_lo = a & 0xF; ++ uint8_t b_lo = b & 0xF; ++ uint8_t a_hi = a >> 4; ++ uint8_t b_hi = b >> 4; ++ uint8_t lolo = a_lo * b_lo; ++ uint8_t lohi = a_lo * b_hi; ++ uint8_t hilo = a_hi * b_lo; ++ uint8_t hihi = a_hi * b_hi; ++ uint8_t middle = hilo + lohi; ++ uint8_t middle_hi = middle >> 4; ++ uint8_t middle_lo = middle << 4; ++ uint8_t res_lo = lolo + middle_lo; ++ uint8_t res_hi = hihi + middle_hi; ++ res_hi += (res_lo < middle_lo ? 1 : 0); ++ res_hi += (middle < hilo ? 0x10 : 0); ++ uint16_t res = ((uint16_t) res_hi) << 8; ++ res += res_lo; ++ return res; ++} ++ ++uint32_t mul32 (uint16_t a, uint16_t b) ++{ ++ uint16_t a_lo = a & 0xFF; ++ uint16_t b_lo = b & 0xFF; ++ uint16_t a_hi = a >> 8; ++ uint16_t b_hi = b >> 8; ++ uint16_t lolo = a_lo * b_lo; ++ uint16_t lohi = a_lo * b_hi; ++ uint16_t hilo = a_hi * b_lo; ++ uint16_t hihi = a_hi * b_hi; ++ uint16_t middle = hilo + lohi; ++ uint16_t middle_hi = middle >> 8; ++ uint16_t middle_lo = middle << 8; ++ uint16_t res_lo = lolo + middle_lo; ++ uint16_t res_hi = hihi + middle_hi; ++ res_hi += (res_lo < middle_lo ? 1 : 0); ++ res_hi += (middle < hilo ? 0x100 : 0); ++ uint32_t res = ((uint32_t) res_hi) << 16; ++ res += res_lo; ++ return res; ++} ++ ++uint64_t mul64 (uint32_t a, uint32_t b) ++{ ++ uint32_t a_lo = a & 0xFFFF; ++ uint32_t b_lo = b & 0xFFFF; ++ uint32_t a_hi = a >> 16; ++ uint32_t b_hi = b >> 16; ++ uint32_t lolo = a_lo * b_lo; ++ uint32_t lohi = a_lo * b_hi; ++ uint32_t hilo = a_hi * b_lo; ++ uint32_t hihi = a_hi * b_hi; ++ uint32_t middle = hilo + lohi; ++ uint32_t middle_hi = middle >> 16; ++ uint32_t middle_lo = middle << 16; ++ uint32_t res_lo = lolo + middle_lo; ++ uint32_t res_hi = hihi + middle_hi; ++ res_hi += (res_lo < middle_lo ? 1 : 0); ++ res_hi += (middle < hilo ? 
0x10000 : 0); ++ uint64_t res = ((uint64_t) res_hi) << 32; ++ res += res_lo; ++ return res; ++} ++ ++uint128_t mul128 (uint64_t a, uint64_t b) ++{ ++ uint64_t a_lo = a & 0xFFFFFFFF; ++ uint64_t b_lo = b & 0xFFFFFFFF; ++ uint64_t a_hi = a >> 32; ++ uint64_t b_hi = b >> 32; ++ uint64_t lolo = a_lo * b_lo; ++ uint64_t lohi = a_lo * b_hi; ++ uint64_t hilo = a_hi * b_lo; ++ uint64_t hihi = a_hi * b_hi; ++ uint64_t middle = hilo + lohi; ++ uint64_t middle_hi = middle >> 32; ++ uint64_t middle_lo = middle << 32; ++ uint64_t res_lo = lolo + middle_lo; ++ uint64_t res_hi = hihi + middle_hi; ++ res_hi += (res_lo < middle_lo ? 1 : 0); ++ res_hi += (middle < hilo ? 0x100000000 : 0); ++ uint128_t res = ((uint128_t) res_hi) << 64; ++ res += res_lo; ++ return res; ++} ++ ++uint64_t mul64_perm (uint32_t a, uint32_t b) ++{ ++ uint32_t a_lo = a & 0xFFFF; ++ uint32_t b_lo = b & 0xFFFF; ++ uint32_t a_hi = a >> 16; ++ uint32_t b_hi = b >> 16; ++ uint32_t lolo = a_lo * b_lo; ++ uint32_t lohi = a_lo * b_hi; ++ uint32_t hilo = a_hi * b_lo; ++ uint32_t hihi = a_hi * b_hi; ++ uint32_t middle = hilo + lohi; ++ uint32_t middle_hi = middle >> 16; ++ uint32_t middle_lo = middle << 16; ++ uint32_t res_lo = lolo + middle_lo; ++ uint32_t res_hi = hihi + middle_hi; ++ res_hi = res_lo < middle_lo ? res_hi + 1 : res_hi; ++ res_hi = middle < hilo ? res_hi + 0x10000 : res_hi; ++ uint64_t res = ((uint64_t) res_hi) << 32; ++ res += res_lo; ++ return res; ++} ++ ++uint128_t mul128_perm (uint64_t a, uint64_t b) ++{ ++ uint64_t a_lo = a & 0xFFFFFFFF; ++ uint64_t b_lo = b & 0xFFFFFFFF; ++ uint64_t a_hi = a >> 32; ++ uint64_t b_hi = b >> 32; ++ uint64_t lolo = a_lo * b_lo; ++ uint64_t lohi = a_lo * b_hi; ++ uint64_t hilo = a_hi * b_lo; ++ uint64_t hihi = a_hi * b_hi; ++ uint64_t middle = hilo + lohi; ++ uint64_t middle_hi = middle >> 32; ++ uint64_t middle_lo = middle << 32; ++ uint64_t res_lo = lolo + middle_lo; ++ uint64_t res_hi = hihi + middle_hi; ++ res_hi = res_lo < middle_lo ? 
res_hi + 1 : res_hi; ++ res_hi = middle < hilo ? res_hi + 0x100000000 : res_hi; ++ uint128_t res = ((uint128_t) res_hi) << 64; ++ res += res_lo; ++ return res; ++} ++ ++/* { dg-final { scan-tree-dump-times "double sized mul optimized: 1" 6 "widening_mul" } } */ +diff --git a/gcc/testsuite/gcc.dg/double_sized_mul-2.c b/gcc/testsuite/gcc.dg/double_sized_mul-2.c +new file mode 100644 +index 000000000..cc6e5af25 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/double_sized_mul-2.c +@@ -0,0 +1,62 @@ ++/* { dg-do compile } */ ++/* fif-conversion-gimple is required for proper overflow detection ++ in some cases. */ ++/* { dg-options "-O2 -fif-conversion-gimple -fuaddsub-overflow-match-all -fdump-tree-widening_mul-stats" } */ ++#include ++ ++typedef unsigned __int128 uint128_t; ++typedef struct uint256_t ++{ ++ uint128_t lo; ++ uint128_t hi; ++} uint256_t; ++ ++uint64_t mul64_double_use (uint32_t a, uint32_t b) ++{ ++ uint32_t a_lo = a & 0xFFFF; ++ uint32_t b_lo = b & 0xFFFF; ++ uint32_t a_hi = a >> 16; ++ uint32_t b_hi = b >> 16; ++ uint32_t lolo = a_lo * b_lo; ++ uint32_t lohi = a_lo * b_hi; ++ uint32_t hilo = a_hi * b_lo; ++ uint32_t hihi = a_hi * b_hi; ++ uint32_t middle = hilo + lohi; ++ uint32_t middle_hi = middle >> 16; ++ uint32_t middle_lo = middle << 16; ++ uint32_t res_lo = lolo + middle_lo; ++ uint32_t res_hi = hihi + middle_hi; ++ res_hi += (res_lo < middle_lo ? 1 : 0); ++ res_hi += (middle < hilo ? 
0x10000 : 0); ++ uint64_t res = ((uint64_t) res_hi) << 32; ++ res += res_lo; ++ return res + lolo; ++} ++ ++uint256_t mul256 (uint128_t a, uint128_t b) ++{ ++ uint128_t a_lo = a & 0xFFFFFFFFFFFFFFFF; ++ uint128_t b_lo = b & 0xFFFFFFFFFFFFFFFF; ++ uint128_t a_hi = a >> 64; ++ uint128_t b_hi = b >> 64; ++ uint128_t lolo = a_lo * b_lo; ++ uint128_t lohi = a_lo * b_hi; ++ uint128_t hilo = a_hi * b_lo; ++ uint128_t hihi = a_hi * b_hi; ++ uint128_t middle = hilo + lohi; ++ uint128_t middle_hi = middle >> 64; ++ uint128_t middle_lo = middle << 64; ++ uint128_t res_lo = lolo + middle_lo; ++ uint128_t res_hi = hihi + middle_hi; ++ res_hi += (res_lo < middle_lo ? 1 : 0); ++ /* Constant is to big warning WA */ ++ uint128_t overflow_tmp = (middle < hilo ? 1 : 0); ++ overflow_tmp <<= 64; ++ res_hi += overflow_tmp; ++ uint256_t res; ++ res.lo = res_lo; ++ res.hi = res_hi; ++ return res; ++} ++ ++/* { dg-final { scan-tree-dump-not "double sized mul optimized" "widening_mul" } } */ +diff --git a/gcc/tree-ssa-math-opts.cc b/gcc/tree-ssa-math-opts.cc +index 55d6ee8ae..2c06b8a60 100644 +--- a/gcc/tree-ssa-math-opts.cc ++++ b/gcc/tree-ssa-math-opts.cc +@@ -210,6 +210,9 @@ static struct + + /* Number of highpart multiplication ops inserted. */ + int highpart_mults_inserted; ++ ++ /* Number of optimized double sized multiplications. */ ++ int double_sized_mul_optimized; + } widen_mul_stats; + + /* The instance of "struct occurrence" representing the highest +@@ -4893,6 +4896,78 @@ optimize_spaceship (gimple *stmt) + } + + ++/* Pattern matcher for double sized multiplication defined in match.pd. 
*/ ++extern bool gimple_double_size_mul_candidate (tree, tree*, tree (*)(tree)); ++ ++static bool ++convert_double_size_mul (gimple_stmt_iterator *gsi, gimple *stmt) ++{ ++ gimple *use_stmt, *complex_res_lo; ++ gimple_stmt_iterator insert_before; ++ imm_use_iterator use_iter; ++ tree match[4]; // arg0, arg1, res_hi, complex_res_lo ++ tree arg0, arg1, widen_mult, new_type, tmp; ++ tree lhs = gimple_assign_lhs (stmt); ++ location_t loc = UNKNOWN_LOCATION; ++ machine_mode mode; ++ ++ if (!gimple_double_size_mul_candidate (lhs, match, NULL)) ++ return false; ++ ++ new_type = build_nonstandard_integer_type ( ++ TYPE_PRECISION (TREE_TYPE (match[0])) * 2, 1); ++ mode = TYPE_MODE (new_type); ++ ++ /* Early return if the target multiplication doesn't exist on target. */ ++ if (optab_handler (smul_optab, mode) == CODE_FOR_nothing ++ && !wider_optab_check_p (smul_optab, mode, 1)) ++ return false; ++ ++ /* Determine the point where the wide multiplication ++ should be inserted. Complex low res is OK since it is required ++ by both high and low part getters, thus it dominates both of them. */ ++ complex_res_lo = SSA_NAME_DEF_STMT (match[3]); ++ insert_before = gsi_for_stmt (complex_res_lo); ++ gsi_next (&insert_before); ++ ++ /* Create the widen multiplication. */ ++ arg0 = build_and_insert_cast (&insert_before, loc, new_type, match[0]); ++ arg1 = build_and_insert_cast (&insert_before, loc, new_type, match[1]); ++ widen_mult = build_and_insert_binop (&insert_before, loc, "widen_mult", ++ MULT_EXPR, arg0, arg1); ++ ++ /* Find the mult low part getter. */ ++ FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, match[3]) ++ if (gimple_assign_rhs_code (use_stmt) == REALPART_EXPR) ++ break; ++ ++ /* Create high and low (if needed) parts extractors. */ ++ /* Low part. 
*/ ++ if (use_stmt) ++ { ++ loc = gimple_location (use_stmt); ++ tmp = build_and_insert_cast (&insert_before, loc, ++ TREE_TYPE (gimple_get_lhs (use_stmt)), ++ widen_mult); ++ gassign *new_stmt = gimple_build_assign (gimple_get_lhs (use_stmt), ++ NOP_EXPR, tmp); ++ gsi_replace (&insert_before, new_stmt, true); ++ } ++ ++ /* High part. */ ++ loc = gimple_location (stmt); ++ tmp = build_and_insert_binop (gsi, loc, "widen_mult_hi", ++ RSHIFT_EXPR, widen_mult, ++ build_int_cst (new_type, ++ TYPE_PRECISION (new_type) / 2)); ++ tmp = build_and_insert_cast (gsi, loc, TREE_TYPE (lhs), tmp); ++ gassign *new_stmt = gimple_build_assign (lhs, NOP_EXPR, tmp); ++ gsi_replace (gsi, new_stmt, true); ++ ++ widen_mul_stats.double_sized_mul_optimized++; ++ return true; ++} ++ + /* Find integer multiplications where the operands are extended from + smaller types, and replace the MULT_EXPR with a WIDEN_MULT_EXPR + or MULT_HIGHPART_EXPR where appropriate. */ +@@ -4987,6 +5062,9 @@ math_opts_dom_walker::after_dom_children (basic_block bb) + break; + + case PLUS_EXPR: ++ if (convert_double_size_mul (&gsi, stmt)) ++ break; ++ __attribute__ ((fallthrough)); + case MINUS_EXPR: + if (!convert_plusminus_to_widen (&gsi, stmt, code)) + match_arith_overflow (&gsi, stmt, code, m_cfg_changed_p); +@@ -5091,6 +5169,8 @@ pass_optimize_widening_mul::execute (function *fun) + widen_mul_stats.divmod_calls_inserted); + statistics_counter_event (fun, "highpart multiplications inserted", + widen_mul_stats.highpart_mults_inserted); ++ statistics_counter_event (fun, "double sized mul optimized", ++ widen_mul_stats.double_sized_mul_optimized); + + return cfg_changed ? 
TODO_cleanup_cfg : 0; + } +-- +2.33.0 + diff --git a/0040-Port-icp-patch-to-GCC-12.patch b/0040-Port-icp-patch-to-GCC-12.patch new file mode 100644 index 0000000000000000000000000000000000000000..d0b34126ce497f8f912ef10b2815735d5b7650a7 --- /dev/null +++ b/0040-Port-icp-patch-to-GCC-12.patch @@ -0,0 +1,2387 @@ +From b73462757734c62f64e7a4379340679ec6f19669 Mon Sep 17 00:00:00 2001 +From: Diachkov Ilia +Date: Tue, 27 Feb 2024 07:28:12 +0800 +Subject: [PATCH 06/18] Port icp patch to GCC 12 + +--- + gcc/common.opt | 8 + + gcc/dbgcnt.def | 1 + + gcc/ipa-devirt.cc | 1855 +++++++++++++++++++++++++++++++++++ + gcc/passes.def | 1 + + gcc/testsuite/gcc.dg/icp1.c | 40 + + gcc/testsuite/gcc.dg/icp2.c | 38 + + gcc/testsuite/gcc.dg/icp3.c | 52 + + gcc/testsuite/gcc.dg/icp4.c | 55 ++ + gcc/testsuite/gcc.dg/icp5.c | 66 ++ + gcc/testsuite/gcc.dg/icp6.c | 66 ++ + gcc/testsuite/gcc.dg/icp7.c | 48 + + gcc/timevar.def | 1 + + gcc/tree-pass.h | 1 + + 13 files changed, 2232 insertions(+) + create mode 100644 gcc/testsuite/gcc.dg/icp1.c + create mode 100644 gcc/testsuite/gcc.dg/icp2.c + create mode 100644 gcc/testsuite/gcc.dg/icp3.c + create mode 100644 gcc/testsuite/gcc.dg/icp4.c + create mode 100644 gcc/testsuite/gcc.dg/icp5.c + create mode 100644 gcc/testsuite/gcc.dg/icp6.c + create mode 100644 gcc/testsuite/gcc.dg/icp7.c + +diff --git a/gcc/common.opt b/gcc/common.opt +index 39c90604e..16aadccf6 100644 +--- a/gcc/common.opt ++++ b/gcc/common.opt +@@ -1316,6 +1316,14 @@ fdevirtualize + Common Var(flag_devirtualize) Optimization + Try to convert virtual calls to direct ones. + ++ficp ++Common Var(flag_icp) Optimization Init(0) ++Try to promote indirect calls to direct ones. ++ ++ficp-speculatively ++Common Var(flag_icp_speculatively) Optimization ++Promote indirect calls speculatively. 
++ + fdiagnostics-show-location= + Common Joined RejectNegative Enum(diagnostic_prefixing_rule) + -fdiagnostics-show-location=[once|every-line] How often to emit source location at the beginning of line-wrapped diagnostics. +diff --git a/gcc/dbgcnt.def b/gcc/dbgcnt.def +index 3aa18cd0c..a00bbc31b 100644 +--- a/gcc/dbgcnt.def ++++ b/gcc/dbgcnt.def +@@ -170,6 +170,7 @@ DEBUG_COUNTER (graphite_scop) + DEBUG_COUNTER (hoist) + DEBUG_COUNTER (hoist_insn) + DEBUG_COUNTER (ia64_sched2) ++DEBUG_COUNTER (icp) + DEBUG_COUNTER (if_after_combine) + DEBUG_COUNTER (if_after_reload) + DEBUG_COUNTER (if_conversion) +diff --git a/gcc/ipa-devirt.cc b/gcc/ipa-devirt.cc +index 74fe65608..383839189 100644 +--- a/gcc/ipa-devirt.cc ++++ b/gcc/ipa-devirt.cc +@@ -103,9 +103,14 @@ along with GCC; see the file COPYING3. If not see + indirect polymorphic edge all possible polymorphic call targets of the call. + + pass_ipa_devirt performs simple speculative devirtualization. ++ pass_ipa_icp performs simple indirect call promotion. + */ + + #include "config.h" ++#define INCLUDE_ALGORITHM ++#define INCLUDE_SET ++#define INCLUDE_MAP ++#define INCLUDE_LIST + #include "system.h" + #include "coretypes.h" + #include "backend.h" +@@ -127,6 +132,7 @@ along with GCC; see the file COPYING3. If not see + #include "ipa-fnsummary.h" + #include "demangle.h" + #include "dbgcnt.h" ++#include "gimple-iterator.h" + #include "gimple-pretty-print.h" + #include "intl.h" + #include "stringpool.h" +@@ -4401,5 +4407,1854 @@ make_pass_ipa_odr (gcc::context *ctxt) + return new pass_ipa_odr (ctxt); + } + ++/* Function signature map used to look up function decl which corresponds to ++ the given function type. 
*/ ++typedef std::set type_set; ++typedef std::set decl_set; ++typedef std::map type_alias_map; ++typedef std::map type_decl_map; ++typedef std::map uid_to_type_map; ++typedef std::map type_map; ++ ++static bool has_address_taken_functions_with_varargs = false; ++static type_set *unsafe_types = NULL; ++static type_alias_map *fta_map = NULL; ++static type_alias_map *ta_map = NULL; ++static type_map *ctype_map = NULL; ++static type_alias_map *cbase_to_ptype = NULL; ++static type_decl_map *fs_map = NULL; ++static uid_to_type_map *type_uid_map = NULL; ++ ++static void ++print_type_set(unsigned ftype_uid, type_alias_map *map) ++{ ++ if (!map->count (ftype_uid)) ++ return; ++ type_set* s = (*map)[ftype_uid]; ++ for (type_set::const_iterator it = s->begin (); it != s->end (); it++) ++ fprintf (dump_file, it == s->begin () ? "%d" : ", %d", *it); ++} ++ ++static void ++dump_type_with_uid (const char *msg, tree type, dump_flags_t flags = TDF_NONE) ++{ ++ fprintf (dump_file, msg); ++ print_generic_expr (dump_file, type, flags); ++ fprintf (dump_file, " (%d)\n", TYPE_UID (type)); ++} ++ ++/* Walk aggregate type and collect types of scalar elements. */ ++ ++static void ++collect_scalar_types (tree tp, std::list &types) ++{ ++ /* TODO: take into account different field offsets. ++ Also support array casts. 
*/ ++ if (tp && dump_file && (dump_flags & TDF_DETAILS)) ++ dump_type_with_uid ("Walk var's type: ", tp, TDF_UID); ++ if (RECORD_OR_UNION_TYPE_P (tp)) ++ { ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "Record's fields {\n"); ++ for (tree field = TYPE_FIELDS (tp); field; ++ field = DECL_CHAIN (field)) ++ { ++ if (TREE_CODE (field) != FIELD_DECL) ++ continue; ++ collect_scalar_types (TREE_TYPE (field), types); ++ } ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "}\n"); ++ return; ++ } ++ if (TREE_CODE (tp) == ARRAY_TYPE) ++ { ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "Array's innermost type:\n"); ++ /* Take the innermost component type. */ ++ tree elt; ++ for (elt = TREE_TYPE (tp); TREE_CODE (elt) == ARRAY_TYPE; ++ elt = TREE_TYPE (elt)) ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ print_generic_expr (dump_file, elt); ++ collect_scalar_types (elt, types); ++ return; ++ } ++ types.push_back (tp); ++} ++ ++static void maybe_register_aliases (tree type1, tree type2); ++ ++/* Walk type lists and maybe register type aliases. */ ++ ++static void ++compare_type_lists (std::list tlist1, std::list tlist2) ++{ ++ for (std::list::iterator ti1 = tlist1.begin (), ti2 = tlist2.begin (); ++ ti1 != tlist1.end (); ++ti1, ++ti2) ++ { ++ /* TODO: correct the analysis results if lists have different length. */ ++ if (ti2 == tlist2.end ()) ++ { ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "Type lists with different length!\n"); ++ break; ++ } ++ maybe_register_aliases (*ti1, *ti2); ++ } ++} ++ ++/* For two given types collect scalar element types and ++ compare the result lists to find type aliases. 
*/ ++ ++static void ++collect_scalar_types_and_find_aliases (tree t1, tree t2) ++{ ++ std::list tlist1; ++ std::list tlist2; ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "First type list: "); ++ collect_scalar_types (t1, tlist1); ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "Second type list: "); ++ collect_scalar_types (t2, tlist2); ++ compare_type_lists (tlist1, tlist2); ++} ++ ++/* Dump type with the corresponding set from the map. */ ++ ++static void ++dump_type_uid_with_set (const char *msg, tree type, type_alias_map *map, ++ bool dump_type = true, bool with_newline = true) ++{ ++ fprintf (dump_file, msg, TYPE_UID (type)); ++ if (dump_type) ++ print_generic_expr (dump_file, type); ++ fprintf (dump_file, " ("); ++ print_type_set (TYPE_UID (type), map); ++ fprintf (dump_file, ")"); ++ fprintf (dump_file, with_newline ? "\n" : " "); ++} ++ ++static void ++dump_two_types_uids_with_set (const char *msg, unsigned t1_uid, ++ unsigned t2_uid, type_alias_map *map) ++{ ++ fprintf (dump_file, msg, t1_uid, t2_uid); ++ fprintf (dump_file, " ("); ++ print_type_set (t1_uid, map); ++ fprintf (dump_file, ")\n"); ++} ++ ++/* Register type aliases in the map. Return true if new alias ++ is registered. */ ++ ++static bool ++register_ailas_type (tree type, tree alias_type, type_alias_map *map, ++ bool only_merge = false) ++{ ++ /* TODO: maybe support the case with one missed type. 
*/ ++ if (!type || !alias_type) ++ return false; ++ unsigned type_uid = TYPE_UID (type); ++ unsigned alias_type_uid = TYPE_UID (alias_type); ++ if (type_uid_map->count (type_uid) == 0) ++ (*type_uid_map)[type_uid] = type; ++ if (type_uid_map->count (alias_type_uid) == 0) ++ (*type_uid_map)[alias_type_uid] = alias_type; ++ ++ if (map->count (type_uid) == 0 && map->count (alias_type_uid) == 0) ++ { ++ (*map)[type_uid] = new type_set (); ++ (*map)[alias_type_uid] = (*map)[type_uid]; ++ } ++ else if (map->count (type_uid) == 0) ++ (*map)[type_uid] = (*map)[alias_type_uid]; ++ else if (map->count (alias_type_uid) == 0) ++ (*map)[alias_type_uid] = (*map)[type_uid]; ++ else if (map->count (type_uid) && map->count (alias_type_uid)) ++ { ++ if ((*map)[type_uid] == (*map)[alias_type_uid]) ++ { ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ dump_two_types_uids_with_set ("Types (%d) and (%d) are already in", ++ type_uid, alias_type_uid, map); ++ return false; ++ } ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ dump_type_uid_with_set ("T1 (%d) in set", type, map, false, true); ++ dump_type_uid_with_set ("T2 (%d) in set", alias_type, map, ++ false, true); ++ } ++ (*map)[type_uid]->insert ((*map)[alias_type_uid]->begin (), ++ (*map)[alias_type_uid]->end ()); ++ type_set *type_set = (*map)[alias_type_uid]; ++ for (type_set::const_iterator it1 = type_set->begin (); ++ it1 != type_set->end (); ++it1) ++ (*map)[*it1] = (*map)[type_uid]; ++ delete type_set; ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "MERGE: "); ++ } ++ if (!only_merge) ++ { ++ (*map)[type_uid]->insert (alias_type_uid); ++ (*map)[type_uid]->insert (type_uid); ++ } ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ dump_two_types_uids_with_set ("Insert types (%d) and (%d) into set", ++ type_uid, alias_type_uid, map); ++ return true; ++} ++ ++static void ++dump_two_types_with_uids (const char *msg, tree t1, tree t2) ++{ ++ fprintf (dump_file, msg); ++ print_generic_expr 
(dump_file, t1, TDF_UID); ++ fprintf (dump_file, " (%d), ", TYPE_UID (t1)); ++ print_generic_expr (dump_file, t2, TDF_UID); ++ fprintf (dump_file, " (%d)\n", TYPE_UID (t2)); ++} ++ ++static void ++analyze_pointees (tree type1, tree type2) ++{ ++ gcc_assert (POINTER_TYPE_P (type1) && POINTER_TYPE_P (type2)); ++ tree base1 = TREE_TYPE (type1); ++ tree base2 = TREE_TYPE (type2); ++ /* TODO: maybe analyze void pointers. */ ++ if (VOID_TYPE_P(base1) || VOID_TYPE_P(base2)) ++ return; ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ dump_two_types_with_uids ("Walk pointee types: ", base1, base2); ++ collect_scalar_types_and_find_aliases (base1, base2); ++} ++ ++static void ++map_canonical_base_to_pointer (tree type, tree to_insert) ++{ ++ type = TYPE_MAIN_VARIANT (type); ++ tree base_type = TREE_TYPE (type); ++ tree cbase_type = TYPE_CANONICAL (base_type); ++ if (!cbase_type) ++ return; ++ unsigned cbase_type_uid = TYPE_UID (cbase_type); ++ if (type_uid_map->count (cbase_type_uid) == 0) ++ (*type_uid_map)[cbase_type_uid] = cbase_type; ++ ++ if (cbase_to_ptype->count (cbase_type_uid) == 0) ++ { ++ (*cbase_to_ptype)[cbase_type_uid] = new type_set (); ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "New map cb-to-p=(%d): ", cbase_type_uid); ++ } ++ else if (!(*cbase_to_ptype)[cbase_type_uid]->count (TYPE_UID (to_insert))) ++ { ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "Found map cb-to-p=(%d): ", cbase_type_uid); ++ } ++ else ++ return; ++ /* Add all variants of 'to_insert' type. 
*/ ++ for (tree t = to_insert; t; t = TYPE_NEXT_VARIANT (t)) ++ { ++ unsigned t_uid = TYPE_UID (t); ++ if (!(*cbase_to_ptype)[cbase_type_uid]->count (t_uid)) ++ { ++ (*cbase_to_ptype)[cbase_type_uid]->insert (t_uid); ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "(%d) ", t_uid); ++ } ++ if (type_uid_map->count (t_uid) == 0) ++ (*type_uid_map)[t_uid] = t; ++ } ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "\n"); ++} ++ ++/* Analyse two types and maybe register them as aliases. Also collect ++ unsafe function types and map canonical base types to corresponding ++ pointer types. */ ++ ++static void ++maybe_register_aliases (tree type1, tree type2) ++{ ++ if (type1 && POINTER_TYPE_P (type1) && !FUNCTION_POINTER_TYPE_P (type1)) ++ map_canonical_base_to_pointer (type1, type1); ++ if (type2 && POINTER_TYPE_P (type2) && !FUNCTION_POINTER_TYPE_P (type2)) ++ map_canonical_base_to_pointer (type2, type2); ++ ++ if (type1 == type2 || !type1 || !type2) ++ return; ++ ++ if (POINTER_TYPE_P (type1) && POINTER_TYPE_P (type2)) ++ { ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ dump_two_types_with_uids ("Pointer types: ", type1, type2); ++ if (register_ailas_type (type1, type2, ta_map)) ++ analyze_pointees (type1, type2); ++ } ++ /* If function and non-function type pointers alias, ++ the function type is unsafe. */ ++ if (FUNCTION_POINTER_TYPE_P (type1) && !FUNCTION_POINTER_TYPE_P (type2)) ++ unsafe_types->insert (TYPE_UID (type1)); ++ if (FUNCTION_POINTER_TYPE_P (type2) && !FUNCTION_POINTER_TYPE_P (type1)) ++ unsafe_types->insert (TYPE_UID (type2)); ++ ++ /* Try to figure out with pointers to incomplete types. 
*/ ++ if (POINTER_TYPE_P (type1) && POINTER_TYPE_P (type2)) ++ { ++ type1 = TYPE_MAIN_VARIANT (type1); ++ type2 = TYPE_MAIN_VARIANT (type2); ++ tree base1 = TREE_TYPE (type1); ++ tree base2 = TREE_TYPE (type2); ++ if (RECORD_OR_UNION_TYPE_P (base1) && RECORD_OR_UNION_TYPE_P (base2)) ++ { ++ tree cb1 = TYPE_CANONICAL (base1); ++ tree cb2 = TYPE_CANONICAL (base2); ++ if (cb1 && !cb2) ++ map_canonical_base_to_pointer (type1, type2); ++ if (cb2 && !cb1) ++ map_canonical_base_to_pointer (type2, type1); ++ } ++ } ++} ++ ++/* Maybe register non-void/equal type aliases. */ ++ ++static void ++maybe_register_non_void_aliases (tree t1, tree t2) ++{ ++ gcc_assert (t1 && t2); ++ if (type_uid_map->count (TYPE_UID (t1)) == 0) ++ (*type_uid_map)[TYPE_UID (t1)] = t1; ++ if (type_uid_map->count (TYPE_UID (t2)) == 0) ++ (*type_uid_map)[TYPE_UID (t2)] = t2; ++ ++ /* Skip equal and void types. */ ++ if (t1 == t2 || VOID_TYPE_P (t1) || VOID_TYPE_P (t2)) ++ return; ++ maybe_register_aliases (t1, t2); ++} ++ ++/* Detect function type in call stmt. */ ++ ++static tree ++get_call_fntype (gcall *stmt) ++{ ++ tree fntype = NULL; ++ if (gimple_call_fndecl (stmt) && TREE_TYPE (gimple_call_fndecl (stmt))) ++ fntype = TREE_TYPE (gimple_call_fndecl (stmt)); ++ else ++ { ++ tree call_fn = gimple_call_fn (stmt); ++ tree ptype = TREE_TYPE (call_fn); ++ gcc_assert (ptype && TREE_TYPE (ptype)); ++ fntype = TREE_TYPE (ptype); ++ } ++ gcc_assert (fntype && fntype != void_type_node ++ && (TREE_CODE (fntype) == FUNCTION_TYPE ++ || TREE_CODE (fntype) == METHOD_TYPE)); ++ return fntype; ++} ++ ++static void ++dump_global_var (tree decl) ++{ ++ fprintf (dump_file, "Analyze global var: "); ++ print_generic_decl (dump_file, decl, TDF_NONE); ++ fprintf (dump_file, "\n"); ++} ++ ++static void ++collect_block_elt_types (tree tp, std::list &types, tree block) ++{ ++ tree vt = TREE_TYPE (tp); ++ gcc_assert (vt); ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ const char *msg = TREE_CODE (block) == BLOCK ? 
"VAR's block: " : ++ "VAR's ctor: "; ++ fprintf (dump_file, msg); ++ print_generic_expr (dump_file, tp); ++ dump_type_with_uid (" with type ", vt); ++ } ++ collect_scalar_types (vt, types); ++} ++ ++/* Compare types of initialization block's or constructor's elements and ++ fields of the initializer type to find type aliases. */ ++ ++static void ++compare_block_and_init_type (tree block, tree t1) ++{ ++ std::list tlist1; ++ std::list tlist2; ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "Init's type list: "); ++ collect_scalar_types (t1, tlist1); ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "Block's type list: "); ++ if (TREE_CODE (block) == CONSTRUCTOR) ++ { ++ unsigned HOST_WIDE_INT idx; ++ tree value; ++ FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (block), idx, value) ++ { ++ gcc_assert (value); ++ collect_block_elt_types (value, tlist2, block); ++ } ++ } ++ else if (TREE_CODE (block) == BLOCK) ++ for (tree var = BLOCK_VARS (block); var; var = DECL_CHAIN (var)) ++ { ++ if (TREE_CODE (var) != VAR_DECL) ++ continue; ++ collect_block_elt_types (var, tlist2, block); ++ } ++ else ++ gcc_unreachable (); ++ compare_type_lists (tlist1, tlist2); ++} ++ ++/* Analyze global var to find type aliases comparing types of var and ++ initializer elements. 
*/ ++ ++static void ++analyze_global_var (varpool_node *var) ++{ ++ var->get_constructor(); ++ tree decl = var->decl; ++ if (TREE_CODE (decl) == SSA_NAME || !DECL_INITIAL (decl) ++ || integer_zerop (DECL_INITIAL (decl))) ++ return; ++ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ dump_global_var (decl); ++ tree var_type = TREE_TYPE (decl); ++ tree init_type = TREE_TYPE (DECL_INITIAL (decl)); ++ gcc_assert (var_type && init_type); ++ if (RECORD_OR_UNION_TYPE_P (init_type) ++ && !initializer_zerop (DECL_INITIAL (decl))) ++ compare_block_and_init_type (DECL_INITIAL (decl), init_type); ++ else if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "Is not a record with nonzero init\n"); ++ ++ if (var_type == init_type) ++ return; ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ dump_two_types_with_uids ("Mismatch of var and init types: ", ++ var_type, init_type); ++ collect_scalar_types_and_find_aliases (var_type, init_type); ++} ++ ++static void ++dump_function_node_info (struct cgraph_node *n) ++{ ++ fprintf (dump_file, "\nAnalyse function node: "); ++ print_generic_expr (dump_file, n->decl); ++ fprintf (dump_file, "\n"); ++ tree fndecl_type = TREE_TYPE (n->decl); ++ dump_type_with_uid ("Function decl type: ", fndecl_type, TDF_UID); ++ if (TREE_TYPE (fndecl_type)) ++ dump_type_with_uid ("Return type: ", TREE_TYPE (fndecl_type)); ++ tree argt = TYPE_ARG_TYPES (fndecl_type); ++ for (unsigned i = 1; argt && argt != void_type_node ++ && !VOID_TYPE_P (TREE_VALUE (argt)); ++i, argt = TREE_CHAIN (argt)) ++ { ++ tree atype = TREE_VALUE (argt); ++ fprintf (dump_file, "%d-arg type: ", i); ++ dump_type_with_uid ("", atype); ++ } ++ fprintf (dump_file, "\n"); ++} ++ ++static void ++dump_call_stmt_info (gcall *stmt, tree fntype) ++{ ++ fprintf (dump_file, "\nAnalyse call stmt: "); ++ if (stmt) ++ print_gimple_stmt (dump_file, stmt, 3, TDF_DETAILS); ++ else ++ fprintf (dump_file, "(no stmt)\n"); ++ dump_type_with_uid ("fntype=", fntype, TDF_UID); ++ if 
(gimple_call_fntype (stmt)) ++ dump_type_with_uid ("fntype1=", gimple_call_fntype (stmt), TDF_UID); ++ if (gimple_call_fndecl (stmt) && TREE_TYPE (gimple_call_fndecl (stmt))) ++ dump_type_with_uid ("fntype2=", TREE_TYPE (gimple_call_fndecl (stmt)), ++ TDF_UID); ++} ++ ++/* Dump actual and formal arg types. */ ++ ++static void ++dump_arg_types_with_uids (int i, tree t1, tree t2) ++{ ++ if (i >= 0) ++ fprintf (dump_file, "Call's %d-arg types: ", i); ++ else ++ fprintf (dump_file, "Call's return types: "); ++ fprintf (dump_file, "(%d) and (%d) ", TYPE_UID (t1), TYPE_UID (t2)); ++ print_generic_expr (dump_file, t1, TDF_UID); ++ fprintf (dump_file, " "); ++ print_generic_expr (dump_file, t2, TDF_UID); ++ fprintf (dump_file, "\n"); ++} ++ ++/* Analyze call graph edge with connected call stmt to find type aliases in ++ arguments and return value casts. */ ++ ++static void ++analyze_cgraph_edge (cgraph_edge *e) ++{ ++ gcall *stmt = e->call_stmt; ++ gcc_assert (stmt != NULL); ++ tree fntype = get_call_fntype (stmt); ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ dump_call_stmt_info (stmt, fntype); ++ if (gimple_has_lhs (stmt)) ++ { ++ tree t1 = TREE_TYPE (gimple_call_lhs (stmt)); ++ tree t2 = TREE_TYPE (fntype); ++ const int is_return_arg = -1; ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ dump_arg_types_with_uids (is_return_arg, t1, t2); ++ maybe_register_non_void_aliases (t1, t2); ++ } ++ ++ tree argt = TYPE_ARG_TYPES (fntype); ++ if (!argt) ++ { ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "Finish call stmt analysis\n"); ++ return; ++ } ++ gcc_assert (argt); ++ unsigned num_args = gimple_call_num_args (stmt); ++ for (unsigned i = 0; i < num_args && argt; ++i, argt = TREE_CHAIN (argt)) ++ { ++ tree arg = gimple_call_arg (stmt, i); ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ dump_arg_types_with_uids (i, TREE_VALUE (argt), TREE_TYPE (arg)); ++ if (TREE_VALUE (argt) == TREE_TYPE (arg) ++ || !POINTER_TYPE_P (TREE_VALUE (argt)) ++ || 
!POINTER_TYPE_P (TREE_TYPE (arg))) ++ continue; ++ maybe_register_non_void_aliases (TREE_VALUE (argt), TREE_TYPE (arg)); ++ tree t1 = TREE_TYPE (TREE_VALUE (argt)); ++ tree t2 = TREE_TYPE (TREE_TYPE (arg)); ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "Call's %d-arg base types: (%d) and (%d)\n", ++ i, (t1 ? TYPE_UID (t1) : 0), (t2 ? TYPE_UID (t2) : 0)); ++ maybe_register_non_void_aliases (t1, t2); ++ } ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "End list of args\n"); ++ tree fndecl_type = NULL; ++ if (e->callee && e->callee->decl) ++ fndecl_type = TREE_TYPE (e->callee->decl); ++ if (fndecl_type && fndecl_type != fntype) ++ { ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "Function decl and edge types mismatch:\n"); ++ register_ailas_type (fntype, fndecl_type, fta_map); ++ } ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "End call stmt analysis\n"); ++} ++ ++static void ++dump_assign_info (gimple *stmt, tree rhs, tree lhs_type, tree rhs_type) ++{ ++ fprintf (dump_file, "\nAnalyse assign cast/copy stmt, rhs=%s: ", ++ get_tree_code_name (TREE_CODE (rhs))); ++ print_gimple_stmt (dump_file, stmt, 3, TDF_DETAILS); ++ fprintf (dump_file, "Types: "); ++ print_generic_expr (dump_file, lhs_type); ++ fprintf (dump_file, ", "); ++ print_generic_expr (dump_file, rhs_type); ++ fprintf (dump_file, "\n"); ++} ++ ++/* Analyze cast/copy assign stmt to find type aliases. 
*/ ++ ++static void ++analyze_assign_stmt (gimple *stmt) ++{ ++ gcc_assert (is_gimple_assign (stmt)); ++ tree rhs_type = NULL_TREE; ++ tree lhs_type = TREE_TYPE (gimple_assign_lhs (stmt)); ++ tree rhs = gimple_assign_rhs1 (stmt); ++ if (TREE_CODE (rhs) == MEM_REF) ++ { ++ rhs = TREE_OPERAND (rhs, 0); ++ tree ptr_type = TREE_TYPE (rhs); ++ gcc_assert (POINTER_TYPE_P (ptr_type)); ++ rhs_type = TREE_TYPE (ptr_type); ++ } ++ else if (TREE_CODE (rhs) == ADDR_EXPR) ++ { ++ rhs = TREE_OPERAND (rhs, 0); ++ if (VAR_OR_FUNCTION_DECL_P (rhs) || TREE_CODE (rhs) == STRING_CST ++ || TREE_CODE (rhs) == ARRAY_REF || TREE_CODE (rhs) == PARM_DECL) ++ rhs_type = build_pointer_type (TREE_TYPE (rhs)); ++ else if (TREE_CODE (rhs) == COMPONENT_REF) ++ { ++ rhs = TREE_OPERAND (rhs, 1); ++ rhs_type = build_pointer_type (TREE_TYPE (rhs)); ++ } ++ else if (TREE_CODE (rhs) == MEM_REF) ++ { ++ rhs = TREE_OPERAND (rhs, 0); ++ rhs_type = TREE_TYPE (rhs); ++ gcc_assert (POINTER_TYPE_P (rhs_type)); ++ } ++ else ++ gcc_unreachable(); ++ } ++ else ++ rhs_type = TREE_TYPE (rhs); ++ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ dump_assign_info (stmt, rhs, lhs_type, rhs_type); ++ if (CONSTANT_CLASS_P (rhs) && !zerop (rhs) ++ && FUNCTION_POINTER_TYPE_P (TREE_TYPE (rhs))) ++ { ++ tree ftype = TREE_TYPE (rhs_type); ++ unsafe_types->insert (TYPE_UID (ftype)); ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "Function type (%d) is unsafe due to assign " ++ "non-zero cst to function pointer\n", TYPE_UID (ftype)); ++ } ++ maybe_register_non_void_aliases (lhs_type, rhs_type); ++} ++ ++/* Walk all fn's stmt to analyze assigns. 
*/ ++ ++static void ++analyze_assigns (function* fn) ++{ ++ push_cfun (fn); ++ basic_block bb; ++ gimple_stmt_iterator si; ++ FOR_EACH_BB_FN (bb, fn) ++ for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) ++ { ++ gimple *stmt = gsi_stmt (si); ++ if (!gimple_assign_cast_p (stmt) && !gimple_assign_copy_p (stmt)) ++ continue; ++ analyze_assign_stmt (stmt); ++ } ++ pop_cfun (); ++} ++ ++/* Walk all functions to collect sets of type aliases. */ ++ ++static void ++collect_type_alias_sets () ++{ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "\n\nCollect type alias sets walking global vars.\n"); ++ ++ varpool_node *var; ++ FOR_EACH_VARIABLE (var) ++ if (var->real_symbol_p ()) ++ analyze_global_var (var); ++ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "\nCollect type alias sets walking functions.\n"); ++ ++ struct cgraph_node *n; ++ FOR_EACH_FUNCTION (n) ++ { ++ if (!n->has_gimple_body_p ()) ++ continue; ++ n->get_body (); ++ function *fn = DECL_STRUCT_FUNCTION (n->decl); ++ if (!fn) ++ continue; ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ dump_function_node_info (n); ++ /* Analyze direct/indirect function calls. */ ++ for (cgraph_edge *e = n->callees; e; e = e->next_callee) ++ analyze_cgraph_edge (e); ++ for (cgraph_edge *e = n->indirect_calls; e; e = e->next_callee) ++ analyze_cgraph_edge (e); ++ /* Analyze assign (with casts) statements. 
*/ ++ analyze_assigns (fn); ++ } ++} ++ ++static void ++process_cbase_to_ptype_map () ++{ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "\nProcess types in cbase-to-ptypes map:\n"); ++ ++ for (type_alias_map::iterator it1 = cbase_to_ptype->begin (); ++ it1 != cbase_to_ptype->end (); ++it1) ++ { ++ type_set *set = it1->second; ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ dump_type_uid_with_set ("cb=(%d): ", (*type_uid_map)[it1->first], ++ cbase_to_ptype); ++ tree ctype = NULL; ++ for (type_set::const_iterator it2 = set->begin (); ++ it2 != set->end (); it2++) ++ { ++ tree t2 = (*type_uid_map)[*it2]; ++ if (t2 == TYPE_MAIN_VARIANT (t2)) ++ { ++ ctype = t2; ++ break; ++ } ++ } ++ if (!ctype) ++ continue; ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ dump_type_with_uid ("Select canonical type: ", ctype); ++ for (type_set::const_iterator it2 = set->begin (); ++ it2 != set->end (); it2++) ++ { ++ tree t = (*type_uid_map)[*it2]; ++ if (!ctype_map->count (t)) ++ { ++ (*ctype_map)[t] = ctype; ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "Set canonical type for (%d)->c(%d)\n", ++ *it2, TYPE_UID (ctype)); ++ } ++ else if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "Canonical type is already set (%d)->c(%d)\n", ++ *it2, TYPE_UID ((*ctype_map)[t])); ++ } ++ } ++} ++ ++static void ++set_canonical_type_for_type_set (type_set *set) ++{ ++ tree one_canonical = NULL; ++ for (type_set::const_iterator it = set->begin (); it != set->end (); it++) ++ { ++ tree t = (*type_uid_map)[*it]; ++ gcc_assert (t); ++ if ((TYPE_CANONICAL (t) || ctype_map->count (t))) ++ { ++ one_canonical = TYPE_CANONICAL (t) ? 
TYPE_CANONICAL (t) ++ : (*ctype_map)[t]; ++ gcc_assert (COMPLETE_TYPE_P (t)); ++ break; ++ } ++ } ++ for (type_set::const_iterator it = set->begin (); it != set->end (); it++) ++ { ++ tree t = (*type_uid_map)[*it]; ++ if (!ctype_map->count (t)) ++ { ++ (*ctype_map)[t] = one_canonical; ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ if (one_canonical) ++ fprintf (dump_file, "Set canonical type for (%d)->c(%d)\n", ++ TYPE_UID (t), TYPE_UID (one_canonical)); ++ else ++ fprintf (dump_file, "Set NULL canonical for (%d)\n", *it); ++ } ++ } ++ else if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ tree ct = (*ctype_map)[t]; ++ fprintf (dump_file, "Canonical type is already set (%d)->c(%d)\n", ++ TYPE_UID (t), ct ? TYPE_UID (ct) : -1); ++ } ++ } ++} ++ ++static void ++dump_is_type_set_incomplete (type_set * set) ++{ ++ bool has_complete_types = false; ++ for (type_set::const_iterator it = set->begin (); it != set->end (); it++) ++ if (COMPLETE_TYPE_P ((*type_uid_map)[*it])) ++ { ++ has_complete_types = true; ++ break; ++ } ++ if (!has_complete_types) ++ fprintf (dump_file, "Set of incomplete types\n"); ++} ++ ++static void ++process_alias_type_sets () ++{ ++ if (dump_file) ++ fprintf (dump_file, "\nProcess alias sets of types:\n"); ++ /* Keep processed types to process each type set (in ta_map) only once. */ ++ type_set processed_types; ++ for (type_alias_map::iterator it1 = ta_map->begin (); ++ it1 != ta_map->end (); ++it1) ++ { ++ tree type = (*type_uid_map)[it1->first]; ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ dump_type_uid_with_set ("(%d) ", type, ta_map); ++ if (processed_types.count (TYPE_UID (type)) != 0 ++ || unsafe_types->count (TYPE_UID (type)) != 0) ++ continue; ++ type_set *set = it1->second; ++ for (type_set::const_iterator it2 = set->begin (); ++ it2 != set->end (); it2++) ++ processed_types.insert (*it2); ++ /* Check if this type set contains function pointers and ++ non-function pointers. 
*/ ++ bool has_no_fp = false, has_fp = false; ++ for (type_set::const_iterator it2 = set->begin (); ++ it2 != set->end (); it2++) ++ { ++ tree t2 = (*type_uid_map)[*it2]; ++ if (FUNCTION_POINTER_TYPE_P (t2)) ++ has_fp = true; ++ else ++ has_no_fp = true; ++ if (has_fp && has_no_fp) ++ break; ++ } ++ if (has_fp) ++ { ++ for (type_set::const_iterator it2 = set->begin (); ++ it2 != set->end (); it2++) ++ { ++ tree t2 = (*type_uid_map)[*it2]; ++ /* If it's a type set with mixed function and not-function types, ++ mark all function pointer types in the set as unsafe. */ ++ if (has_no_fp && FUNCTION_POINTER_TYPE_P (t2)) ++ { ++ tree ftype = TREE_TYPE (t2); ++ unsafe_types->insert (TYPE_UID (ftype)); ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "Insert function type (%d) to unsafe " ++ "due to escape its pointer type (%d) to mixed " ++ "alias set (printed before)\n", ++ TYPE_UID (ftype), TYPE_UID (t2)); ++ } ++ /* If it's a type set with only function pointer types, ++ mark all base function types in the set as aliases. 
*/ ++ if (!has_no_fp) ++ { ++ gcc_assert (FUNCTION_POINTER_TYPE_P (type) ++ && FUNCTION_POINTER_TYPE_P (t2)); ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "Insert function type aliases by " ++ "function pointer aliases:\n"); ++ register_ailas_type (TREE_TYPE (type), TREE_TYPE (t2), ++ fta_map); ++ } ++ } ++ } ++ set_canonical_type_for_type_set (set); ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ dump_is_type_set_incomplete (set); ++ } ++} ++ ++static void ++dump_unsafe_and_canonical_types () ++{ ++ fprintf (dump_file, "\nList of unsafe types:\n"); ++ for (type_set::iterator it = unsafe_types->begin (); ++ it != unsafe_types->end (); ++it) ++ { ++ print_generic_expr (dump_file, (*type_uid_map)[*it]); ++ fprintf (dump_file, " (%d)\n", *it); ++ } ++ fprintf (dump_file, "\nList of alias canonical types:\n"); ++ for (type_alias_map::iterator it = ta_map->begin (); ++ it != ta_map->end (); ++it) ++ { ++ tree type = (*type_uid_map)[it->first]; ++ if (ctype_map->count (type) == 0) ++ continue; ++ print_generic_expr (dump_file, type); ++ fprintf (dump_file, " -> "); ++ tree ctype = (*ctype_map)[type]; ++ if (ctype != NULL) ++ { ++ print_generic_expr (dump_file, ctype); ++ fprintf (dump_file, " (%d)->(%d)\n", ++ TYPE_UID (type), TYPE_UID (ctype)); ++ } ++ else ++ fprintf (dump_file, " null\n"); ++ } ++} ++ ++static void ++init_function_type_alias_for_edge (cgraph_edge *e) ++{ ++ gcall *stmt = e->call_stmt; ++ gcc_assert (stmt != NULL); ++ tree fntype = get_call_fntype (stmt); ++ if (fta_map->count (TYPE_UID (fntype)) == 0) ++ register_ailas_type (fntype, fntype, fta_map); ++} ++ ++/* This pass over all function types makes each function type to have ++ at least one alias (itself). 
*/ ++ ++static void ++init_function_type_aliases () ++{ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "\nInit aliases for all function types.\n"); ++ ++ struct cgraph_node *n; ++ FOR_EACH_FUNCTION (n) ++ { ++ tree fntype = TREE_TYPE (n->decl); ++ if (fta_map->count (TYPE_UID (fntype)) == 0) ++ register_ailas_type (fntype, fntype, fta_map); ++ ++ if (!n->has_gimple_body_p ()) ++ continue; ++ n->get_body (); ++ function *fn = DECL_STRUCT_FUNCTION (n->decl); ++ if (!fn) ++ continue; ++ ++ /* Init for function types of direct/indirect callees. */ ++ for (cgraph_edge *e = n->callees; e; e = e->next_callee) ++ init_function_type_alias_for_edge (e); ++ for (cgraph_edge *e = n->indirect_calls; e; e = e->next_callee) ++ init_function_type_alias_for_edge (e); ++ } ++} ++ ++/* In lto-common.c there is the global canonical type table and the ++ corresponding machinery which detects the same types from different ++ modules and joins them assigning the one canonical type. However ++ lto does not set the goal to do a complete and precise matching, so ++ sometimes a few types have no TYPE_CANONICAL set. Since ICP relies on ++ precise type matching, we create the similar table and register all ++ the required types in it. */ ++ ++static std::map<const_tree, hashval_t> *canonical_type_hash_cache = NULL; ++static std::map<hashval_t, tree> *icp_canonical_types = NULL; ++ ++static hashval_t hash_canonical_type (tree type); ++ ++/* Register canonical type in icp_canonical_types and ctype_map evaluating ++ its hash (using hash_canonical_type) if it's needed.
*/ ++ ++static hashval_t ++icp_register_canonical_type (tree t) ++{ ++ hashval_t hash; ++ if (canonical_type_hash_cache->count ((const_tree) t) == 0) ++ { ++ tree t1 = TYPE_MAIN_VARIANT (t); ++ if (!COMPLETE_TYPE_P (t1) && TYPE_CANONICAL (t1) ++ && COMPLETE_TYPE_P (TYPE_CANONICAL (t1))) ++ { ++ t1 = TYPE_CANONICAL (t1); ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "Use complete canonical (%d) for (%d)\n", ++ TYPE_UID (t1), TYPE_UID (t)); ++ } ++ hash = hash_canonical_type (t1); ++ /* Cache the just computed hash value. */ ++ (*canonical_type_hash_cache)[(const_tree) t] = hash; ++ } ++ else ++ hash = (*canonical_type_hash_cache)[(const_tree) t]; ++ ++ tree new_type = t; ++ if (icp_canonical_types->count (hash)) ++ { ++ new_type = (*icp_canonical_types)[hash]; ++ gcc_checking_assert (new_type != t); ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "Found canonical (%d) for (%d), h=%u\n", ++ TYPE_UID (new_type), TYPE_UID (t), (unsigned int) hash); ++ } ++ else ++ { ++ (*icp_canonical_types)[hash] = t; ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "Register canonical %d, h=%u\n", TYPE_UID (t), ++ (unsigned int) hash); ++ } ++ if (ctype_map->count (t) == 0) ++ (*ctype_map)[t] = new_type; ++ return hash; ++} ++ ++/* Merge hstate with hash of the given type. If the type is not registered, ++ register it in the maps of the canonical types. */ ++ ++static void ++iterative_hash_canonical_type (tree type, inchash::hash &hstate) ++{ ++ hashval_t v; ++ /* All type variants have same TYPE_CANONICAL. */ ++ type = TYPE_MAIN_VARIANT (type); ++ if (canonical_type_hash_cache->count ((const_tree) type)) ++ v = (*canonical_type_hash_cache)[(const_tree) type]; ++ else ++ v = icp_register_canonical_type (type); ++ hstate.merge_hash (v); ++} ++ ++/* Compute and return hash for the given type. It does not take into account ++ base types of pointer types. 
*/ ++ ++static hashval_t ++hash_canonical_type (tree type) ++{ ++ inchash::hash hstate; ++ enum tree_code code; ++ /* Combine a few common features of types so that types are grouped into ++ smaller sets; when searching for existing matching types to merge, ++ only existing types having the same features as the new type will be ++ checked. */ ++ code = tree_code_for_canonical_type_merging (TREE_CODE (type)); ++ hstate.add_int (code); ++ if (!RECORD_OR_UNION_TYPE_P (type)) ++ hstate.add_int (TYPE_MODE (type)); ++ /* Incorporate common features of numerical types. */ ++ if (INTEGRAL_TYPE_P (type) ++ || SCALAR_FLOAT_TYPE_P (type) ++ || FIXED_POINT_TYPE_P (type) ++ || TREE_CODE (type) == OFFSET_TYPE ++ || POINTER_TYPE_P (type)) ++ { ++ hstate.add_int (TYPE_PRECISION (type)); ++ if (!type_with_interoperable_signedness (type)) ++ hstate.add_int (TYPE_UNSIGNED (type)); ++ } ++ if (VECTOR_TYPE_P (type)) ++ { ++ hstate.add_poly_int (TYPE_VECTOR_SUBPARTS (type)); ++ hstate.add_int (TYPE_UNSIGNED (type)); ++ } ++ if (TREE_CODE (type) == COMPLEX_TYPE) ++ hstate.add_int (TYPE_UNSIGNED (type)); ++ if (POINTER_TYPE_P (type)) ++ hstate.add_int (TYPE_ADDR_SPACE (TREE_TYPE (type))); ++ /* For array types hash the domain bounds and the string flag. */ ++ if (TREE_CODE (type) == ARRAY_TYPE && TYPE_DOMAIN (type)) ++ { ++ hstate.add_int (TYPE_STRING_FLAG (type)); ++ /* OMP lowering can introduce error_mark_node in place of ++ random local decls in types. */ ++ if (TYPE_MIN_VALUE (TYPE_DOMAIN (type)) != error_mark_node) ++ inchash::add_expr (TYPE_MIN_VALUE (TYPE_DOMAIN (type)), hstate); ++ if (TYPE_MAX_VALUE (TYPE_DOMAIN (type)) != error_mark_node) ++ inchash::add_expr (TYPE_MAX_VALUE (TYPE_DOMAIN (type)), hstate); ++ } ++ /* Recurse for aggregates with a single element type. 
*/ ++ if (TREE_CODE (type) == ARRAY_TYPE ++ || TREE_CODE (type) == COMPLEX_TYPE ++ || TREE_CODE (type) == VECTOR_TYPE) ++ iterative_hash_canonical_type (TREE_TYPE (type), hstate); ++ /* Incorporate function return and argument types. */ ++ if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE) ++ { ++ unsigned nargs = 0; ++ iterative_hash_canonical_type (TREE_TYPE (type), hstate); ++ for (tree p = TYPE_ARG_TYPES (type); p; p = TREE_CHAIN (p)) ++ { ++ iterative_hash_canonical_type (TREE_VALUE (p), hstate); ++ nargs++; ++ } ++ hstate.add_int (nargs); ++ } ++ if (RECORD_OR_UNION_TYPE_P (type)) ++ { ++ unsigned nfields = 0; ++ for (tree f = TYPE_FIELDS (type); f; f = TREE_CHAIN (f)) ++ if (TREE_CODE (f) == FIELD_DECL) ++ { ++ iterative_hash_canonical_type (TREE_TYPE (f), hstate); ++ nfields++; ++ } ++ hstate.add_int (nfields); ++ } ++ return hstate.end (); ++} ++ ++/* It finds canonical type in ctype_map and icp_canonical_types maps. */ ++ ++static tree ++find_canonical_type (tree type) ++{ ++ if (ctype_map->count (type)) ++ return (*ctype_map)[type]; ++ if (canonical_type_hash_cache->count ((const_tree) type) == 0) ++ return NULL; ++ hashval_t h = (*canonical_type_hash_cache)[(const_tree) type]; ++ if (icp_canonical_types->count (h)) ++ return (*icp_canonical_types)[h]; ++ return NULL; ++} ++ ++/* It updates hash for the given type taking into account pointees in pointer ++ types. If the type is incomplete function type, it returns true. It's used ++ only for function type hash calculation. */ ++ ++static bool ++initial_hash_canonical_type (tree type, inchash::hash &hstate) ++{ ++ /* All type variants have same TYPE_CANONICAL. 
*/ ++ type = TYPE_MAIN_VARIANT (type); ++ if (VOID_TYPE_P (type)) ++ { ++ hstate.add_int (POINTER_TYPE); ++ return false; ++ } ++ hstate.add_int (TREE_CODE (type)); ++ hstate.add_int (TYPE_MODE (type)); ++ if (POINTER_TYPE_P (type)) ++ { ++ tree base_type = TREE_TYPE (type); ++ hstate.add_int (TYPE_ADDR_SPACE (base_type)); ++ return initial_hash_canonical_type (base_type, hstate); ++ } ++ tree ctype = find_canonical_type (type); ++ if (!ctype) ++ { ++ if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE) ++ { ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "Due to ftype (%d)\n", TYPE_UID (type)); ++ return true; ++ } ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ dump_type_with_uid ("Has NO canonical type: ", type, TDF_UID); ++ icp_register_canonical_type (type); ++ if (ctype_map->count(type)) ++ ctype = (*ctype_map)[type]; ++ if (ctype && dump_file && (dump_flags & TDF_DETAILS)) ++ dump_type_with_uid ("Found canonical type: ", ctype, TDF_UID); ++ } ++ else if (dump_file && (dump_flags & TDF_DETAILS)) ++ dump_type_with_uid ("Canonical type: ", ctype, TDF_UID); ++ hstate.add_int (TYPE_UID (ctype)); ++ return false; ++} ++ ++/* It returns hash value for the given function type. If the function type is ++ incomplete, insert it in the incomplete_hash_ftype set. */ ++ ++static hashval_t ++get_hash_for_ftype (tree type, type_set *incomplete_hash_ftype) ++{ ++ bool incomplete = false; ++ inchash::hash hstate; ++ /* Function type is expected. */ ++ gcc_assert (TREE_CODE (type) == FUNCTION_TYPE ++ || TREE_CODE (type) == METHOD_TYPE); ++ /* Hash return type. */ ++ tree rt = TREE_TYPE (type); ++ tree ct = rt ? find_canonical_type (rt) : void_type_node; ++ incomplete |= initial_hash_canonical_type (ct ? ct : rt, hstate); ++ /* Hash arg types. 
*/ ++ tree argt = TYPE_ARG_TYPES (type); ++ if (!argt) ++ incomplete |= initial_hash_canonical_type (void_type_node, hstate); ++ else ++ for (unsigned i = 1; argt; ++i, argt = TREE_CHAIN (argt)) ++ { ++ tree ct = find_canonical_type (TREE_VALUE (argt)); ++ ct = ct ? ct : TREE_VALUE (argt); ++ incomplete |= initial_hash_canonical_type (ct, hstate); ++ } ++ if (incomplete && incomplete_hash_ftype->count (TYPE_UID (type)) == 0) ++ incomplete_hash_ftype->insert (TYPE_UID (type)); ++ else if (!incomplete && incomplete_hash_ftype->count (TYPE_UID (type)) != 0) ++ incomplete_hash_ftype->erase (TYPE_UID (type)); ++ return hstate.end(); ++} ++ ++/* Find type aliases evaluating type hashes and connecting types with ++ the same hash values. */ ++ ++static void ++find_type_aliases_by_compatibility () ++{ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "\nFind type aliases checking their compatibility.\n"); ++ ++ std::map hash_to_ftype; ++ type_set *incomplete_hash_ftype = new type_set; ++ canonical_type_hash_cache = new std::map; ++ icp_canonical_types = new std::map; ++ ++ bool changed; ++ int i = 0; ++ do ++ { ++ changed = false; ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "Iteration %d\n", i); ++ for (type_alias_map::iterator it = fta_map->begin (); ++ it != fta_map->end (); ++it) ++ { ++ tree type = (*type_uid_map)[it->first]; ++ if (TYPE_CANONICAL (type)) ++ continue; ++ hashval_t hash = get_hash_for_ftype (type, incomplete_hash_ftype); ++ if (incomplete_hash_ftype->count (TYPE_UID (type)) != 0) ++ { ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "Incomplete (%d), h=%u\n", TYPE_UID (type), ++ (unsigned int) hash); ++ continue; ++ } ++ if (hash_to_ftype.count (hash) == 0) ++ hash_to_ftype[hash] = type; ++ TYPE_CANONICAL (type) = hash_to_ftype[hash]; ++ changed = true; ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "(%d)->(%d), h=%u\n", TYPE_UID (type), ++ TYPE_UID 
(TYPE_CANONICAL (type)), (unsigned int) hash); ++ } ++ i++; ++ } ++ while (changed); ++ ++ delete incomplete_hash_ftype; ++ delete icp_canonical_types; ++ delete canonical_type_hash_cache; ++} ++ ++static void ++dump_function_type_aliases_list () ++{ ++ fprintf (dump_file, "\nList of function type aliases:\n"); ++ for (type_alias_map::iterator it = fta_map->begin (); ++ it != fta_map->end (); ++it) ++ dump_type_uid_with_set ("(%d) ", (*type_uid_map)[it->first], fta_map); ++} ++ ++/* Collect type aliases and find missed canonical types. */ ++ ++static void ++collect_function_type_aliases () ++{ ++ collect_type_alias_sets (); ++ process_cbase_to_ptype_map (); ++ process_alias_type_sets (); ++ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ dump_unsafe_and_canonical_types (); ++ ++ /* TODO: maybe remove this pass. */ ++ init_function_type_aliases (); ++ for (type_alias_map::iterator it = fta_map->begin (); ++ it != fta_map->end (); ++it) ++ set_canonical_type_for_type_set (it->second); ++ find_type_aliases_by_compatibility (); ++ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ dump_function_type_aliases_list (); ++} ++ ++static void ++dump_function_signature_info (struct cgraph_node *n, tree ftype, bool varargs) ++{ ++ fprintf (dump_file, "Function decl: "); ++ print_generic_expr (dump_file, n->decl); ++ dump_type_uid_with_set (" with type (%d) ", ftype, fta_map, true, false); ++ if (varargs) ++ fprintf (dump_file, "has varargs, "); ++ if (TREE_CODE (ftype) == METHOD_TYPE) ++ fprintf (dump_file, "is method, "); ++ if (!n->address_taken) ++ fprintf (dump_file, "is not address taken, "); ++ if (unsafe_types->count (TYPE_UID (ftype))) ++ fprintf (dump_file, "is unsafe, "); ++ fprintf (dump_file, "\n"); ++} ++ ++/* Check if the function has variadic arguments. ++ It's corrected count_num_arguments (). 
*/ ++ ++static bool ++has_varargs (tree decl) ++{ ++ tree t; ++ unsigned int num = 0; ++ for (t = TYPE_ARG_TYPES (TREE_TYPE (decl)); ++ t && TREE_VALUE (t) != void_type_node; t = TREE_CHAIN (t)) ++ num++; ++ if (!t && num) ++ return true; ++ return false; ++} ++ ++/* Join fs_map's sets for function type aliases. */ ++ ++static void ++merge_fs_map_for_ftype_aliases () ++{ ++ if (dump_file) ++ fprintf (dump_file, "\n\nMerge decl sets for function type aliases:\n"); ++ type_set processed_types; ++ for (type_decl_map::iterator it1 = fs_map->begin (); ++ it1 != fs_map->end (); ++it1) ++ { ++ if (processed_types.count (it1->first) != 0) ++ continue; ++ decl_set *d_set = it1->second; ++ tree type = (*type_uid_map)[it1->first]; ++ type_set *set = (*fta_map)[it1->first]; ++ for (type_set::const_iterator it2 = set->begin (); ++ it2 != set->end (); it2++) ++ { ++ tree t2 = (*type_uid_map)[*it2]; ++ processed_types.insert (*it2); ++ if (type == t2) ++ continue; ++ gcc_assert ((TREE_CODE (type) == FUNCTION_TYPE ++ || TREE_CODE (type) == METHOD_TYPE) ++ && (TREE_CODE (t2) == FUNCTION_TYPE ++ || TREE_CODE (t2) == METHOD_TYPE)); ++ if (fs_map->count (*it2) == 0 || (*fs_map)[*it2] == NULL) ++ (*fs_map)[*it2] = d_set; ++ else ++ { ++ decl_set *t2_decl_set = (*fs_map)[*it2]; ++ (*fs_map)[*it2] = d_set; ++ gcc_assert (t2_decl_set && t2_decl_set->size() > 0); ++ d_set->insert (t2_decl_set->begin (), t2_decl_set->end ()); ++ delete t2_decl_set; ++ } ++ } ++ } ++} ++ ++/* Dump function types with set of functions corresponding to it. 
*/ ++ ++static void ++dump_function_signature_sets () ++{ ++ fprintf (dump_file, "\n\nUnique sets of function signatures:\n"); ++ std::set processed_sets; ++ for (type_decl_map::iterator it1 = fs_map->begin (); ++ it1 != fs_map->end (); ++it1) ++ { ++ decl_set *set = it1->second; ++ if (processed_sets.count (set) != 0) ++ continue; ++ processed_sets.insert (set); ++ fprintf (dump_file, "{ "); ++ print_type_set (it1->first, fta_map); ++ fprintf (dump_file, " : "); ++ for (decl_set::const_iterator it2 = set->begin (); ++ it2 != set->end (); it2++) ++ { ++ fprintf (dump_file, it2 == set->begin () ? "" : ", "); ++ print_generic_expr (dump_file, *it2); ++ fprintf (dump_file, "(%d)", DECL_UID (*it2)); ++ } ++ fprintf (dump_file, "}\n"); ++ } ++} ++ ++/* Fill the map of function types to sets of function decls. */ ++ ++static void ++collect_function_signatures () ++{ ++ if (dump_file) ++ fprintf (dump_file, "\n\nCollect function signatures:\n"); ++ struct cgraph_node *n; ++ FOR_EACH_FUNCTION (n) ++ { ++ gcc_assert (n->decl && TREE_TYPE (n->decl)); ++ tree ftype = TREE_TYPE (n->decl); ++ bool varargs = has_varargs (n->decl); ++ if (varargs && n->address_taken) ++ has_address_taken_functions_with_varargs = true; ++ if (dump_file) ++ dump_function_signature_info (n, ftype, varargs); ++ if (!n->address_taken) ++ continue; ++ /* TODO: make a separate pass at the end to remove canonicals. */ ++ tree ctype = TYPE_CANONICAL (ftype); ++ unsigned alias_type_fs = ctype ? 
TYPE_UID (ctype) : 0; ++ if (dump_file) ++ fprintf (dump_file, "canonical type: %d %ld\n", ++ alias_type_fs, fs_map->count (alias_type_fs)); ++ if (alias_type_fs) ++ { ++ if (fs_map->count (TYPE_UID (ctype)) == 0) ++ (*fs_map)[TYPE_UID (ctype)] = new decl_set (); ++ if (dump_file) ++ fprintf (dump_file, "insert decl (%d) to set of map [%d]\n", ++ DECL_UID (n->decl), TYPE_UID (ctype)); ++ (*fs_map)[TYPE_UID (ctype)]->insert (n->decl); ++ } ++ } ++ merge_fs_map_for_ftype_aliases (); ++ if (dump_file) ++ dump_function_signature_sets (); ++} ++ ++#define MAX_TARG_STAT 4 ++struct icp_stats ++{ ++ int npolymorphic; ++ int nspeculated; ++ int nsubst; ++ int ncold; ++ int nmultiple; ++ int noverwritable; ++ int nnotdefined; ++ int nexternal; ++ int nartificial; ++ int nremove; ++ int nicp; ++ int nspec; ++ int nf; ++ int ncalls; ++ int nindir; ++ int nind_only; ++ int ntargs[MAX_TARG_STAT + 1]; ++}; ++ ++static void ++dump_processing_function (struct cgraph_node *n, struct icp_stats &stats) ++{ ++ fprintf (dump_file, "\n\nProcesing function %s\n", n->dump_name ()); ++ print_generic_expr (dump_file, n->decl); ++ fprintf (dump_file, "\n"); ++ dump_type_with_uid ("Func's type: ", TREE_TYPE (n->decl)); ++ if (dump_file && (dump_flags & TDF_STATS)) ++ { ++ struct cgraph_edge *e; ++ stats.nf++; ++ for (e = n->indirect_calls; e; e = e->next_callee) ++ stats.nindir++; ++ for (e = n->callees; e; e = e->next_callee) ++ stats.ncalls++; ++ stats.ncalls += stats.nindir; ++ if (n->callers == NULL) ++ { ++ fprintf (dump_file, "Function has NO callers\n"); ++ stats.nind_only++; ++ } ++ } ++} ++ ++static void ++dump_indirect_call_site (tree call_fn, tree call_fn_ty) ++{ ++ fprintf (dump_file, "Indirect call site: "); ++ print_generic_expr (dump_file, call_fn); ++ dump_type_with_uid ("\nFunction pointer type: ", call_fn_ty); ++} ++ ++static void ++erase_from_unreachable (unsigned type_uid, type_set &unreachable) ++{ ++ unreachable.erase (type_uid); ++ if (!fta_map->count (type_uid)) ++ 
return; ++ type_set *set = (*fta_map)[type_uid]; ++ for (type_set::const_iterator it = set->begin (); it != set->end (); it++) ++ unreachable.erase (*it); ++} ++ ++static void ++dump_found_fdecls (decl_set *decls, unsigned ctype_uid) ++{ ++ fprintf (dump_file, "Signature analysis FOUND decls (%d):", ctype_uid); ++ for (decl_set::const_iterator it = decls->begin (); it != decls->end (); it++) ++ { ++ print_generic_expr (dump_file, *it); ++ fprintf (dump_file, "(%d), ", DECL_UID (*it)); ++ } ++ if (unsafe_types->count (ctype_uid)) ++ fprintf (dump_file, "type is UNSAFE"); ++ fprintf (dump_file, "\n"); ++} ++ ++static void ++count_found_targets (struct icp_stats &stats, unsigned size) ++{ ++ gcc_assert (size > 0); ++ stats.ntargs[size > MAX_TARG_STAT ? MAX_TARG_STAT : size - 1]++; ++} ++ ++/* Promote the indirect call. */ ++ ++static void ++promote_call (struct cgraph_edge *e, struct cgraph_node *n, ++ struct cgraph_node *likely_target, struct icp_stats *stats) ++{ ++ if (dump_enabled_p ()) ++ { ++ dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, e->call_stmt, ++ "promoting indirect call in %s to %s\n", ++ n->dump_name (), likely_target->dump_name ()); ++ } ++ if (!likely_target->can_be_discarded_p ()) ++ { ++ symtab_node *sn = likely_target->noninterposable_alias (); ++ cgraph_node *alias = dyn_cast (sn); ++ if (alias) ++ likely_target = alias; ++ } ++ gimple *new_call; ++ if (flag_icp_speculatively) ++ { ++ e->make_speculative (likely_target, e->count.apply_scale (5, 10)); ++ new_call = e->call_stmt; ++ stats->nspec++; ++ } ++ else ++ { ++ cgraph_edge *e2 = cgraph_edge::make_direct (e, likely_target); ++ new_call = cgraph_edge::redirect_call_stmt_to_callee (e2); ++ stats->nsubst++; ++ } ++ if (dump_file) ++ { ++ fprintf (dump_file, "The call is substituted by: "); ++ print_gimple_stmt (dump_file, new_call, 0); ++ fprintf (dump_file, "\n"); ++ } ++} ++ ++/* Find functions which are called only indirectly and if they are not in ++ fs_map, they can be removed. 
For now it is used only to print stats. */ ++ ++static int ++find_functions_can_be_removed (type_set &unreachable) ++{ ++ int nremove = 0; ++ if (dump_file) ++ fprintf (dump_file, "\nRemove unused functions:\n"); ++ struct cgraph_node *n; ++ FOR_EACH_FUNCTION (n) ++ { ++ gcc_assert (n->decl && TREE_TYPE (n->decl)); ++ if (n->callers != NULL) ++ continue; ++ tree ftype = TREE_TYPE (n->decl); ++ tree ctype = TYPE_CANONICAL (ftype); ++ if (!ctype || !unreachable.count (TYPE_UID (ctype)) ++ || unsafe_types->count (TYPE_UID (ftype)) ++ || TREE_CODE (ftype) == METHOD_TYPE || n->callers != NULL ++ || !n->definition || n->alias || n->thunk || n->clones) ++ continue; ++ if (dump_file) ++ fprintf (dump_file, "%s is not used\n", n->dump_name ()); ++ nremove++; ++ } ++ return nremove; ++} ++ ++static void ++dump_stats (struct icp_stats &st) ++{ ++ fprintf (dump_file, "\nSTATS: %i candidates for indirect call promotion," ++ " %i substituted, %i speculatively promoted, %i cold\n" ++ "%i have multiple targets, %i already speculated, %i external," ++ " %i not defined, %i artificial, %i polymorphic calls," ++ " %i overwritable\n", st.nicp, st.nsubst, st.nspec, st.ncold, ++ st.nmultiple, st.nspeculated, st.nexternal, st.nnotdefined, ++ st.nartificial, st.npolymorphic, st.noverwritable); ++ if (!(dump_flags & TDF_STATS)) ++ return; ++ fprintf (dump_file, "EXTRA STATS: %i functions, %i indirect calls," ++ " %i total calls, %i called only indirectly, %i may be removed\n" ++ "Indirect call sites with found targets ", st.nf, st.nindir, ++ st.ncalls, st.nind_only, st.nremove); ++ for (unsigned i = 0; i < MAX_TARG_STAT; i++) ++ fprintf (dump_file, "%u:%i, ", i + 1, st.ntargs[i]); ++ fprintf (dump_file, "more:%i\n", st.ntargs[MAX_TARG_STAT]); ++} ++ ++/* Optimize indirect calls. When an indirect call has only one target, ++ promote it into a direct call. */ ++ ++static bool ++optimize_indirect_calls () ++{ ++ /* TODO: maybe move to the top of ipa_icp. 
*/ ++ if (has_address_taken_functions_with_varargs) ++ { ++ if (dump_file) ++ fprintf (dump_file, "\n\nAddress taken function with varargs is found." ++ " Skip the optimization.\n"); ++ return false; ++ } ++ struct icp_stats stats = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ++ 0, 0, 0, 0, 0, {0, 0, 0, 0, 0}}; ++ /* At first assume all function types are unreadchable. */ ++ type_set unreachable_ftypes; ++ if (dump_file && (dump_flags & TDF_STATS)) ++ for (type_decl_map::iterator it = fs_map->begin (); ++ it != fs_map->end (); ++it) ++ unreachable_ftypes.insert (it->first); ++ ++ struct cgraph_node *n; ++ FOR_EACH_DEFINED_FUNCTION (n) ++ { ++ if (dump_file) ++ dump_processing_function (n, stats); ++ struct cgraph_edge *e; ++ bool update = false; ++ if (!opt_for_fn (n->decl, flag_icp) || !n->has_gimple_body_p () ++ || n->inlined_to || !n->indirect_calls) ++ { ++ if (dump_file) ++ fprintf (dump_file, "Skip the function\n"); ++ continue; ++ } ++ /* If the function has indirect calls which are not polymorphic, ++ process its body, otherwise continue. */ ++ bool non_polymorphic_calls = false; ++ for (e = n->indirect_calls; e; e = e->next_callee) ++ if (!e->indirect_info->polymorphic) ++ { ++ non_polymorphic_calls = true; ++ break; ++ } ++ if (!non_polymorphic_calls) ++ { ++ if (dump_file) ++ fprintf (dump_file, "All indirect calls are polymorphic," ++ "skip...\n"); ++ continue; ++ } ++ /* Get the function body to operate with call statements. */ ++ n->get_body (); ++ /* Walk indirect call sites and apply the optimization. 
*/ ++ cgraph_edge *next; ++ for (e = n->indirect_calls; e; e = next) ++ { ++ next = e->next_callee; ++ if (e->indirect_info->polymorphic) ++ { ++ if (dump_file) ++ fprintf (dump_file, "Target is polymorphic, skip...\n\n"); ++ stats.npolymorphic++; ++ continue; ++ } ++ stats.nicp++; ++ struct cgraph_node *likely_target = NULL; ++ gcall *stmt = e->call_stmt; ++ gcc_assert (stmt != NULL); ++ tree call_fn = gimple_call_fn (stmt); ++ tree call_fn_ty = TREE_TYPE (call_fn); ++ if (dump_file) ++ dump_indirect_call_site (call_fn, call_fn_ty); ++ tree decl = NULL_TREE; ++ if (POINTER_TYPE_P (call_fn_ty)) ++ { ++ if (dump_file) ++ dump_type_with_uid ("Pointee type: ", TREE_TYPE (call_fn_ty)); ++ if (dump_file && (dump_flags & TDF_STATS)) ++ erase_from_unreachable (TYPE_UID (TREE_TYPE (call_fn_ty)), ++ unreachable_ftypes); ++ /* Try to use the signature analysis results. */ ++ tree ctype = TYPE_CANONICAL (TREE_TYPE (call_fn_ty)); ++ unsigned ctype_uid = ctype ? TYPE_UID (ctype) : 0; ++ if (ctype_uid && fs_map->count (ctype_uid)) ++ { ++ if (dump_flags && (dump_flags & TDF_STATS)) ++ erase_from_unreachable (ctype_uid, unreachable_ftypes); ++ decl_set *decls = (*fs_map)[ctype_uid]; ++ if (dump_file) ++ dump_found_fdecls (decls, ctype_uid); ++ /* TODO: optimize for multple targets. 
*/ ++ if (!unsafe_types->count (ctype_uid) && decls->size () == 1) ++ { ++ decl = *(decls->begin ()); ++ likely_target = cgraph_node::get (decl); ++ } ++ if (!unsafe_types->count (ctype_uid) ++ && (dump_flags & TDF_STATS)) ++ count_found_targets (stats, decls->size ()); ++ } ++ } ++ if (!decl || !likely_target) ++ { ++ if (dump_file) ++ fprintf (dump_file, "Callee is unknown\n\n"); ++ continue; ++ } ++ if (TREE_CODE (TREE_TYPE (decl)) == METHOD_TYPE) ++ { ++ if (dump_file) ++ fprintf (dump_file, "Callee is method\n\n"); ++ continue; ++ } ++ if (e->speculative) ++ { ++ if (dump_file) ++ fprintf (dump_file, "Call is already speculated\n\n"); ++ stats.nspeculated++; ++ continue; ++ } ++ if (!likely_target->definition) ++ { ++ if (dump_file) ++ fprintf (dump_file, "Target is not a definition\n\n"); ++ stats.nnotdefined++; ++ continue; ++ } ++ /* Do not introduce new references to external symbols. While we ++ can handle these just well, it is common for programs to ++ incorrectly with headers defining methods they are linked ++ with. */ ++ if (DECL_EXTERNAL (likely_target->decl)) ++ { ++ if (dump_file) ++ fprintf (dump_file, "Target is external\n\n"); ++ stats.nexternal++; ++ continue; ++ } ++ /* Don't use an implicitly-declared destructor (c++/58678). 
*/ ++ struct cgraph_node *non_thunk_target ++ = likely_target->function_symbol (); ++ if (DECL_ARTIFICIAL (non_thunk_target->decl)) ++ { ++ if (dump_file) ++ fprintf (dump_file, "Target is artificial\n\n"); ++ stats.nartificial++; ++ continue; ++ } ++ if (likely_target->get_availability () <= AVAIL_INTERPOSABLE ++ && likely_target->can_be_discarded_p ()) ++ { ++ if (dump_file) ++ fprintf (dump_file, "Target is overwritable\n\n"); ++ stats.noverwritable++; ++ continue; ++ } ++ else if (dbg_cnt (icp)) ++ { ++ promote_call (e, n, likely_target, &stats); ++ update = true; ++ } ++ } ++ if (update) ++ ipa_update_overall_fn_summary (n); ++ } ++ ++ if (dump_file && (dump_flags & TDF_STATS)) ++ stats.nremove = find_functions_can_be_removed (unreachable_ftypes); ++ ++ if (dump_file) ++ dump_stats (stats); ++ return stats.nsubst || stats.nspec; ++} ++ ++/* Delete the given MAP with allocated sets. One set may be associated with ++ more then one type/decl. */ ++ ++template ++static void ++remove_type_alias_map (MAP *map) ++{ ++ std::set processed_sets; ++ for (typename MAP::iterator it = map->begin (); it != map->end (); it++) ++ { ++ typename MAP::mapped_type set = it->second; ++ if (processed_sets.count (set) != 0) ++ continue; ++ processed_sets.insert (set); ++ delete set; ++ } ++ delete map; ++} ++ ++/* The ipa indirect call promotion pass. Run required analysis and optimize ++ indirect calls. ++ When indirect call has only one target, promote it into a direct call. */ ++ ++static unsigned int ++ipa_icp (void) ++{ ++ ta_map = new type_alias_map; ++ fta_map = new type_alias_map; ++ cbase_to_ptype = new type_alias_map; ++ fs_map = new type_decl_map; ++ ctype_map = new type_map; ++ unsafe_types = new type_set; ++ type_uid_map = new uid_to_type_map; ++ ++ /* Find type aliases, fill the function signature map and ++ optimize indirect calls. 
*/ ++ collect_function_type_aliases (); ++ collect_function_signatures (); ++ bool optimized = optimize_indirect_calls (); ++ ++ remove_type_alias_map (ta_map); ++ remove_type_alias_map (fta_map); ++ remove_type_alias_map (cbase_to_ptype); ++ remove_type_alias_map (fs_map); ++ delete ctype_map; ++ delete unsafe_types; ++ delete type_uid_map; ++ ++ return optimized ? TODO_remove_functions : 0; ++} ++ ++namespace { ++ ++const pass_data pass_data_ipa_icp = ++{ ++ IPA_PASS, /* type */ ++ "icp", /* name */ ++ OPTGROUP_NONE, /* optinfo_flags */ ++ TV_IPA_ICP, /* tv_id */ ++ 0, /* properties_required */ ++ 0, /* properties_provided */ ++ 0, /* properties_destroyed */ ++ 0, /* todo_flags_start */ ++ 0, /* todo_flags_finish */ ++}; ++ ++class pass_ipa_icp : public ipa_opt_pass_d ++{ ++public: ++ pass_ipa_icp (gcc::context *ctxt) ++ : ipa_opt_pass_d (pass_data_ipa_icp, ctxt, ++ NULL, /* generate_summary */ ++ NULL, /* write_summary */ ++ NULL, /* read_summary */ ++ NULL, /* write_optimization_summary */ ++ NULL, /* read_optimization_summary */ ++ NULL, /* stmt_fixup */ ++ 0, /* function_transform_todo_flags_start */ ++ NULL, /* function_transform */ ++ NULL) /* variable_transform */ ++ {} ++ ++ /* opt_pass methods: */ ++ virtual bool gate (function *) ++ { ++ return (optimize && flag_icp && !seen_error () ++ && (in_lto_p || flag_whole_program)); ++ } ++ ++ virtual unsigned int execute (function *) { return ipa_icp (); } ++ ++}; // class pass_ipa_icp ++ ++} // anon namespace ++ ++ipa_opt_pass_d * ++make_pass_ipa_icp (gcc::context *ctxt) ++{ ++ return new pass_ipa_icp (ctxt); ++} + + #include "gt-ipa-devirt.h" +diff --git a/gcc/passes.def b/gcc/passes.def +index 9692066e4..d6db9be6e 100644 +--- a/gcc/passes.def ++++ b/gcc/passes.def +@@ -156,6 +156,7 @@ along with GCC; see the file COPYING3. 
If not see + NEXT_PASS (pass_ipa_profile); + NEXT_PASS (pass_ipa_icf); + NEXT_PASS (pass_ipa_devirt); ++ NEXT_PASS (pass_ipa_icp); + NEXT_PASS (pass_ipa_cp); + NEXT_PASS (pass_ipa_sra); + NEXT_PASS (pass_ipa_cdtor_merge); +diff --git a/gcc/testsuite/gcc.dg/icp1.c b/gcc/testsuite/gcc.dg/icp1.c +new file mode 100644 +index 000000000..c2117f738 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/icp1.c +@@ -0,0 +1,40 @@ ++/* { dg-do run } */ ++/* { dg-options "-O2 -flto -ficp -fdump-ipa-icp=./icp1.c.077i.icp" } */ ++ ++int dummy = 0; ++ ++typedef int (*ftype1)(int a); ++typedef float (*ftype2)(int a); ++ ++ftype1 func1; ++ ++struct { ++ int a; ++ int* b; ++ ftype1 myf1; ++ ftype2 myf2; ++} my_str; ++ ++int foo(int a) { ++ my_str.myf1 = func1; ++ if (a % 2 == 0) ++ dummy += dummy % (dummy - a); ++ return a + 1; ++} ++ ++float bar(int a) { ++ my_str.myf2 = &bar; ++ func1 = &foo; ++ return foo(a); ++} ++ ++int main() { ++ bar(1); ++ my_str.myf2(3); ++ return (my_str.myf1(2) + func1(4)) != 8; ++} ++ ++/* { dg-final { scan-ipa-dump "The call is substituted by:.*= foo \\(4\\);" "icp" } } */ ++/* { dg-final { scan-ipa-dump "The call is substituted by:.*= foo \\(2\\);" "icp" } } */ ++/* { dg-final { scan-ipa-dump "The call is substituted by: bar \\(3\\);" "icp" } } */ ++/* { dg-final { scan-ipa-dump "STATS: 3 candidates for indirect call promotion, 3 substituted, 0 speculatively promoted, 0 cold" "icp" } } */ +diff --git a/gcc/testsuite/gcc.dg/icp2.c b/gcc/testsuite/gcc.dg/icp2.c +new file mode 100644 +index 000000000..03d31d407 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/icp2.c +@@ -0,0 +1,38 @@ ++/* { dg-do run } */ ++/* { dg-options "-O2 -flto -ficp -fdump-ipa-icp=./icp2.c.077i.icp" } */ ++ ++int dummy = 0; ++ ++typedef int (*ftype1)(int a); ++typedef float (*ftype2)(int a); ++ ++ftype1 func1; ++ ++struct { ++ int a; ++ int* b; ++ ftype1 myf1; ++ ftype2 myf2; ++} my_str; ++ ++int foo(int a) { ++ my_str.myf1 = func1; ++ if (a % 2 == 0) ++ dummy += dummy % (dummy - a); ++ return a + 
1; ++} ++ ++float bar(int a) { ++ my_str.myf2 = dummy ? (ftype2) &foo : &bar; ++ func1 = (ftype1) &bar; ++ return foo(a); ++} ++ ++int main() { ++ bar(1); ++ my_str.myf2(3); ++ return (my_str.myf1(2) + func1(4)) != 8; ++} ++ ++/* { dg-final { scan-ipa-dump-not "The call is substituted by.*" "icp" } } */ ++/* { dg-final { scan-ipa-dump "STATS: 3 candidates for indirect call promotion, 0 substituted, 0 speculatively promoted, 0 cold" "icp" } } */ +diff --git a/gcc/testsuite/gcc.dg/icp3.c b/gcc/testsuite/gcc.dg/icp3.c +new file mode 100644 +index 000000000..2a7d1e6f5 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/icp3.c +@@ -0,0 +1,52 @@ ++/* { dg-do run } */ ++/* { dg-options "-O2 -flto -ficp -fdump-ipa-icp=./icp3.c.077i.icp" } */ ++ ++#include ++ ++int dummy = 0; ++ ++typedef int (*ftype1)(int a); ++typedef float (*ftype2)(int a); ++typedef ftype1 (*ftype3) (ftype2); ++ ++ftype1 func1; ++ ++struct { ++ int a; ++ int* b; ++ ftype1 myf1; ++ ftype2 myf2; ++ ftype3 myf3; ++} my_str; ++ ++ftype1 boo(ftype2 a) { ++ printf ("Call boo\n"); ++ return (ftype1) a; ++} ++ ++int foo(int a) { ++ printf ("Call foo\n"); ++ my_str.myf1 = func1; ++ if (a % 2 == 0) ++ dummy += dummy % (dummy - a); ++ return a + 1; ++} ++ ++float bar(int a) { ++ printf("Call bar\n"); ++ my_str.myf2 = (ftype2) my_str.myf3((ftype2) foo); ++ func1 = &foo; ++ return foo(a); ++} ++ ++int main() { ++ my_str.myf3 = &boo; ++ bar(1); ++ my_str.myf2(3); ++ return (my_str.myf1(2) + func1(4)) != 8; ++} ++ ++/* { dg-final { scan-ipa-dump "The call is substituted by:.*= foo \\(4\\);" "icp" } } */ ++/* { dg-final { scan-ipa-dump "The call is substituted by:.*= foo \\(2\\);" "icp" } } */ ++/* { dg-final { scan-ipa-dump "The call is substituted by: foo \\(3\\);" "icp" } } */ ++/* { dg-final { scan-ipa-dump "STATS: 4 candidates for indirect call promotion, 3 substituted, 0 speculatively promoted, 0 cold" "icp" } } */ +diff --git a/gcc/testsuite/gcc.dg/icp4.c b/gcc/testsuite/gcc.dg/icp4.c +new file mode 100644 +index 
000000000..e3e1d5116 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/icp4.c +@@ -0,0 +1,55 @@ ++/* { dg-do run } */ ++/* { dg-options "-O2 -flto -ficp -fdump-ipa-icp=./icp4.c.077i.icp" } */ ++ ++#include ++ ++int dummy = 0; ++ ++typedef int (*ftype1)(int a); ++typedef float (*ftype2)(int a); ++typedef ftype1 (*ftype3) (ftype2); ++ ++ftype1 func1; ++ftype1 boo(ftype2 a); ++int foo(int a); ++float bar(int a); ++ ++typedef struct { ++ int a; ++ int* b; ++ ftype1 myf1; ++ ftype2 myf2; ++ ftype3 myf3; ++} T; ++ ++T my_str = {0, (int*) &dummy, (ftype1) &boo, (ftype2) &foo, (ftype3) &bar}; ++ ++ftype1 boo(ftype2 a) { ++ printf ("Call boo\n"); ++ return (ftype1) a; ++} ++ ++int foo(int a) { ++ printf ("Call foo\n"); ++ my_str.myf1 = func1; ++ if (a % 2 == 0) ++ dummy += dummy % (dummy - a); ++ return a + 1; ++} ++ ++float bar(int a) { ++ printf("Call bar\n"); ++ my_str.myf2 = (ftype2) my_str.myf3((ftype2) foo); ++ func1 = &foo; ++ return foo(a); ++} ++ ++int main() { ++ my_str.myf3 = &boo; ++ bar(1); ++ my_str.myf2(3); ++ return (my_str.myf1(2) + func1(4)) != 8; ++} ++ ++/* { dg-final { scan-ipa-dump-not "The call is substituted by.*" "icp" } } */ ++/* { dg-final { scan-ipa-dump "STATS: 4 candidates for indirect call promotion, 0 substituted, 0 speculatively promoted, 0 cold" "icp" } } */ +diff --git a/gcc/testsuite/gcc.dg/icp5.c b/gcc/testsuite/gcc.dg/icp5.c +new file mode 100644 +index 000000000..c7709243c +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/icp5.c +@@ -0,0 +1,66 @@ ++/* { dg-do run } */ ++/* { dg-options "-O2 -flto -ficp -fdump-ipa-icp=./icp5.c.077i.icp" } */ ++ ++#include ++ ++int dummy = 0; ++ ++typedef int (*ftype1)(int a); ++typedef float (*ftype2)(int a); ++typedef ftype1 (*ftype3) (ftype2); ++ ++ftype1 func1; ++ftype1 boo(ftype2 a); ++int foo(int a); ++float bar(int a); ++ ++typedef struct { ++ int a; ++ int* b; ++ ftype1 myf1; ++ ftype2 myf2; ++ ftype3 myf3; ++} T; ++ ++T my_str; ++ ++typedef struct { ++ int a; ++ int* b; ++ ftype3 myf1; ++ ftype2 myf2; ++ 
ftype1 myf3; ++} T1; ++ ++T1 my1 = {0, &dummy, boo, &bar, &foo}; ++ ++ftype1 boo(ftype2 a) { ++ printf("Call boo\n"); ++ return (ftype1) a; ++} ++ ++int foo(int a) { ++ printf("Call foo\n"); ++ my_str.myf1 = func1; ++ if (a % 2 == 0) ++ dummy += dummy % (dummy - a); ++ return a + 1; ++} ++ ++float bar(int a) { ++ printf("Call bar\n"); ++ my_str.myf2 = (ftype2) my_str.myf3((ftype2) foo); ++ func1 = &foo; ++ return foo(a); ++} ++ ++int main() { ++ my_str = *(T*)&my1; ++ my_str.myf3 = &boo; ++ bar(1); ++ my_str.myf2(3); ++ return (my_str.myf1(2) + func1(4)) != 8; ++} ++ ++/* { dg-final { scan-ipa-dump-not "The call is substituted by.*" "icp" } } */ ++/* { dg-final { scan-ipa-dump "STATS: 4 candidates for indirect call promotion, 0 substituted, 0 speculatively promoted, 0 cold" "icp" } } */ +diff --git a/gcc/testsuite/gcc.dg/icp6.c b/gcc/testsuite/gcc.dg/icp6.c +new file mode 100644 +index 000000000..5a9f15045 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/icp6.c +@@ -0,0 +1,66 @@ ++/* { dg-do run } */ ++/* { dg-options "-O2 -flto -ficp -fdump-ipa-icp=./icp6.c.077i.icp -Wno-int-conversion -Wno-incompatible-pointer-types" } */ ++int dummy = 0; ++ ++typedef int (*ftype1)(int a); ++typedef float (*ftype2)(int a); ++typedef int (*ftype3)(); ++typedef int (*ftype4)(int a, int b); ++ ++ftype1 func1; ++ftype4 func2; ++ ++struct { ++ int a; ++ int* b; ++ ftype1 myf1; ++ ftype2 myf2; ++ ftype3 myf3; ++} my_str; ++ ++int foo3(float a) { ++ return dummy; ++} ++ ++int foo4(int a, int b) { ++ return a*b; ++} ++ ++int foo(int a) { ++ my_str.myf1 = func1; ++ if (a % 2 == 0) ++ dummy += dummy % (dummy - a); ++ return a + 1; ++} ++ ++int foo2(float a) { ++ func1 = (ftype1) &foo; ++ func2 = &foo4; ++ return dummy + foo3 (a); ++} ++ ++float bar2(int a) { ++ my_str.myf2 = (ftype2)(0x864213); ++ func2 = 0x65378; ++ return foo(a); ++} ++ ++float bar(int a) { ++ my_str.myf3 = &foo2; ++ my_str.myf2 = &bar; ++ func1 = (ftype1) &dummy; ++ func2 = (ftype4) &bar2; ++ return foo(a); ++} ++ ++int 
main() { ++ bar(1); ++ bar2(1); ++ bar(0); ++ my_str.myf2(3); ++ ((ftype1) my_str.myf3)(0.0); ++ int sum = func1(4); ++ return (sum + my_str.myf1(2) + func2(5, 6)) != 38; ++} ++/* { dg-final { scan-ipa-dump "The call is substituted by.*foo2 \\(0\\);" "icp" } } */ ++/* { dg-final { scan-ipa-dump "STATS: 5 candidates for indirect call promotion, 1 substituted, 0 speculatively promoted, 0 cold" "icp" } } */ +diff --git a/gcc/testsuite/gcc.dg/icp7.c b/gcc/testsuite/gcc.dg/icp7.c +new file mode 100644 +index 000000000..fa52197f4 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/icp7.c +@@ -0,0 +1,48 @@ ++/* { dg-do run } */ ++/* { dg-options "-O2 -flto -ficp -fdump-ipa-icp=./icp7.c.077i.icp" } */ ++ ++#include ++ ++int dummy = 0; ++ ++typedef int (*ftype1)(int a); ++typedef float (*ftype2)(int a); ++ ++ftype1 func1; ++ ++struct { ++ int a; ++ int* b; ++ ftype1 myf1; ++ ftype2 myf2; ++} my_str; ++ ++int boo(int a, ...) { ++ va_list ap; ++ va_start(ap, a); ++ if (a == 0) ++ dummy += va_arg(ap, int); ++ va_end(ap); ++ return dummy; ++} ++ ++int foo(int a) { ++ my_str.myf1 = func1; ++ if (a % 2 == 0) ++ dummy += dummy % (dummy - a); ++ return a + 1; ++} ++ ++float bar(int a) { ++ my_str.myf2 = &bar; ++ func1 = (ftype1) &boo; ++ return foo(a); ++} ++ ++int main() { ++ bar(1); ++ my_str.myf2(3); ++ return (my_str.myf1(2) + func1(4)); ++} ++ ++/* { dg-final { scan-ipa-dump "Address taken function with varargs is found. Skip the optimization." 
"icp" } } */ +diff --git a/gcc/timevar.def b/gcc/timevar.def +index 98a5a490f..ca4156066 100644 +--- a/gcc/timevar.def ++++ b/gcc/timevar.def +@@ -71,6 +71,7 @@ DEFTIMEVAR (TV_CGRAPHOPT , "callgraph optimization") + DEFTIMEVAR (TV_CGRAPH_FUNC_EXPANSION , "callgraph functions expansion") + DEFTIMEVAR (TV_CGRAPH_IPA_PASSES , "callgraph ipa passes") + DEFTIMEVAR (TV_IPA_ODR , "ipa ODR types") ++DEFTIMEVAR (TV_IPA_ICP , "ipa indirect call promotion") + DEFTIMEVAR (TV_IPA_FNSUMMARY , "ipa function summary") + DEFTIMEVAR (TV_IPA_UNREACHABLE , "ipa dead code removal") + DEFTIMEVAR (TV_IPA_INHERITANCE , "ipa inheritance graph") +diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h +index 56898e019..5f09e4f8b 100644 +--- a/gcc/tree-pass.h ++++ b/gcc/tree-pass.h +@@ -524,6 +524,7 @@ extern ipa_opt_pass_d *make_pass_ipa_cp (gcc::context *ctxt); + extern ipa_opt_pass_d *make_pass_ipa_sra (gcc::context *ctxt); + extern ipa_opt_pass_d *make_pass_ipa_icf (gcc::context *ctxt); + extern ipa_opt_pass_d *make_pass_ipa_devirt (gcc::context *ctxt); ++extern ipa_opt_pass_d *make_pass_ipa_icp (gcc::context *ctxt); + extern ipa_opt_pass_d *make_pass_ipa_odr (gcc::context *ctxt); + extern ipa_opt_pass_d *make_pass_ipa_reference (gcc::context *ctxt); + extern ipa_opt_pass_d *make_pass_ipa_pure_const (gcc::context *ctxt); +-- +2.33.0 + diff --git a/0041-Port-fixes-in-icp-to-GCC-12.patch b/0041-Port-fixes-in-icp-to-GCC-12.patch new file mode 100644 index 0000000000000000000000000000000000000000..723f8b074caf1b33cdbca7e49ece489fcb4a7ba7 --- /dev/null +++ b/0041-Port-fixes-in-icp-to-GCC-12.patch @@ -0,0 +1,100 @@ +From aaa117a9ff58fb208e8c8859e075ca425f995f63 Mon Sep 17 00:00:00 2001 +From: Diachkov Ilia +Date: Tue, 27 Feb 2024 07:43:57 +0800 +Subject: [PATCH 07/18] Port fixes in icp to GCC 12 + +--- + gcc/ipa-devirt.cc | 37 ++++++++++++++++++++++++++++++------- + 1 file changed, 30 insertions(+), 7 deletions(-) + +diff --git a/gcc/ipa-devirt.cc b/gcc/ipa-devirt.cc +index 383839189..318535d06 100644 
+--- a/gcc/ipa-devirt.cc ++++ b/gcc/ipa-devirt.cc +@@ -4431,6 +4431,11 @@ print_type_set(unsigned ftype_uid, type_alias_map *map) + if (!map->count (ftype_uid)) + return; + type_set* s = (*map)[ftype_uid]; ++ if (!s) ++ { ++ fprintf (dump_file, "%d (no set)", ftype_uid); ++ return; ++ } + for (type_set::const_iterator it = s->begin (); it != s->end (); it++) + fprintf (dump_file, it == s->begin () ? "%d" : ", %d", *it); + } +@@ -4696,12 +4701,19 @@ maybe_register_aliases (tree type1, tree type2) + if (register_ailas_type (type1, type2, ta_map)) + analyze_pointees (type1, type2); + } ++ unsigned type1_uid = TYPE_UID (type1); ++ unsigned type2_uid = TYPE_UID (type2); ++ if (type_uid_map->count (type1_uid) == 0) ++ (*type_uid_map)[type1_uid] = type1; ++ if (type_uid_map->count (type2_uid) == 0) ++ (*type_uid_map)[type2_uid] = type2; ++ + /* If function and non-function type pointers alias, + the function type is unsafe. */ + if (FUNCTION_POINTER_TYPE_P (type1) && !FUNCTION_POINTER_TYPE_P (type2)) +- unsafe_types->insert (TYPE_UID (type1)); ++ unsafe_types->insert (type1_uid); + if (FUNCTION_POINTER_TYPE_P (type2) && !FUNCTION_POINTER_TYPE_P (type1)) +- unsafe_types->insert (TYPE_UID (type2)); ++ unsafe_types->insert (type2_uid); + + /* Try to figure out with pointers to incomplete types. 
*/ + if (POINTER_TYPE_P (type1) && POINTER_TYPE_P (type2)) +@@ -4825,10 +4837,12 @@ compare_block_and_init_type (tree block, tree t1) + static void + analyze_global_var (varpool_node *var) + { +- var->get_constructor(); + tree decl = var->decl; +- if (TREE_CODE (decl) == SSA_NAME || !DECL_INITIAL (decl) +- || integer_zerop (DECL_INITIAL (decl))) ++ if (decl || !DECL_INITIAL (decl)) ++ return; ++ var->get_constructor (); ++ if (TREE_CODE (decl) == SSA_NAME || integer_zerop (DECL_INITIAL (decl)) ++ || TREE_CODE (DECL_INITIAL (decl)) == ERROR_MARK) + return; + + if (dump_file && (dump_flags & TDF_DETAILS)) +@@ -4998,7 +5012,9 @@ analyze_assign_stmt (gimple *stmt) + { + rhs = TREE_OPERAND (rhs, 0); + if (VAR_OR_FUNCTION_DECL_P (rhs) || TREE_CODE (rhs) == STRING_CST +- || TREE_CODE (rhs) == ARRAY_REF || TREE_CODE (rhs) == PARM_DECL) ++ || TREE_CODE (rhs) == ARRAY_REF || TREE_CODE (rhs) == PARM_DECL ++ || TREE_CODE (rhs) == LABEL_DECL || TREE_CODE (rhs) == CONST_DECL ++ || TREE_CODE (rhs) == RESULT_DECL) + rhs_type = build_pointer_type (TREE_TYPE (rhs)); + else if (TREE_CODE (rhs) == COMPONENT_REF) + { +@@ -5012,7 +5028,12 @@ analyze_assign_stmt (gimple *stmt) + gcc_assert (POINTER_TYPE_P (rhs_type)); + } + else +- gcc_unreachable(); ++ { ++ fprintf (dump_file, "\nUnsupported rhs type %s in assign stmt: ", ++ get_tree_code_name (TREE_CODE (rhs))); ++ print_gimple_stmt (dump_file, stmt, 0); ++ gcc_unreachable (); ++ } + } + else + rhs_type = TREE_TYPE (rhs); +@@ -5710,6 +5731,8 @@ merge_fs_map_for_ftype_aliases () + decl_set *d_set = it1->second; + tree type = (*type_uid_map)[it1->first]; + type_set *set = (*fta_map)[it1->first]; ++ if (!set) ++ continue; + for (type_set::const_iterator it2 = set->begin (); + it2 != set->end (); it2++) + { +-- +2.33.0 + diff --git a/0042-Add-split-complex-instructions-pass.patch b/0042-Add-split-complex-instructions-pass.patch new file mode 100644 index 0000000000000000000000000000000000000000..b73affdc48bd22b9c62c6c491c28a45b27c33a9f --- 
/dev/null +++ b/0042-Add-split-complex-instructions-pass.patch @@ -0,0 +1,1245 @@ +From 9a8e5716543972dec36bae1f9d380d27bfbcdae1 Mon Sep 17 00:00:00 2001 +From: Agrachev Andrey WX1228450 +Date: Mon, 21 Aug 2023 12:35:19 +0300 +Subject: [PATCH 09/18] Add split-complex-instructions pass + + - Add option -fsplit-ldp-stp + - Add functionality to detect and split depended from store LDP instructions. + - Add -param=param-ldp-dependency-search-range= to configure ldp dependency search range + - Add RTL tests + +Co-authored-by: Chernonog Vyacheslav 00812786 +Co-authored-by: Zinin Ivan WX1305386 +Co-authored-by: Gadzhiev Emin WX1195297 +--- + gcc/common.opt | 5 + + gcc/config/aarch64/aarch64.cc | 42 ++ + gcc/doc/tm.texi | 8 + + gcc/doc/tm.texi.in | 4 + + gcc/params.opt | 3 + + gcc/passes.def | 1 + + gcc/sched-rgn.cc | 704 +++++++++++++++++- + gcc/target.def | 10 + + .../gcc.dg/rtl/aarch64/test-ldp-dont-split.c | 74 ++ + .../rtl/aarch64/test-ldp-split-rearrange.c | 40 + + .../gcc.dg/rtl/aarch64/test-ldp-split.c | 174 +++++ + gcc/timevar.def | 1 + + gcc/tree-pass.h | 1 + + 13 files changed, 1066 insertions(+), 1 deletion(-) + create mode 100644 gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-dont-split.c + create mode 100644 gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-split-rearrange.c + create mode 100644 gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-split.c + +diff --git a/gcc/common.opt b/gcc/common.opt +index a42bee250..c0e3f5687 100644 +--- a/gcc/common.opt ++++ b/gcc/common.opt +@@ -1797,6 +1797,11 @@ floop-nest-optimize + Common Var(flag_loop_nest_optimize) Optimization + Enable the loop nest optimizer. + ++fsplit-ldp-stp ++Common Var(flag_split_ldp_stp) Optimization ++Split load/store pair instructions into separate load/store operations ++for better performance. ++ + fstrict-volatile-bitfields + Common Var(flag_strict_volatile_bitfields) Init(-1) Optimization + Force bitfield accesses to match their type width. 
+diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc +index 04072ca25..48e2eded0 100644 +--- a/gcc/config/aarch64/aarch64.cc ++++ b/gcc/config/aarch64/aarch64.cc +@@ -27507,6 +27507,48 @@ aarch64_run_selftests (void) + + #endif /* #if CHECKING_P */ + ++/* TODO: refuse to use ranges intead of full list of an instruction codes. */ ++ ++bool ++is_aarch64_ldp_insn (int icode) ++{ ++ if ((icode >= CODE_FOR_load_pair_sw_sisi ++ && icode <= CODE_FOR_load_pair_dw_tftf) ++ || (icode >= CODE_FOR_loadwb_pairsi_si ++ && icode <= CODE_FOR_loadwb_pairtf_di) ++ || (icode >= CODE_FOR_load_pairv8qiv8qi ++ && icode <= CODE_FOR_load_pairdfdf) ++ || (icode >= CODE_FOR_load_pairv16qiv16qi ++ && icode <= CODE_FOR_load_pairv8bfv2df) ++ || (icode >= CODE_FOR_load_pair_lanesv8qi ++ && icode <= CODE_FOR_load_pair_lanesdf)) ++ return true; ++ return false; ++} ++ ++bool ++is_aarch64_stp_insn (int icode) ++{ ++ if ((icode >= CODE_FOR_store_pair_sw_sisi ++ && icode <= CODE_FOR_store_pair_dw_tftf) ++ || (icode >= CODE_FOR_storewb_pairsi_si ++ && icode <= CODE_FOR_storewb_pairtf_di) ++ || (icode >= CODE_FOR_vec_store_pairv8qiv8qi ++ && icode <= CODE_FOR_vec_store_pairdfdf) ++ || (icode >= CODE_FOR_vec_store_pairv16qiv16qi ++ && icode <= CODE_FOR_vec_store_pairv8bfv2df) ++ || (icode >= CODE_FOR_store_pair_lanesv8qi ++ && icode <= CODE_FOR_store_pair_lanesdf)) ++ return true; ++ return false; ++} ++ ++#undef TARGET_IS_LDP_INSN ++#define TARGET_IS_LDP_INSN is_aarch64_ldp_insn ++ ++#undef TARGET_IS_STP_INSN ++#define TARGET_IS_STP_INSN is_aarch64_stp_insn ++ + #undef TARGET_STACK_PROTECT_GUARD + #define TARGET_STACK_PROTECT_GUARD aarch64_stack_protect_guard + +diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi +index c5006afc0..0c6415a9c 100644 +--- a/gcc/doc/tm.texi ++++ b/gcc/doc/tm.texi +@@ -12113,6 +12113,14 @@ object files that are not referenced from @code{main} and uses export + lists. 
+ @end defmac + ++@deftypefn {Target Hook} bool TARGET_IS_LDP_INSN (int @var{icode}) ++Return true if icode is corresponding to any of the LDP instruction types. ++@end deftypefn ++ ++@deftypefn {Target Hook} bool TARGET_IS_STP_INSN (int @var{icode}) ++Return true if icode is corresponding to any of the STP instruction types. ++@end deftypefn ++ + @deftypefn {Target Hook} bool TARGET_CANNOT_MODIFY_JUMPS_P (void) + This target hook returns @code{true} past the point in which new jump + instructions could be created. On machines that require a register for +diff --git a/gcc/doc/tm.texi.in b/gcc/doc/tm.texi.in +index f869ddd5e..6ff60e562 100644 +--- a/gcc/doc/tm.texi.in ++++ b/gcc/doc/tm.texi.in +@@ -7977,6 +7977,10 @@ object files that are not referenced from @code{main} and uses export + lists. + @end defmac + ++@hook TARGET_IS_LDP_INSN ++ ++@hook TARGET_IS_STP_INSN ++ + @hook TARGET_CANNOT_MODIFY_JUMPS_P + + @hook TARGET_HAVE_CONDITIONAL_EXECUTION +diff --git a/gcc/params.opt b/gcc/params.opt +index 7fcc2398d..6176d4790 100644 +--- a/gcc/params.opt ++++ b/gcc/params.opt +@@ -1217,4 +1217,7 @@ Enum(vrp_mode) String(ranger) Value(VRP_MODE_RANGER) + Common Joined UInteger Var(param_pointer_compression_size) Init(32) IntegerRange(8, 32) Param Optimization + Target size of compressed pointer, which should be 8, 16 or 32. + ++-param=param-ldp-dependency-search-range= ++Common Joined UInteger Var(param_ldp_dependency_search_range) Init(16) IntegerRange(1, 32) Param Optimization ++Range for depended ldp search in split-ldp-stp path. + ; This comment is to ensure we retain the blank line above. +diff --git a/gcc/passes.def b/gcc/passes.def +index 941bbadf0..a30e05688 100644 +--- a/gcc/passes.def ++++ b/gcc/passes.def +@@ -514,6 +514,7 @@ along with GCC; see the file COPYING3. 
If not see + NEXT_PASS (pass_reorder_blocks); + NEXT_PASS (pass_leaf_regs); + NEXT_PASS (pass_split_before_sched2); ++ NEXT_PASS (pass_split_complex_instructions); + NEXT_PASS (pass_sched2); + NEXT_PASS (pass_stack_regs); + PUSH_INSERT_PASSES_WITHIN (pass_stack_regs) +diff --git a/gcc/sched-rgn.cc b/gcc/sched-rgn.cc +index a0dfdb788..b4df8bdc5 100644 +--- a/gcc/sched-rgn.cc ++++ b/gcc/sched-rgn.cc +@@ -44,6 +44,8 @@ along with GCC; see the file COPYING3. If not see + are actually scheduled. */ + + #include "config.h" ++#define INCLUDE_SET ++#define INCLUDE_VECTOR + #include "system.h" + #include "coretypes.h" + #include "backend.h" +@@ -65,6 +67,7 @@ along with GCC; see the file COPYING3. If not see + #include "dbgcnt.h" + #include "pretty-print.h" + #include "print-rtl.h" ++#include "cfgrtl.h" + + /* Disable warnings about quoting issues in the pp_xxx calls below + that (intentionally) don't follow GCC diagnostic conventions. */ +@@ -3951,6 +3954,705 @@ make_pass_sched_fusion (gcc::context *ctxt) + return new pass_sched_fusion (ctxt); + } + ++namespace { ++ ++/* Def-use analisys special functions implementation. */ ++ ++static struct df_link * ++get_defs (rtx_insn *insn, rtx reg) ++{ ++ df_ref use; ++ struct df_link *ref_chain, *ref_link; ++ ++ FOR_EACH_INSN_USE (use, insn) ++ { ++ if (GET_CODE (DF_REF_REG (use)) == SUBREG) ++ return NULL; ++ if (REGNO (DF_REF_REG (use)) == REGNO (reg)) ++ break; ++ } ++ ++ gcc_assert (use != NULL); ++ ++ ref_chain = DF_REF_CHAIN (use); ++ ++ for (ref_link = ref_chain; ref_link; ref_link = ref_link->next) ++ { ++ /* Problem getting some definition for this instruction. */ ++ if (ref_link->ref == NULL) ++ return NULL; ++ if (DF_REF_INSN_INFO (ref_link->ref) == NULL) ++ return NULL; ++ /* As global regs are assumed to be defined at each function call ++ dataflow can report a call_insn as being a definition of REG. 
++ But we can't do anything with that in this pass so proceed only ++ if the instruction really sets REG in a way that can be deduced ++ from the RTL structure. */ ++ if (global_regs[REGNO (reg)] ++ && !set_of (reg, DF_REF_INSN (ref_link->ref))) ++ return NULL; ++ } ++ ++ return ref_chain; ++} ++ ++static struct df_link * ++get_uses (rtx_insn *insn, rtx reg) ++{ ++ df_ref def; ++ struct df_link *ref_chain, *ref_link; ++ ++ FOR_EACH_INSN_DEF (def, insn) ++ if (REGNO (DF_REF_REG (def)) == REGNO (reg)) ++ break; ++ ++ gcc_assert (def != NULL && "Broken def-use analisys chain."); ++ ++ ref_chain = DF_REF_CHAIN (def); ++ ++ for (ref_link = ref_chain; ref_link; ref_link = ref_link->next) ++ { ++ /* Problem getting some use for this instruction. */ ++ if (ref_link->ref == NULL) ++ return NULL; ++ } ++ ++ return ref_chain; ++} ++ ++const pass_data pass_data_split_complex_instructions = { ++ RTL_PASS, /* Type. */ ++ "split_complex_instructions", /* Name. */ ++ OPTGROUP_NONE, /* Optinfo_flags. */ ++ TV_SPLIT_CMP_INS, /* Tv_id. */ ++ 0, /* Properties_required. */ ++ 0, /* Properties_provided. */ ++ 0, /* Properties_destroyed. */ ++ 0, /* Todo_flags_start. */ ++ (TODO_df_verify | TODO_df_finish), /* Todo_flags_finish. 
*/ ++}; ++ ++class pass_split_complex_instructions : public rtl_opt_pass ++{ ++private: ++ enum complex_instructions_t ++ { ++ UNDEFINED, ++ LDP, ++ LDP_TI, ++ STP, ++ STR ++ }; ++ ++ void split_complex_insn (rtx_insn *insn); ++ void split_ldp_ti (rtx_insn *insn); ++ void split_ldp_with_offset (rtx_insn *ldp_insn); ++ void split_simple_ldp (rtx_insn *ldp_insn); ++ void split_ldp_stp (rtx_insn *insn); ++ complex_instructions_t get_insn_type (rtx_insn *insn); ++ ++ basic_block bb; ++ rtx_insn *insn; ++ std::set dependent_stores_candidates; ++ std::set ldp_to_split_list; ++ ++ complex_instructions_t complex_insn_type = UNDEFINED; ++ bool is_store_insn (rtx_insn *insn); ++ bool is_ldp_dependent_on_store (rtx_insn *ldp_insn, basic_block bb); ++ bool bfs_for_reg_dependent_store (rtx_insn *ldp_insn, basic_block search_bb, ++ rtx_insn *search_insn, ++ int search_range ++ = param_ldp_dependency_search_range); ++ bool is_store_reg_dependent (rtx_insn *ldp_insn, rtx_insn *str_insn); ++ void init_df (); ++ void find_dependent_stores_candidates (rtx_insn *ldp_insn); ++ int get_insn_offset (rtx_insn *insn, complex_instructions_t insn_type, ++ int *arith_operation_ptr = NULL); ++ ++public: ++ pass_split_complex_instructions (gcc::context *ctxt) ++ : rtl_opt_pass (pass_data_split_complex_instructions, ctxt) ++ { ++ } ++ /* opt_pass methods: */ ++ virtual bool gate (function *); ++ ++ virtual unsigned int ++ execute (function *) ++ { ++ enum rtx_code ldp_memref_code; ++ init_df (); ++ ldp_to_split_list.clear (); ++ FOR_EACH_BB_FN (bb, cfun) ++ { ++ FOR_BB_INSNS (bb, insn) ++ { ++ complex_instructions_t insn_type = get_insn_type (insn); ++ /* TODO: Add splitting of STP instructions. */ ++ if (insn_type != LDP && insn_type != LDP_TI) ++ continue; ++ /* TODO: Currently support only ldp_ti and ldp with REG or ++ PLUS/MINUS offset expression. 
*/ ++ if (insn_type == LDP_TI) ++ { ++ ldp_memref_code = GET_CODE (XEXP (XEXP (PATTERN (insn), 1), ++ 0)); ++ if (ldp_memref_code != REG && ldp_memref_code != PLUS ++ && ldp_memref_code != MINUS) ++ continue; ++ } ++ if (is_ldp_dependent_on_store (insn, bb)) ++ { ++ ldp_to_split_list.insert (insn); ++ } ++ } ++ } ++ ++ for (std::set::iterator i = ldp_to_split_list.begin (); ++ i != ldp_to_split_list.end (); ++i) ++ split_complex_insn (*i); ++ ++ return 0; ++ } ++}; // class pass_split_complex_instructions ++ ++bool ++pass_split_complex_instructions::is_ldp_dependent_on_store (rtx_insn *ldp_insn, ++ basic_block bb) ++{ ++ find_dependent_stores_candidates (ldp_insn); ++ return bfs_for_reg_dependent_store (ldp_insn, bb, ldp_insn); ++} ++ ++bool ++pass_split_complex_instructions::bfs_for_reg_dependent_store ( ++ rtx_insn *ldp_insn, basic_block search_bb, rtx_insn *search_insn, ++ int search_range) ++{ ++ rtx_insn *current_search_insn = search_insn; ++ ++ for (int i = search_range; i > 0; --i) ++ { ++ if (!current_search_insn) ++ return false; ++ bool checking_result ++ = is_store_reg_dependent (ldp_insn, current_search_insn); ++ if (checking_result) ++ { ++ if (dump_file) ++ { ++ fprintf (dump_file, "LDP to split:\n"); ++ print_rtl_single (dump_file, ldp_insn); ++ fprintf (dump_file, "Found STR:\n"); ++ print_rtl_single (dump_file, current_search_insn); ++ } ++ return true; ++ } ++ if (current_search_insn == BB_HEAD (search_bb)) ++ { ++ /* Search in all parent BBs for the reg_dependent store. 
*/ ++ edge_iterator ei; ++ edge e; ++ ++ FOR_EACH_EDGE (e, ei, search_bb->preds) ++ if (e->src->index != 0 ++ && bfs_for_reg_dependent_store (ldp_insn, e->src, ++ BB_END (e->src), i - 1)) ++ return true; ++ return false; ++ } ++ else ++ { ++ if (!active_insn_p (current_search_insn)) ++ i++; ++ current_search_insn = PREV_INSN (current_search_insn); ++ } ++ } ++ return false; ++} ++ ++void ++pass_split_complex_instructions::init_df () ++{ ++ df_set_flags (DF_RD_PRUNE_DEAD_DEFS); ++ df_chain_add_problem (DF_UD_CHAIN + DF_DU_CHAIN); ++ df_mir_add_problem (); ++ df_live_add_problem (); ++ df_live_set_all_dirty (); ++ df_analyze (); ++ df_set_flags (DF_DEFER_INSN_RESCAN); ++} ++ ++void ++pass_split_complex_instructions::find_dependent_stores_candidates ( ++ rtx_insn *ldp_insn) ++{ ++ dependent_stores_candidates.clear (); ++ df_ref use; ++ ++ FOR_EACH_INSN_USE (use, ldp_insn) ++ { ++ df_link *defs = get_defs (ldp_insn, DF_REF_REG (use)); ++ if (!defs) ++ return; ++ ++ for (df_link *def = defs; def; def = def->next) ++ { ++ df_link *uses ++ = get_uses (DF_REF_INSN (def->ref), DF_REF_REG (def->ref)); ++ if (!uses) ++ continue; ++ ++ for (df_link *use = uses; use; use = use->next) ++ { ++ if (DF_REF_CLASS (use->ref) == DF_REF_REGULAR ++ && is_store_insn (DF_REF_INSN (use->ref))) ++ dependent_stores_candidates.insert (DF_REF_INSN (use->ref)); ++ } ++ } ++ } ++} ++ ++bool ++pass_split_complex_instructions::is_store_reg_dependent (rtx_insn *ldp_insn, ++ rtx_insn *str_insn) ++{ ++ if (!is_store_insn (str_insn) ++ || dependent_stores_candidates.find (str_insn) ++ == dependent_stores_candidates.end ()) ++ return false; ++ ++ int ldp_offset_sign = UNDEFINED; ++ int ldp_offset ++ = get_insn_offset (ldp_insn, get_insn_type (ldp_insn), &ldp_offset_sign); ++ if (ldp_offset_sign == MINUS) ++ ldp_offset = -ldp_offset; ++ ++ int str_offset_sign = UNDEFINED; ++ int str_offset = get_insn_offset (str_insn, STR, &str_offset_sign); ++ if (str_offset_sign == MINUS) ++ str_offset = -str_offset; 
++ ++ if (str_offset == ldp_offset || str_offset == ldp_offset + 8) ++ return true; ++ ++ return false; ++} ++ ++bool ++pass_split_complex_instructions::is_store_insn (rtx_insn *insn) ++{ ++ if (!insn) ++ return false; ++ rtx sset_b = single_set (insn); ++ /* TODO: The condition below allow to take only store instructions in which ++ the memory location's operand is either a register (base) or an plus/minus ++ operation (base + #imm). So it might make sense to add support for other ++ cases (e.g. multiply and shift). */ ++ if (sset_b && MEM_P (SET_DEST (sset_b)) ++ && GET_MODE (XEXP (sset_b, 0)) != BLKmode ++ && (GET_CODE (XEXP (XEXP (sset_b, 0), 0)) == REG ++ || (GET_CODE (XEXP (XEXP (sset_b, 0), 0)) == PLUS ++ || GET_CODE (XEXP (XEXP (sset_b, 0), 0)) == MINUS) ++ && (GET_CODE (XEXP (XEXP (XEXP (sset_b, 0), 0), 1)) == CONST_INT))) ++ return true; ++ ++ return false; ++} ++ ++int ++pass_split_complex_instructions::get_insn_offset ( ++ rtx_insn *insn, complex_instructions_t insn_type, int *arith_operation_ptr) ++{ ++ rtx insn_pat = PATTERN (insn); ++ int returned_offset = 0; ++ ++ rtx offset_expr = NULL; ++ rtx offset_value_expr = NULL; ++ ++ switch (insn_type) ++ { ++ case LDP: ++ { ++ int number_of_sub_insns = XVECLEN (insn_pat, 0); ++ ++ /* Calculate it's own ofsset of first load insn. 
*/ ++ rtx_insn *first_load_insn = NULL; ++ if (number_of_sub_insns == 2) ++ { ++ first_load_insn ++ = make_insn_raw (copy_rtx (XVECEXP (insn_pat, 0, 0))); ++ arith_operation_ptr = NULL; ++ ++ offset_expr = XEXP (XEXP (PATTERN (first_load_insn), 1), 0); ++ if (GET_CODE (offset_expr) == PLUS ++ || GET_CODE (offset_expr) == MINUS) ++ offset_value_expr ++ = XEXP (XEXP (XEXP (PATTERN (first_load_insn), 1), 0), 1); ++ else ++ offset_expr = NULL; ++ } ++ else if (number_of_sub_insns == 3) ++ { ++ rtx_insn *offset_sub_insn ++ = make_insn_raw (copy_rtx (XVECEXP (insn_pat, 0, 0))); ++ ++ offset_expr = XEXP (PATTERN (offset_sub_insn), 1); ++ offset_value_expr = XEXP (XEXP (PATTERN (offset_sub_insn), 1), 1); ++ } ++ else ++ { ++ gcc_assert (false ++ && "Wrong number of elements in the ldp_insn vector"); ++ } ++ break; ++ } ++ case LDP_TI: ++ { ++ offset_expr = XEXP (XEXP (insn_pat, 1), 0); ++ if (GET_CODE (offset_expr) != PLUS && GET_CODE (offset_expr) != MINUS) ++ return 0; ++ offset_value_expr = XEXP (XEXP (XEXP (insn_pat, 1), 0), 1); ++ break; ++ } ++ case STR: ++ { ++ offset_expr = XEXP (XEXP (insn_pat, 0), 0); ++ /* If memory location is specified by single base register then the ++ offset is zero. 
*/ ++ if (GET_CODE (offset_expr) == REG) ++ return 0; ++ offset_value_expr = XEXP (XEXP (XEXP (insn_pat, 0), 0), 1); ++ break; ++ } ++ default: ++ { ++ if (dumps_are_enabled && dump_file) ++ { ++ fprintf (dump_file, "Instruction that was tried to split:\n"); ++ print_rtl_single (dump_file, insn); ++ } ++ gcc_assert (false && "Unsupported instruction type"); ++ break; ++ } ++ } ++ ++ if (offset_expr != NULL && offset_value_expr ++ && GET_CODE (offset_value_expr) == CONST_INT) ++ returned_offset = XINT (offset_value_expr, 0); ++ ++ if (arith_operation_ptr != NULL) ++ { ++ *arith_operation_ptr = GET_CODE (offset_expr); ++ gcc_assert ((*arith_operation_ptr == MINUS ++ || *arith_operation_ptr == PLUS) ++ && "Unexpected arithmetic operation in the offset expr"); ++ } ++ ++ return returned_offset; ++} ++ ++void ++pass_split_complex_instructions::split_simple_ldp (rtx_insn *ldp_insn) ++{ ++ rtx pat = PATTERN (ldp_insn); ++ ++ rtx_insn *mem_insn_1 = make_insn_raw (copy_rtx (XVECEXP (pat, 0, 0))); ++ rtx_insn *mem_insn_2 = make_insn_raw (copy_rtx (XVECEXP (pat, 0, 1))); ++ ++ int dest_regno = REGNO (SET_DEST (PATTERN (mem_insn_1))); ++ int src_regno; ++ ++ rtx srs_reg_insn = XEXP (SET_SRC (PATTERN (mem_insn_1)), 0); ++ ++ if (GET_CODE (srs_reg_insn) == REG) ++ src_regno = REGNO (srs_reg_insn); ++ else ++ src_regno = REGNO (XEXP (srs_reg_insn, 0)); ++ ++ rtx_insn *emited_insn_1, *emited_insn_2; ++ ++ /* in cases like ldp r1,r2,[r1] we emit ldr r2,[r1] first. 
*/ ++ if (src_regno == dest_regno) ++ std::swap (mem_insn_1, mem_insn_2); ++ ++ emited_insn_1 = emit_insn (PATTERN (mem_insn_1)); ++ emited_insn_2 = emit_insn (PATTERN (mem_insn_2)); ++ ++ int sub_insn_1_code = recog (PATTERN (mem_insn_1), mem_insn_1, 0); ++ int sub_insn_2_code = recog (PATTERN (mem_insn_2), mem_insn_2, 0); ++ ++ INSN_CODE (emited_insn_1) = sub_insn_1_code; ++ INSN_CODE (emited_insn_2) = sub_insn_2_code; ++} ++ ++void ++pass_split_complex_instructions::split_ldp_with_offset (rtx_insn *ldp_insn) ++{ ++ rtx pat = PATTERN (ldp_insn); ++ bool post_index = true; ++ ++ rtx_insn offset_insn; ++ rtx_insn mem_insn_1; ++ rtx_insn mem_insn_2; ++ ++ int offset_insn_code; ++ int mem_insn_1_code = -1; ++ int mem_insn_2_code = -1; ++ ++ int offset = 0; ++ int arith_operation = UNDEFINED; ++ ++ for (int i = 0; i < 3; i++) ++ { ++ rtx sub_insn = XVECEXP (pat, 0, i); ++ rtx_insn *copy_of_sub_insn = make_insn_raw (copy_rtx (sub_insn)); ++ int sub_insn_code ++ = recog (PATTERN (copy_of_sub_insn), copy_of_sub_insn, 0); ++ ++ /* If sub_insn is offset related. */ ++ if (GET_RTX_CLASS (sub_insn_code) == RTX_UNARY) ++ { ++ offset_insn = *copy_of_sub_insn; ++ offset_insn_code = sub_insn_code; ++ gcc_assert (i == 0 ++ && "Offset related insn must be the first " ++ "element of a parallel insn vector"); ++ ++ offset = get_insn_offset (ldp_insn, LDP, &arith_operation); ++ } ++ else ++ { ++ if (GET_CODE (XEXP (PATTERN (copy_of_sub_insn), 0)) != REG) ++ { ++ rtx &offset_expr ++ = XEXP (XEXP (XEXP (PATTERN (copy_of_sub_insn), 0), 0), 1); ++ if (GET_CODE (offset_expr) == CONST_INT) ++ { ++ int local_offset = XINT (offset_expr, 0); ++ offset = (arith_operation == PLUS ? 
offset : -offset); ++ ++ offset_expr = GEN_INT (local_offset + offset); ++ ++ gcc_assert ( ++ (arith_operation == MINUS || arith_operation == PLUS) ++ && "Unexpected arithmetic operation in offset related " ++ "sub_insn"); ++ ++ if (i == 1) ++ post_index = false; ++ } ++ else ++ { ++ post_index = true; ++ } ++ } ++ } ++ if (i == 1) ++ { ++ mem_insn_1 = *copy_of_sub_insn; ++ mem_insn_1_code = sub_insn_code; ++ } ++ if (i == 2) ++ { ++ mem_insn_2 = *copy_of_sub_insn; ++ mem_insn_2_code = sub_insn_code; ++ } ++ } ++ gcc_assert (mem_insn_1_code != -1 && mem_insn_2_code != -1 ++ && "Uninitialized memory insns"); ++ ++ int dest_regno = REGNO (SET_DEST (PATTERN (&mem_insn_1))); ++ int src_regno; ++ ++ rtx srs_reg_insn = XEXP (SET_SRC (PATTERN (&mem_insn_1)), 0); ++ ++ if (GET_CODE (srs_reg_insn) == REG) ++ src_regno = REGNO (srs_reg_insn); ++ else ++ src_regno = REGNO (XEXP (srs_reg_insn, 0)); ++ ++ /* Don't split such weird LDP. */ ++ if (src_regno == dest_regno) ++ return; ++ ++ rtx_insn *emited_offset_insn; ++ if (!post_index) ++ { ++ emited_offset_insn = emit_insn (PATTERN (&offset_insn)); ++ INSN_CODE (emited_offset_insn) = offset_insn_code; ++ } ++ ++ rtx_insn *emited_insn_1 = emit_insn (PATTERN (&mem_insn_1)); ++ rtx_insn *emited_insn_2 = emit_insn (PATTERN (&mem_insn_2)); ++ ++ ++ INSN_CODE (emited_insn_1) = mem_insn_1_code; ++ INSN_CODE (emited_insn_2) = mem_insn_2_code; ++ ++ if (post_index) ++ { ++ emited_offset_insn = emit_insn (PATTERN (&offset_insn)); ++ INSN_CODE (emited_offset_insn) = offset_insn_code; ++ } ++} ++ ++void ++pass_split_complex_instructions::split_ldp_stp (rtx_insn *insn) ++{ ++ rtx_insn *prev_insn = PREV_INSN (insn); ++ int number_of_sub_insns = XVECLEN (PATTERN (insn), 0); ++ ++ start_sequence (); ++ ++ if (number_of_sub_insns == 2) ++ split_simple_ldp (insn); ++ else if (number_of_sub_insns == 3) ++ split_ldp_with_offset (insn); ++ else ++ gcc_assert (false && "Broken complex insn vector"); ++ ++ rtx_insn *seq = get_insns (); ++ 
unshare_all_rtl_in_chain (seq); ++ end_sequence (); ++ ++ emit_insn_after_setloc (seq, prev_insn, INSN_LOCATION (insn)); ++ delete_insn_and_edges (insn); ++} ++ ++void ++pass_split_complex_instructions::split_ldp_ti (rtx_insn *insn) ++{ ++ rtx_insn *prev_insn = PREV_INSN (insn); ++ rtx_insn *load_insn_1 = make_insn_raw (copy_rtx (PATTERN (insn))); ++ rtx_insn *load_insn_2 = make_insn_raw (copy_rtx (PATTERN (insn))); ++ ++ rtx reg_insn_1 = XEXP (PATTERN (load_insn_1), 0); ++ rtx mem_insn_1 = XEXP (PATTERN (load_insn_1), 1); ++ rtx mem_insn_2 = XEXP (PATTERN (load_insn_2), 1); ++ ++ PUT_MODE (mem_insn_1, DImode); ++ PUT_MODE (mem_insn_2, DImode); ++ ++ int reg_no_1 = REGNO (reg_insn_1); ++ ++ XEXP (PATTERN (load_insn_1), 0) = gen_rtx_REG (DImode, reg_no_1); ++ XEXP (PATTERN (load_insn_2), 0) = gen_rtx_REG (DImode, reg_no_1 + 1); ++ ++ rtx load_insn_2_plus_expr = XEXP (XEXP (PATTERN (load_insn_2), 1), 0); ++ if (GET_CODE (load_insn_2_plus_expr) == REG) ++ { ++ XEXP (XEXP (PATTERN (load_insn_2), 1), 0) ++ = gen_rtx_PLUS (DImode, ++ gen_rtx_REG (DImode, REGNO (load_insn_2_plus_expr)), ++ GEN_INT (GET_MODE_SIZE (DImode))); ++ } ++ else ++ { ++ rtx load_insn_2_offset_expr ++ = XEXP (XEXP (XEXP (PATTERN (load_insn_2), 1), 0), 1); ++ ++ if (load_insn_2_offset_expr == NULL) ++ return; ++ ++ if (GET_CODE (load_insn_2_offset_expr) == CONST_INT) ++ { ++ int load_insn_2_offset = XINT (load_insn_2_offset_expr, 0); ++ XEXP (XEXP (XEXP (PATTERN (load_insn_2), 1), 0), 1) ++ = GEN_INT (load_insn_2_offset + GET_MODE_SIZE (DImode)); ++ } ++ } ++ ++ start_sequence (); ++ ++ int src_regno; ++ rtx srs_reg_insn = XEXP (XEXP (PATTERN (load_insn_1), 1), 0); ++ ++ if (GET_CODE (srs_reg_insn) == REG) ++ src_regno = REGNO (srs_reg_insn); ++ else ++ src_regno = REGNO (XEXP (srs_reg_insn, 0)); ++ ++ /* in cases like ldp r1,r2,[r1] we emit ldr r2,[r1] first. 
*/ ++ if (src_regno == reg_no_1) ++ std::swap (load_insn_1, load_insn_2); ++ ++ rtx_insn *emited_load_insn_1 = emit_insn (PATTERN (load_insn_1)); ++ rtx_insn *emited_load_insn_2 = emit_insn (PATTERN (load_insn_2)); ++ ++ INSN_CODE (emited_load_insn_1) ++ = recog (PATTERN (emited_load_insn_1), emited_load_insn_1, 0); ++ INSN_CODE (emited_load_insn_2) ++ = recog (PATTERN (emited_load_insn_2), emited_load_insn_2, 0); ++ ++ rtx_insn *seq = get_insns (); ++ unshare_all_rtl_in_chain (seq); ++ end_sequence (); ++ ++ emit_insn_after_setloc (seq, prev_insn, INSN_LOCATION (insn)); ++ delete_insn_and_edges (insn); ++} ++ ++void ++pass_split_complex_instructions::split_complex_insn (rtx_insn *insn) ++{ ++ complex_instructions_t insn_type = get_insn_type (insn); ++ /* TODO: Add splitting of STP instructions. */ ++ if (insn_type == LDP || insn_type == STP) ++ split_ldp_stp (insn); ++ else if (insn_type == LDP_TI) ++ split_ldp_ti (insn); ++ else ++ gcc_assert (false && "Unsupported type of insn to split"); ++} ++ ++pass_split_complex_instructions::complex_instructions_t ++pass_split_complex_instructions::get_insn_type (rtx_insn *insn) ++{ ++ if (!INSN_P (insn)) ++ return UNDEFINED; ++ ++ rtx pat = PATTERN (insn); ++ int icode = recog (PATTERN (insn), insn, NULL); ++ ++ if (GET_CODE (pat) == PARALLEL) ++ { ++ if (targetm.is_ldp_insn (icode)) ++ { ++ return LDP; ++ } ++ if (targetm.is_stp_insn (icode)) ++ { ++ return STP; ++ } ++ else ++ { ++ return UNDEFINED; ++ } ++ } ++ rtx set_insn = single_set (insn); ++ if (set_insn && GET_CODE (XEXP (set_insn, 1)) == MEM ++ && GET_MODE (XEXP (set_insn, 1)) == E_TImode) ++ return LDP_TI; ++ ++ return UNDEFINED; ++} ++ ++bool ++pass_split_complex_instructions::gate (function *) ++{ ++ return targetm.is_ldp_insn && targetm.is_stp_insn && optimize > 0 ++ && flag_split_ldp_stp > 0; ++} ++ ++} // anon namespace ++ ++rtl_opt_pass * ++make_pass_split_complex_instructions (gcc::context *ctxt) ++{ ++ return new pass_split_complex_instructions (ctxt); 
++} ++ + #if __GNUC__ >= 10 + # pragma GCC diagnostic pop +-#endif ++#endif +\ No newline at end of file +diff --git a/gcc/target.def b/gcc/target.def +index d85adf36a..a3a50b474 100644 +--- a/gcc/target.def ++++ b/gcc/target.def +@@ -2677,6 +2677,16 @@ modes and they have different conditional execution capability, such as ARM.", + bool, (void), + default_have_conditional_execution) + ++DEFHOOK ++(is_ldp_insn, ++ "Return true if icode is corresponding to any of the LDP instruction types.", ++ bool, (int icode), NULL) ++ ++DEFHOOK ++(is_stp_insn, ++ "Return true if icode is corresponding to any of the STP instruction types.", ++ bool, (int icode), NULL) ++ + DEFHOOK + (gen_ccmp_first, + "This function prepares to emit a comparison insn for the first compare in a\n\ +diff --git a/gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-dont-split.c b/gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-dont-split.c +new file mode 100644 +index 000000000..3918d43f6 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-dont-split.c +@@ -0,0 +1,74 @@ ++/* { dg-do compile { target aarch64-*-* } } */ ++/* { dg-additional-options "-fsplit-ldp-stp" } */ ++/* ++ * Tests are: ++ * Patterns where LDP insns should NOT be split ++ * */ ++ ++int __RTL (startwith ("split_complex_instructions")) ++simple_ldp_after_store () ++{ ++(function "simple_ldp_after_store" ++ (insn-chain ++ (block 2 ++ (edge-from entry (flags "FALLTHRU")) ++ (cnote 3 [bb 2] NOTE_INSN_BASIC_BLOCK) ++ (cinsn 228 (set (reg/i:DI sp) ++ (reg/i:DI x0))) ++ (cinsn 101 (set (mem/c:DI ++ (plus:DI (reg/f:DI sp) ++ (const_int 32))[1 S4 A32])(reg:DI x0))) ++ (cinsn 10 (parallel [ ++ (set (reg:DI x29) ++ (mem:DI (plus:DI (reg/f:DI sp) (const_int 8)) [1 S4 A32])) ++ (set (reg:DI x30) ++ (mem:DI (plus:DI (reg/f:DI sp) ++ (const_int 16)) [1 S4 A32]))])) ++ (cinsn 11 (use (reg/i:DI sp))) ++ (cinsn 12 (use (reg/i:DI cc))) ++ (cinsn 13 (use (reg/i:DI x29))) ++ (cinsn 14 (use (reg/i:DI x30))) ++ (cinsn 15 (use (reg/i:DI x0))) ++ (edge-to exit 
(flags "FALLTHRU")) ++ ) ;; block 2 ++ ) ;; insn-chain ++) ;; function "simple_ldp_after_store" ++} ++ ++int __RTL (startwith ("split_complex_instructions")) ++ldp_after_store_in_different_bb () ++{ ++(function "ldp_after_store_in_different_bb" ++ (insn-chain ++ (block 2 ++ (edge-from entry (flags "FALLTHRU")) ++ (cnote 3 [bb 2] NOTE_INSN_BASIC_BLOCK) ++ (cinsn 228 (set (reg/i:DI sp) ++ (reg/i:DI x0))) ++ (cinsn 101 (set (mem/c:DI ++ (plus:DI (reg/f:DI sp) ++ (const_int 32))[1 S4 A32])(reg:DI x0))) ++ (edge-to 3 (flags "FALLTHRU")) ++ ) ;; block 2 ++ (block 3 ++ (edge-from 2 (flags "FALLTHRU")) ++ (cnote 4 [bb 3] NOTE_INSN_BASIC_BLOCK) ++ (cinsn 10 (parallel [ ++ (set (reg:DI x29) ++ (mem:DI (plus:DI (reg/f:DI sp) (const_int 8)) [1 S4 A32])) ++ (set (reg:DI x30) ++ (mem:DI (plus:DI (reg/f:DI sp) ++ (const_int 16)) [1 S4 A32]))])) ++ (cinsn 11 (use (reg/i:DI sp))) ++ (cinsn 12 (use (reg/i:DI cc))) ++ (cinsn 13 (use (reg/i:DI x29))) ++ (cinsn 14 (use (reg/i:DI x30))) ++ (cinsn 15 (use (reg/i:DI x0))) ++ (edge-to exit (flags "FALLTHRU")) ++ ) ;; block 3 ++ ) ;; insn-chain ++) ;; function "ldp_after_store_in_different_bb" ++} ++ ++/* Verify that the output code contains exactly 2 ldp. */ ++/* { dg-final { scan-assembler-times {ldp\t} 2 } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-split-rearrange.c b/gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-split-rearrange.c +new file mode 100644 +index 000000000..653c30f83 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-split-rearrange.c +@@ -0,0 +1,40 @@ ++/* { dg-do compile { target aarch64-*-* } } */ ++/* { dg-additional-options "-fsplit-ldp-stp" } */ ++/* ++ * Test is: ++ * Pattern where LDP insns should be split with rearrangement in order ++ * to deal with data dependecy betwen subinstruction. 
++ * */ ++ ++int __RTL (startwith ("split_complex_instructions")) ++simple_ldp_after_store () ++{ ++(function "ldp_equal_registers" ++ (insn-chain ++ (block 2 ++ (edge-from entry (flags "FALLTHRU")) ++ (cnote 3 [bb 2] NOTE_INSN_BASIC_BLOCK) ++ (cinsn 228 (set (reg/i:DI x1) ++ (reg/i:DI x0))) ++ (cinsn 101 (set (mem/c:DI ++ (plus:DI (reg/f:DI x1) ++ (const_int 8))[1 S4 A32])(reg:DI x0))) ++ (cinsn 10 (parallel [ ++ (set (reg:DI x1) ++ (mem:DI (plus:DI (reg/f:DI x1) (const_int 8)) [1 S4 A32])) ++ (set (reg:DI x2) ++ (mem:DI (plus:DI (reg/f:DI x1) ++ (const_int 16)) [1 S4 A32]))])) ++ (cinsn 11 (use (reg/i:DI sp))) ++ (cinsn 12 (use (reg/i:DI cc))) ++ (cinsn 13 (use (reg/i:DI x0))) ++ (cinsn 14 (use (reg/i:DI x1))) ++ (cinsn 15 (use (reg/i:DI x2))) ++ (edge-to exit (flags "FALLTHRU")) ++ ) ;; block 2 ++ ) ;; insn-chain ++) ;; function "ldp_equal_registers" ++} ++ ++/* Verify that the output code rearrange ldrs. */ ++/* { dg-final { scan-assembler-times ".*ldr.*x2.*x1,.*16.*ldr.*x1.*x1.*8" 1 } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-split.c b/gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-split.c +new file mode 100644 +index 000000000..dc9f26efb +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-split.c +@@ -0,0 +1,174 @@ ++/* { dg-do compile { target aarch64-*-* } } */ ++/* { dg-additional-options "-O1 -fsplit-ldp-stp" } */ ++/* ++ * Tests are: ++ * Patterns where LDP insns should be split ++ * */ ++ ++int __RTL (startwith ("split_complex_instructions")) ++simple_ldp_after_store () ++{ ++(function "simple_ldp_after_store" ++ (insn-chain ++ (block 2 ++ (edge-from entry (flags "FALLTHRU")) ++ (cnote 3 [bb 2] NOTE_INSN_BASIC_BLOCK) ++ (cinsn 228 (set (reg/i:DI sp) ++ (reg/i:DI x0))) ++ (cinsn 238 (set (reg/i:DI x1) ++ (reg/i:DI x0))) ++ ++ (cinsn 101 (set (mem/c:DI ++ (plus:DI (reg/f:DI sp) ++ (const_int 8))[1 S4 A32])(reg:DI x0))) ++ (cinsn 10 (parallel [ ++ (set (reg:DI x29) ++ (mem:DI (plus:DI (reg/f:DI sp) 
(const_int 8)) [1 S4 A32])) ++ (set (reg:DI x30) ++ (mem:DI (plus:DI (reg/f:DI sp) ++ (const_int 16)) [1 S4 A32]))])) ++ ++ (cinsn 102 (set (mem/c:DI (plus:DI (reg/f:DI x1) ++ (const_int -16)) [1 S4 A32]) ++ (reg:DI x0))) ++ (cinsn 11 (parallel [ ++ (set (reg:DI x3) ++ (mem:DI (plus:DI (reg/f:DI x1) (const_int -16)) [1 S4 A32])) ++ (set (reg:DI x4) ++ (mem:DI (plus:DI (reg/f:DI x1) (const_int -8)) [1 S4 A32])) ++ ])) ++ ++ (cinsn 103 (set (mem/c:DI (reg/f:DI x1) [1 S4 A32]) ++ (reg:DI x0))) ++ (cinsn 12 (parallel [ ++ (set (reg:DI x5) (mem:DI (reg/f:DI x1) [1 S4 A32])) ++ (set (reg:DI x6) (mem:DI (plus:DI (reg/f:DI x1) ++ (const_int 8)) [1 S4 A32])) ++ ])) ++ ++ (cinsn 13 (use (reg/i:DI sp))) ++ (cinsn 14 (use (reg/i:DI cc))) ++ (cinsn 15 (use (reg/i:DI x29))) ++ (cinsn 16 (use (reg/i:DI x30))) ++ (cinsn 17 (use (reg/i:DI x0))) ++ (cinsn 18 (use (reg/i:DI x3))) ++ (cinsn 19 (use (reg/i:DI x4))) ++ (cinsn 20 (use (reg/i:DI x5))) ++ (cinsn 21 (use (reg/i:DI x6))) ++ (edge-to exit (flags "FALLTHRU")) ++ ) ;; block 2 ++ ) ;; insn-chain ++) ;; function "simple_ldp_after_store" ++} ++ ++int __RTL (startwith ("split_complex_instructions")) ++ldp_ti_after_store () ++{ ++ (function "ldp_ti_after_store" ++ (insn-chain ++ (block 2 ++ (edge-from entry (flags "FALLTHRU")) ++ (cnote 3 [bb 2] NOTE_INSN_BASIC_BLOCK) ++ (cinsn 228 (set (reg/i:DI sp) ++ (reg/i:DI x0))) ++ (cinsn 238 (set (reg/i:DI x2) ++ (reg/i:DI x0))) ++ ++ (cinsn 101 (set (mem/c:DI ++ (plus:DI (reg/f:DI sp) ++ (const_int 136))[1 S4 A32])(reg:DI x0))) ++ (insn 81 (set (reg:TI x0 [1 S4 A32]) ++ (mem/c:TI (plus:DI (reg/f:DI sp) ++ (const_int 136 )) [1 S4 A32])) ++ (expr_list:REG_EQUIV (mem/c:TI (plus:DI (reg/f:DI sfp) ++ (const_int -24 )) [1 S4 A32]) ++ (nil))) ++ ++ (cinsn 102 (set (mem/c:DI (plus:DI (reg/f:DI x2) ++ (const_int -16)) [1 S4 A32]) ++ (reg:DI x0))) ++ (insn 82 (set (reg:TI x3 [1 S4 A32]) ++ (mem/c:TI (plus:DI (reg/f:DI x2) ++ (const_int -16)) [1 S4 A32]))) ++ ++ (cinsn 103 (set (mem/c:DI (reg/f:DI x2) 
[1 S4 A32]) ++ (reg:DI x0))) ++ (insn 83 (set (reg:TI x5 [1 S4 A32]) ++ (mem/c:TI (reg/f:DI x2) [1 S4 A32]))) ++ ++ (cinsn 11 (use (reg/i:DI sp))) ++ (cinsn 12 (use (reg/i:DI cc))) ++ (cinsn 13 (use (reg/i:DI x29))) ++ (cinsn 14 (use (reg/i:DI x30))) ++ (cinsn 15 (use (reg/i:DI x0))) ++ (cinsn 16 (use (reg/i:DI x3))) ++ (cinsn 17 (use (reg/i:DI x5))) ++ (cinsn 18 (use (reg/i:DI x1))) ++ (cinsn 19 (use (reg/i:DI x4))) ++ (cinsn 20 (use (reg/i:DI x6))) ++ (edge-to exit (flags "FALLTHRU")) ++ ) ;; block 2 ++ ) ;; insn-chain ++) ;; function "ldp_ti_after_store" ++} ++ ++int __RTL (startwith ("split_complex_instructions")) ++ldp_after_store_in_different_bb () ++{ ++(function "ldp_after_store_in_different_bb" ++ (insn-chain ++ (block 2 ++ (edge-from entry (flags "FALLTHRU")) ++ (cnote 3 [bb 2] NOTE_INSN_BASIC_BLOCK) ++ (cinsn 228 (set (reg/i:DI sp) ++ (reg/i:DI x0))) ++ (cinsn 238 (set (reg/i:DI x1) ++ (reg/i:DI x0))) ++ ++ (cinsn 101 (set (mem/c:DI ++ (plus:DI (reg/f:DI sp) ++ (const_int 8))[1 S4 A32])(reg:DI x0))) ++ (cinsn 102 (set (mem/c:DI (plus:DI (reg/f:DI x1) ++ (const_int -16)) [1 S4 A32]) ++ (reg:DI x0))) ++ (cinsn 103 (set (mem/c:DI (reg/f:DI x1) [1 S4 A32]) ++ (reg:DI x0))) ++ (edge-to 3 (flags "FALLTHRU")) ++ ) ;; block 2 ++ (block 3 ++ (edge-from 2 (flags "FALLTHRU")) ++ (cnote 4 [bb 3] NOTE_INSN_BASIC_BLOCK) ++ (cinsn 10 (parallel [ ++ (set (reg:DI x29) ++ (mem:DI (plus:DI (reg/f:DI sp) (const_int 8)) [1 S4 A32])) ++ (set (reg:DI x30) ++ (mem:DI (plus:DI (reg/f:DI sp) ++ (const_int 16)) [1 S4 A32]))])) ++ (cinsn 11 (parallel [ ++ (set (reg:DI x3) ++ (mem:DI (plus:DI (reg/f:DI x1) (const_int -16)) [1 S4 A32])) ++ (set (reg:DI x4) ++ (mem:DI (plus:DI (reg/f:DI x1) (const_int -8)) [1 S4 A32])) ++ ])) ++ (cinsn 12 (parallel [ ++ (set (reg:DI x5) (mem:DI (reg/f:DI x1) [1 S4 A32])) ++ (set (reg:DI x6) (mem:DI (plus:DI (reg/f:DI x1) ++ (const_int 8)) [1 S4 A32])) ++ ])) ++ (cinsn 13 (use (reg/i:DI sp))) ++ (cinsn 14 (use (reg/i:DI cc))) ++ (cinsn 15 (use 
(reg/i:DI x29))) ++ (cinsn 16 (use (reg/i:DI x30))) ++ (cinsn 17 (use (reg/i:DI x0))) ++ (cinsn 18 (use (reg/i:DI x3))) ++ (cinsn 19 (use (reg/i:DI x4))) ++ (cinsn 20 (use (reg/i:DI x5))) ++ (cinsn 21 (use (reg/i:DI x6))) ++ (edge-to exit (flags "FALLTHRU")) ++ ) ;; block 3 ++ ) ;; insn-chain ++) ;; function "ldp_after_store_in_different_bb" ++} ++ ++/* Verify that the output code doesn't contain ldp. */ ++/* { dg-final { scan-assembler-not {ldp\t} } } */ +\ No newline at end of file +diff --git a/gcc/timevar.def b/gcc/timevar.def +index 1e7d4e74b..2ccecffb5 100644 +--- a/gcc/timevar.def ++++ b/gcc/timevar.def +@@ -280,6 +280,7 @@ DEFTIMEVAR (TV_RELOAD_CSE_REGS , "reload CSE regs") + DEFTIMEVAR (TV_GCSE_AFTER_RELOAD , "load CSE after reload") + DEFTIMEVAR (TV_REE , "ree") + DEFTIMEVAR (TV_THREAD_PROLOGUE_AND_EPILOGUE, "thread pro- & epilogue") ++DEFTIMEVAR (TV_SPLIT_CMP_INS , "split complex instructions") + DEFTIMEVAR (TV_IFCVT2 , "if-conversion 2") + DEFTIMEVAR (TV_SPLIT_PATHS , "split paths") + DEFTIMEVAR (TV_COMBINE_STACK_ADJUST , "combine stack adjustments") +diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h +index 86f38e2f2..6daac7fc1 100644 +--- a/gcc/tree-pass.h ++++ b/gcc/tree-pass.h +@@ -612,6 +612,7 @@ extern rtl_opt_pass *make_pass_split_after_reload (gcc::context *ctxt); + extern rtl_opt_pass *make_pass_thread_prologue_and_epilogue (gcc::context + *ctxt); + extern rtl_opt_pass *make_pass_zero_call_used_regs (gcc::context *ctxt); ++extern rtl_opt_pass *make_pass_split_complex_instructions (gcc::context *ctxt); + extern rtl_opt_pass *make_pass_stack_adjustments (gcc::context *ctxt); + extern rtl_opt_pass *make_pass_sched_fusion (gcc::context *ctxt); + extern rtl_opt_pass *make_pass_peephole2 (gcc::context *ctxt); +-- +2.33.0 + diff --git a/0043-Extending-and-refactoring-of-pass_split_complex_inst.patch b/0043-Extending-and-refactoring-of-pass_split_complex_inst.patch new file mode 100644 index 
0000000000000000000000000000000000000000..509a534f042554b56e658fda16b1ee63a04649c8 --- /dev/null +++ b/0043-Extending-and-refactoring-of-pass_split_complex_inst.patch @@ -0,0 +1,1426 @@ +From a49db831320ac70ca8f46b94ee60d7c6951f65c3 Mon Sep 17 00:00:00 2001 +From: Gadzhiev Emin WX1195297 +Date: Wed, 20 Dec 2023 21:36:07 +0300 +Subject: [PATCH 10/18] Extending and refactoring of + pass_split_complex_instructions + +- Add flag parameter in is_ldp_insn and is_stp_insn to know + if instruction has writeback operation +- Add support of PRE_*, POST_* operands as a memory address + expression +- Split only LDPs that intersect with a dependent store + instruction +- Make the selection of dependent store instructions stricter + so it will be enough to check by BFS that dependent store + instruction appears in search range. +- Add helper methods to retrieve fields of rtx +- Remove redundant iterations in find_dependent_stores_candidates +- Refactor generation of instructions +- Add more test cases +--- + gcc/config/aarch64/aarch64.cc | 62 +- + gcc/doc/tm.texi | 12 +- + gcc/sched-rgn.cc | 771 +++++++++--------- + gcc/target.def | 14 +- + .../gcc.dg/rtl/aarch64/test-ldp-dont-split.c | 35 +- + .../rtl/aarch64/test-ldp-split-rearrange.c | 2 +- + .../gcc.dg/rtl/aarch64/test-ldp-split.c | 181 +++- + 7 files changed, 603 insertions(+), 474 deletions(-) + +diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc +index 48e2eded0..fa566dd80 100644 +--- a/gcc/config/aarch64/aarch64.cc ++++ b/gcc/config/aarch64/aarch64.cc +@@ -27507,39 +27507,59 @@ aarch64_run_selftests (void) + + #endif /* #if CHECKING_P */ + +-/* TODO: refuse to use ranges intead of full list of an instruction codes. */ ++/* TODO: refuse to use ranges instead of full list of an instruction codes. 
*/ + + bool +-is_aarch64_ldp_insn (int icode) ++is_aarch64_ldp_insn (int icode, bool *has_wb) + { + if ((icode >= CODE_FOR_load_pair_sw_sisi +- && icode <= CODE_FOR_load_pair_dw_tftf) ++ && icode <= CODE_FOR_load_pair_sw_sfsf) ++ || (icode >= CODE_FOR_load_pair_dw_didi ++ && icode <= CODE_FOR_load_pair_dw_dfdf) ++ || (icode == CODE_FOR_load_pair_dw_tftf) + || (icode >= CODE_FOR_loadwb_pairsi_si +- && icode <= CODE_FOR_loadwb_pairtf_di) +- || (icode >= CODE_FOR_load_pairv8qiv8qi +- && icode <= CODE_FOR_load_pairdfdf) +- || (icode >= CODE_FOR_load_pairv16qiv16qi +- && icode <= CODE_FOR_load_pairv8bfv2df) +- || (icode >= CODE_FOR_load_pair_lanesv8qi +- && icode <= CODE_FOR_load_pair_lanesdf)) +- return true; ++ && icode <= CODE_FOR_loadwb_pairdi_di) ++ || (icode >= CODE_FOR_loadwb_pairsf_si ++ && icode <= CODE_FOR_loadwb_pairdf_di) ++ || (icode >= CODE_FOR_loadwb_pairti_si ++ && icode <= CODE_FOR_loadwb_pairtf_di)) ++ { ++ if (has_wb) ++ *has_wb = ((icode >= CODE_FOR_loadwb_pairsi_si ++ && icode <= CODE_FOR_loadwb_pairdi_di) ++ || (icode >= CODE_FOR_loadwb_pairsf_si ++ && icode <= CODE_FOR_loadwb_pairdf_di) ++ || (icode >= CODE_FOR_loadwb_pairti_si ++ && icode <= CODE_FOR_loadwb_pairtf_di)); ++ return true; ++ } + return false; + } + + bool +-is_aarch64_stp_insn (int icode) ++is_aarch64_stp_insn (int icode, bool *has_wb) + { + if ((icode >= CODE_FOR_store_pair_sw_sisi +- && icode <= CODE_FOR_store_pair_dw_tftf) ++ && icode <= CODE_FOR_store_pair_sw_sfsf) ++ || (icode >= CODE_FOR_store_pair_dw_didi ++ && icode <= CODE_FOR_store_pair_dw_dfdf) ++ || (icode == CODE_FOR_store_pair_dw_tftf) + || (icode >= CODE_FOR_storewb_pairsi_si +- && icode <= CODE_FOR_storewb_pairtf_di) +- || (icode >= CODE_FOR_vec_store_pairv8qiv8qi +- && icode <= CODE_FOR_vec_store_pairdfdf) +- || (icode >= CODE_FOR_vec_store_pairv16qiv16qi +- && icode <= CODE_FOR_vec_store_pairv8bfv2df) +- || (icode >= CODE_FOR_store_pair_lanesv8qi +- && icode <= CODE_FOR_store_pair_lanesdf)) +- return true; ++ && 
icode <= CODE_FOR_storewb_pairdi_di) ++ || (icode >= CODE_FOR_storewb_pairsf_si ++ && icode <= CODE_FOR_storewb_pairdf_di) ++ || (icode >= CODE_FOR_storewb_pairti_si ++ && icode <= CODE_FOR_storewb_pairtf_di)) ++ { ++ if (has_wb) ++ *has_wb = ((icode >= CODE_FOR_storewb_pairsi_si ++ && icode <= CODE_FOR_storewb_pairdi_di) ++ || (icode >= CODE_FOR_storewb_pairsf_si ++ && icode <= CODE_FOR_storewb_pairdf_di) ++ || (icode >= CODE_FOR_storewb_pairti_si ++ && icode <= CODE_FOR_storewb_pairtf_di)); ++ return true; ++ } + return false; + } + +diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi +index 0c6415a9c..3b6e90bf2 100644 +--- a/gcc/doc/tm.texi ++++ b/gcc/doc/tm.texi +@@ -12113,12 +12113,16 @@ object files that are not referenced from @code{main} and uses export + lists. + @end defmac + +-@deftypefn {Target Hook} bool TARGET_IS_LDP_INSN (int @var{icode}) +-Return true if icode is corresponding to any of the LDP instruction types. ++@deftypefn {Target Hook} bool TARGET_IS_LDP_INSN (int @var{icode}, bool *@var{has_wb}) ++Return true if @var{icode} is corresponding to any of the LDP instruction ++types. If @var{has_wb} is not NULL then its value is set to true if LDP ++contains post-index or pre-index operation. + @end deftypefn + +-@deftypefn {Target Hook} bool TARGET_IS_STP_INSN (int @var{icode}) +-Return true if icode is corresponding to any of the STP instruction types. ++@deftypefn {Target Hook} bool TARGET_IS_STP_INSN (int @var{icode}, bool *@var{has_wb}) ++Return true if @var{icode} is corresponding to any of the STP instruction ++types. If @var{has_wb} is not NULL then its value is set to true if STP ++contains post-index or pre-index operation. 
+ @end deftypefn + + @deftypefn {Target Hook} bool TARGET_CANNOT_MODIFY_JUMPS_P (void) +diff --git a/gcc/sched-rgn.cc b/gcc/sched-rgn.cc +index b4df8bdc5..5f61de1c8 100644 +--- a/gcc/sched-rgn.cc ++++ b/gcc/sched-rgn.cc +@@ -3956,7 +3956,7 @@ make_pass_sched_fusion (gcc::context *ctxt) + + namespace { + +-/* Def-use analisys special functions implementation. */ ++/* Def-use analysis special functions implementation. */ + + static struct df_link * + get_defs (rtx_insn *insn, rtx reg) +@@ -4032,42 +4032,66 @@ const pass_data pass_data_split_complex_instructions = { + (TODO_df_verify | TODO_df_finish), /* Todo_flags_finish. */ + }; + ++/* Pass split_complex_instructions finds LOAD PAIR instructions (LDP) that can ++ be split into two LDR instructions. It splits only those LDP for which one ++ half of the requested memory is contained in the preceding STORE (STR/STP) ++ instruction whose base register has the same definition. This allows ++ to use hardware store-to-load forwarding mechanism and to get one half of ++ requested memory from the store queue of CPU. ++ ++ TODO: Add split of STP. ++ TODO: Add split of vector STP and LDP. */ + class pass_split_complex_instructions : public rtl_opt_pass + { + private: +- enum complex_instructions_t ++ enum mem_access_insn_t + { + UNDEFINED, + LDP, ++ /* LDP with post-index (see loadwb_pair in config/aarch64.md). */ ++ LDP_WB, ++ /* LDP that contains one destination register in RTL IR ++ (see movti_aarch64 in config/aarch64.md). */ + LDP_TI, + STP, ++ /* STP with pre-index (see storewb_pair in config/aarch64.md). */ ++ STP_WB, ++ /* STP that contains one source register in RTL IR ++ (see movti_aarch64 in config/aarch64.md). 
*/ ++ STP_TI, + STR + }; + +- void split_complex_insn (rtx_insn *insn); +- void split_ldp_ti (rtx_insn *insn); +- void split_ldp_with_offset (rtx_insn *ldp_insn); +- void split_simple_ldp (rtx_insn *ldp_insn); +- void split_ldp_stp (rtx_insn *insn); +- complex_instructions_t get_insn_type (rtx_insn *insn); +- +- basic_block bb; +- rtx_insn *insn; + std::set dependent_stores_candidates; + std::set ldp_to_split_list; + +- complex_instructions_t complex_insn_type = UNDEFINED; +- bool is_store_insn (rtx_insn *insn); +- bool is_ldp_dependent_on_store (rtx_insn *ldp_insn, basic_block bb); ++ void split_ldp_ti (rtx_insn *insn); ++ void split_ldp (rtx_insn *ldp_insn); ++ /* Emit a NEW_INSNS chain, recognize instruction code of each new instruction ++ and replace OLD_INSN with the emitted sequence. */ ++ void replace_insn (rtx_insn *old_insn, rtx_insn *new_insns); ++ ++ mem_access_insn_t get_insn_type (rtx_insn *insn); ++ bool is_typeof_ldp (mem_access_insn_t insn_type); ++ bool is_typeof_stp (mem_access_insn_t insn_type); ++ + bool bfs_for_reg_dependent_store (rtx_insn *ldp_insn, basic_block search_bb, + rtx_insn *search_insn, + int search_range + = param_ldp_dependency_search_range); + bool is_store_reg_dependent (rtx_insn *ldp_insn, rtx_insn *str_insn); + void init_df (); +- void find_dependent_stores_candidates (rtx_insn *ldp_insn); +- int get_insn_offset (rtx_insn *insn, complex_instructions_t insn_type, +- int *arith_operation_ptr = NULL); ++ void find_dependent_stores_candidates (rtx_insn *ldp_insn, ++ mem_access_insn_t insn_type); ++ ++ rtx get_memref (rtx_insn *insn, mem_access_insn_t insn_type); ++ rtx get_base_reg (rtx memref); ++ /* Set OFFSET to the offset value. Returns TRUE if MEMREF's address ++ expression is supported, FALSE otherwise. */ ++ bool get_offset (rtx memref, int &offset); ++ /* Return size of memory referenced by MEMREF. Returns -1 if INSN_TYPE ++ wasn't recognized. 
*/ ++ int get_unit_size (rtx memref, mem_access_insn_t insn_type); + + public: + pass_split_complex_instructions (gcc::context *ctxt) +@@ -4080,28 +4104,22 @@ public: + virtual unsigned int + execute (function *) + { +- enum rtx_code ldp_memref_code; ++ basic_block bb; ++ rtx_insn *insn; ++ + init_df (); + ldp_to_split_list.clear (); + FOR_EACH_BB_FN (bb, cfun) + { + FOR_BB_INSNS (bb, insn) + { +- complex_instructions_t insn_type = get_insn_type (insn); +- /* TODO: Add splitting of STP instructions. */ +- if (insn_type != LDP && insn_type != LDP_TI) ++ mem_access_insn_t insn_type = get_insn_type (insn); ++ if (!is_typeof_ldp (insn_type)) + continue; +- /* TODO: Currently support only ldp_ti and ldp with REG or +- PLUS/MINUS offset expression. */ +- if (insn_type == LDP_TI) +- { +- ldp_memref_code = GET_CODE (XEXP (XEXP (PATTERN (insn), 1), +- 0)); +- if (ldp_memref_code != REG && ldp_memref_code != PLUS +- && ldp_memref_code != MINUS) +- continue; +- } +- if (is_ldp_dependent_on_store (insn, bb)) ++ ++ find_dependent_stores_candidates (insn, insn_type); ++ if (!dependent_stores_candidates.empty () ++ && bfs_for_reg_dependent_store (insn, bb, insn)) + { + ldp_to_split_list.insert (insn); + } +@@ -4110,18 +4128,107 @@ public: + + for (std::set::iterator i = ldp_to_split_list.begin (); + i != ldp_to_split_list.end (); ++i) +- split_complex_insn (*i); ++ split_ldp (*i); + + return 0; + } + }; // class pass_split_complex_instructions + + bool +-pass_split_complex_instructions::is_ldp_dependent_on_store (rtx_insn *ldp_insn, +- basic_block bb) ++pass_split_complex_instructions::is_typeof_ldp ( ++ mem_access_insn_t insn_type) + { +- find_dependent_stores_candidates (ldp_insn); +- return bfs_for_reg_dependent_store (ldp_insn, bb, ldp_insn); ++ return (insn_type == LDP || insn_type == LDP_WB || insn_type == LDP_TI); ++} ++ ++bool ++pass_split_complex_instructions::is_typeof_stp ( ++ mem_access_insn_t insn_type) ++{ ++ return (insn_type == STP || insn_type == STP_WB || 
insn_type == STP_TI); ++} ++ ++rtx ++pass_split_complex_instructions::get_memref ( ++ rtx_insn *insn, mem_access_insn_t insn_type) ++{ ++ rtx insn_pat = PATTERN (insn); ++ rtx memref = NULL; ++ ++ switch (insn_type) ++ { ++ case LDP: ++ memref = SET_SRC (XVECEXP (insn_pat, 0, 0)); ++ break; ++ case LDP_WB: ++ memref = SET_SRC (XVECEXP (insn_pat, 0, 1)); ++ break; ++ case LDP_TI: ++ memref = SET_SRC (insn_pat); ++ break; ++ case STP: ++ memref = SET_DEST (XVECEXP (insn_pat, 0, 0)); ++ break; ++ case STP_WB: ++ memref = SET_DEST (XVECEXP (insn_pat, 0, 1)); ++ break; ++ case STP_TI: ++ case STR: ++ memref = SET_DEST (insn_pat); ++ break; ++ default: ++ break; ++ } ++ ++ if (memref && !MEM_P (memref)) ++ return NULL; ++ return memref; ++} ++ ++rtx ++pass_split_complex_instructions::get_base_reg (rtx memref) ++{ ++ if (!memref || !MEM_P (memref)) ++ return NULL; ++ rtx addr_exp = XEXP (memref, 0); ++ ++ switch (GET_CODE (addr_exp)) ++ { ++ case REG: ++ return addr_exp; ++ case PLUS: ++ case PRE_DEC: ++ case PRE_INC: ++ case POST_DEC: ++ case POST_INC: ++ if (REG_P (XEXP (addr_exp, 0))) ++ return XEXP (addr_exp, 0); ++ default: ++ return NULL; ++ } ++} ++ ++int ++pass_split_complex_instructions::get_unit_size ( ++ rtx memref, mem_access_insn_t insn_type) ++{ ++ if (!memref) ++ return -1; ++ ++ switch (insn_type) ++ { ++ case LDP: ++ case STP: ++ case LDP_WB: ++ case STP_WB: ++ case STR: ++ return GET_MODE_SIZE (GET_MODE (memref)).to_constant (); ++ case LDP_TI: ++ case STP_TI: ++ return GET_MODE_SIZE (E_DImode).to_constant (); ++ default: ++ return -1; ++ } + } + + bool +@@ -4135,9 +4242,9 @@ pass_split_complex_instructions::bfs_for_reg_dependent_store ( + { + if (!current_search_insn) + return false; +- bool checking_result +- = is_store_reg_dependent (ldp_insn, current_search_insn); +- if (checking_result) ++ ++ if (dependent_stores_candidates.find (current_search_insn) ++ != dependent_stores_candidates.end ()) + { + if (dump_file) + { +@@ -4185,30 +4292,29 @@ 
pass_split_complex_instructions::init_df () + + void + pass_split_complex_instructions::find_dependent_stores_candidates ( +- rtx_insn *ldp_insn) ++ rtx_insn *ldp_insn, mem_access_insn_t insn_type) + { + dependent_stores_candidates.clear (); +- df_ref use; + +- FOR_EACH_INSN_USE (use, ldp_insn) +- { +- df_link *defs = get_defs (ldp_insn, DF_REF_REG (use)); +- if (!defs) +- return; ++ rtx base_reg = get_base_reg (get_memref (ldp_insn, insn_type)); ++ if (!base_reg) ++ return; + +- for (df_link *def = defs; def; def = def->next) +- { +- df_link *uses +- = get_uses (DF_REF_INSN (def->ref), DF_REF_REG (def->ref)); +- if (!uses) +- continue; ++ df_link *defs = get_defs (ldp_insn, base_reg); ++ if (!defs) ++ return; + +- for (df_link *use = uses; use; use = use->next) +- { +- if (DF_REF_CLASS (use->ref) == DF_REF_REGULAR +- && is_store_insn (DF_REF_INSN (use->ref))) +- dependent_stores_candidates.insert (DF_REF_INSN (use->ref)); +- } ++ for (df_link *def = defs; def; def = def->next) ++ { ++ df_link *uses = get_uses (DF_REF_INSN (def->ref), DF_REF_REG (def->ref)); ++ if (!uses) ++ continue; ++ for (df_link *use = uses; use; use = use->next) ++ { ++ if (DF_REF_CLASS (use->ref) == DF_REF_REGULAR ++ && DF_REF_INSN (use->ref) != ldp_insn ++ && is_store_reg_dependent (ldp_insn, DF_REF_INSN (use->ref))) ++ dependent_stores_candidates.insert (DF_REF_INSN (use->ref)); + } + } + } +@@ -4217,423 +4323,274 @@ bool + pass_split_complex_instructions::is_store_reg_dependent (rtx_insn *ldp_insn, + rtx_insn *str_insn) + { +- if (!is_store_insn (str_insn) +- || dependent_stores_candidates.find (str_insn) +- == dependent_stores_candidates.end ()) ++ if (!str_insn) + return false; + +- int ldp_offset_sign = UNDEFINED; +- int ldp_offset +- = get_insn_offset (ldp_insn, get_insn_type (ldp_insn), &ldp_offset_sign); +- if (ldp_offset_sign == MINUS) +- ldp_offset = -ldp_offset; ++ mem_access_insn_t st_type = get_insn_type (str_insn); ++ if (!is_typeof_stp (st_type) && st_type != STR) ++ return 
false; + +- int str_offset_sign = UNDEFINED; +- int str_offset = get_insn_offset (str_insn, STR, &str_offset_sign); +- if (str_offset_sign == MINUS) +- str_offset = -str_offset; ++ mem_access_insn_t ld_type = get_insn_type (ldp_insn); ++ rtx ld_memref = get_memref (ldp_insn, ld_type); ++ rtx st_memref = get_memref (str_insn, st_type); ++ rtx ld_base_reg = get_base_reg (ld_memref); ++ rtx st_base_reg = get_base_reg (st_memref); + +- if (str_offset == ldp_offset || str_offset == ldp_offset + 8) +- return true; ++ if (!ld_base_reg || !st_base_reg ++ || REGNO (ld_base_reg) != REGNO (st_base_reg)) ++ return false; + +- return false; +-} ++ int ld_offset = 0; ++ int st_offset = 0; ++ if (get_offset (ld_memref, ld_offset) ++ && get_offset (st_memref, st_offset)) ++ { ++ int ld_unit_size = get_unit_size (ld_memref, ld_type); ++ int st_size = get_unit_size (st_memref, st_type); ++ if (st_type != STR) ++ st_size *= 2; + +-bool +-pass_split_complex_instructions::is_store_insn (rtx_insn *insn) +-{ +- if (!insn) +- return false; +- rtx sset_b = single_set (insn); +- /* TODO: The condition below allow to take only store instructions in which +- the memory location's operand is either a register (base) or an plus/minus +- operation (base + #imm). So it might make sense to add support for other +- cases (e.g. multiply and shift). 
*/ +- if (sset_b && MEM_P (SET_DEST (sset_b)) +- && GET_MODE (XEXP (sset_b, 0)) != BLKmode +- && (GET_CODE (XEXP (XEXP (sset_b, 0), 0)) == REG +- || (GET_CODE (XEXP (XEXP (sset_b, 0), 0)) == PLUS +- || GET_CODE (XEXP (XEXP (sset_b, 0), 0)) == MINUS) +- && (GET_CODE (XEXP (XEXP (XEXP (sset_b, 0), 0), 1)) == CONST_INT))) +- return true; ++ if (ld_unit_size < 0 || st_size < 0) ++ return false; ++ ++ bool st_has_low_ld_part = (ld_offset >= st_offset ++ && (ld_offset + ld_unit_size <= st_offset + st_size)); ++ bool st_has_high_ld_part = ((ld_offset + ld_unit_size >= st_offset) ++ && (ld_offset + 2 * ld_unit_size <= st_offset + st_size)); ++ bool st_has_not_full_ld = (ld_offset < st_offset ++ || (ld_offset + 2 * ld_unit_size > st_offset + st_size)); ++ ++ if ((st_has_low_ld_part || st_has_high_ld_part) && st_has_not_full_ld) ++ return true; ++ } + + return false; + } + +-int +-pass_split_complex_instructions::get_insn_offset ( +- rtx_insn *insn, complex_instructions_t insn_type, int *arith_operation_ptr) ++bool ++pass_split_complex_instructions::get_offset (rtx memref, int &offset) + { +- rtx insn_pat = PATTERN (insn); +- int returned_offset = 0; ++ rtx addr_exp = XEXP (memref, 0); + +- rtx offset_expr = NULL; +- rtx offset_value_expr = NULL; +- +- switch (insn_type) ++ switch (GET_CODE (addr_exp)) + { +- case LDP: +- { +- int number_of_sub_insns = XVECLEN (insn_pat, 0); +- +- /* Calculate it's own ofsset of first load insn. 
*/ +- rtx_insn *first_load_insn = NULL; +- if (number_of_sub_insns == 2) ++ case REG: ++ case POST_DEC: ++ case POST_INC: ++ offset = 0; ++ return true; ++ case PRE_DEC: ++ offset = -(GET_MODE_SIZE (GET_MODE (memref)).to_constant ()); ++ return true; ++ case PRE_INC: ++ offset = GET_MODE_SIZE (GET_MODE (memref)).to_constant (); ++ return true; ++ case PLUS: ++ if (CONST_INT_P (XEXP (addr_exp, 1))) + { +- first_load_insn +- = make_insn_raw (copy_rtx (XVECEXP (insn_pat, 0, 0))); +- arith_operation_ptr = NULL; +- +- offset_expr = XEXP (XEXP (PATTERN (first_load_insn), 1), 0); +- if (GET_CODE (offset_expr) == PLUS +- || GET_CODE (offset_expr) == MINUS) +- offset_value_expr +- = XEXP (XEXP (XEXP (PATTERN (first_load_insn), 1), 0), 1); +- else +- offset_expr = NULL; ++ offset = INTVAL (XEXP (addr_exp, 1)); ++ return true; + } +- else if (number_of_sub_insns == 3) +- { +- rtx_insn *offset_sub_insn +- = make_insn_raw (copy_rtx (XVECEXP (insn_pat, 0, 0))); +- +- offset_expr = XEXP (PATTERN (offset_sub_insn), 1); +- offset_value_expr = XEXP (XEXP (PATTERN (offset_sub_insn), 1), 1); +- } +- else +- { +- gcc_assert (false +- && "Wrong number of elements in the ldp_insn vector"); +- } +- break; +- } +- case LDP_TI: +- { +- offset_expr = XEXP (XEXP (insn_pat, 1), 0); +- if (GET_CODE (offset_expr) != PLUS && GET_CODE (offset_expr) != MINUS) +- return 0; +- offset_value_expr = XEXP (XEXP (XEXP (insn_pat, 1), 0), 1); +- break; +- } +- case STR: +- { +- offset_expr = XEXP (XEXP (insn_pat, 0), 0); +- /* If memory location is specified by single base register then the +- offset is zero. 
*/ +- if (GET_CODE (offset_expr) == REG) +- return 0; +- offset_value_expr = XEXP (XEXP (XEXP (insn_pat, 0), 0), 1); +- break; +- } +- default: +- { +- if (dumps_are_enabled && dump_file) +- { +- fprintf (dump_file, "Instruction that was tried to split:\n"); +- print_rtl_single (dump_file, insn); +- } +- gcc_assert (false && "Unsupported instruction type"); +- break; +- } +- } +- +- if (offset_expr != NULL && offset_value_expr +- && GET_CODE (offset_value_expr) == CONST_INT) +- returned_offset = XINT (offset_value_expr, 0); +- +- if (arith_operation_ptr != NULL) +- { +- *arith_operation_ptr = GET_CODE (offset_expr); +- gcc_assert ((*arith_operation_ptr == MINUS +- || *arith_operation_ptr == PLUS) +- && "Unexpected arithmetic operation in the offset expr"); ++ default: ++ return false; + } +- +- return returned_offset; + } + + void +-pass_split_complex_instructions::split_simple_ldp (rtx_insn *ldp_insn) ++pass_split_complex_instructions::replace_insn (rtx_insn *old_insn, ++ rtx_insn *new_insns) + { +- rtx pat = PATTERN (ldp_insn); +- +- rtx_insn *mem_insn_1 = make_insn_raw (copy_rtx (XVECEXP (pat, 0, 0))); +- rtx_insn *mem_insn_2 = make_insn_raw (copy_rtx (XVECEXP (pat, 0, 1))); +- +- int dest_regno = REGNO (SET_DEST (PATTERN (mem_insn_1))); +- int src_regno; +- +- rtx srs_reg_insn = XEXP (SET_SRC (PATTERN (mem_insn_1)), 0); +- +- if (GET_CODE (srs_reg_insn) == REG) +- src_regno = REGNO (srs_reg_insn); +- else +- src_regno = REGNO (XEXP (srs_reg_insn, 0)); +- +- rtx_insn *emited_insn_1, *emited_insn_2; ++ rtx_insn *prev_insn = PREV_INSN (old_insn); ++ start_sequence (); + +- /* in cases like ldp r1,r2,[r1] we emit ldr r2,[r1] first. 
*/ +- if (src_regno == dest_regno) +- std::swap (mem_insn_1, mem_insn_2); ++ emit_insn (new_insns); ++ if (dump_file) ++ { ++ fprintf (dump_file, "Split LDP:\n"); ++ print_rtl_single (dump_file, old_insn); ++ fprintf (dump_file, "Split into:\n"); ++ } + +- emited_insn_1 = emit_insn (PATTERN (mem_insn_1)); +- emited_insn_2 = emit_insn (PATTERN (mem_insn_2)); ++ for (rtx_insn *insn = new_insns; insn; insn = NEXT_INSN (insn)) ++ { ++ INSN_CODE (insn) = recog (PATTERN (insn), insn, NULL); ++ if (dump_file) ++ { ++ print_rtl_single (dump_file, insn); ++ } ++ } + +- int sub_insn_1_code = recog (PATTERN (mem_insn_1), mem_insn_1, 0); +- int sub_insn_2_code = recog (PATTERN (mem_insn_2), mem_insn_2, 0); ++ rtx_insn *seq = get_insns (); ++ unshare_all_rtl_in_chain (seq); ++ end_sequence (); + +- INSN_CODE (emited_insn_1) = sub_insn_1_code; +- INSN_CODE (emited_insn_2) = sub_insn_2_code; ++ emit_insn_after_setloc (seq, prev_insn, INSN_LOCATION (old_insn)); ++ delete_insn_and_edges (old_insn); + } + + void +-pass_split_complex_instructions::split_ldp_with_offset (rtx_insn *ldp_insn) ++pass_split_complex_instructions::split_ldp (rtx_insn *ldp_insn) + { + rtx pat = PATTERN (ldp_insn); +- bool post_index = true; +- +- rtx_insn offset_insn; +- rtx_insn mem_insn_1; +- rtx_insn mem_insn_2; ++ mem_access_insn_t insn_type = get_insn_type (ldp_insn); ++ gcc_assert (is_typeof_ldp (insn_type)); + +- int offset_insn_code; +- int mem_insn_1_code = -1; +- int mem_insn_2_code = -1; ++ rtx load_rtx_1 = NULL; ++ rtx load_rtx_2 = NULL; ++ rtx post_index_rtx = NULL; + +- int offset = 0; +- int arith_operation = UNDEFINED; +- +- for (int i = 0; i < 3; i++) ++ switch (insn_type) + { +- rtx sub_insn = XVECEXP (pat, 0, i); +- rtx_insn *copy_of_sub_insn = make_insn_raw (copy_rtx (sub_insn)); +- int sub_insn_code +- = recog (PATTERN (copy_of_sub_insn), copy_of_sub_insn, 0); +- +- /* If sub_insn is offset related. 
*/ +- if (GET_RTX_CLASS (sub_insn_code) == RTX_UNARY) +- { +- offset_insn = *copy_of_sub_insn; +- offset_insn_code = sub_insn_code; +- gcc_assert (i == 0 +- && "Offset related insn must be the first " +- "element of a parallel insn vector"); +- +- offset = get_insn_offset (ldp_insn, LDP, &arith_operation); +- } +- else +- { +- if (GET_CODE (XEXP (PATTERN (copy_of_sub_insn), 0)) != REG) +- { +- rtx &offset_expr +- = XEXP (XEXP (XEXP (PATTERN (copy_of_sub_insn), 0), 0), 1); +- if (GET_CODE (offset_expr) == CONST_INT) +- { +- int local_offset = XINT (offset_expr, 0); +- offset = (arith_operation == PLUS ? offset : -offset); +- +- offset_expr = GEN_INT (local_offset + offset); +- +- gcc_assert ( +- (arith_operation == MINUS || arith_operation == PLUS) +- && "Unexpected arithmetic operation in offset related " +- "sub_insn"); +- +- if (i == 1) +- post_index = false; +- } +- else +- { +- post_index = true; +- } +- } +- } +- if (i == 1) +- { +- mem_insn_1 = *copy_of_sub_insn; +- mem_insn_1_code = sub_insn_code; +- } +- if (i == 2) +- { +- mem_insn_2 = *copy_of_sub_insn; +- mem_insn_2_code = sub_insn_code; +- } ++ case LDP: ++ load_rtx_1 = copy_rtx (XVECEXP (pat, 0, 0)); ++ load_rtx_2 = copy_rtx (XVECEXP (pat, 0, 1)); ++ break; ++ case LDP_WB: ++ post_index_rtx = copy_rtx (XVECEXP (pat, 0, 0)); ++ load_rtx_1 = copy_rtx (XVECEXP (pat, 0, 1)); ++ load_rtx_2 = copy_rtx (XVECEXP (pat, 0, 2)); ++ break; ++ case LDP_TI: ++ split_ldp_ti (ldp_insn); ++ return; ++ default: ++ return; + } +- gcc_assert (mem_insn_1_code != -1 && mem_insn_2_code != -1 +- && "Uninitialized memory insns"); + +- int dest_regno = REGNO (SET_DEST (PATTERN (&mem_insn_1))); +- int src_regno; +- +- rtx srs_reg_insn = XEXP (SET_SRC (PATTERN (&mem_insn_1)), 0); +- +- if (GET_CODE (srs_reg_insn) == REG) +- src_regno = REGNO (srs_reg_insn); +- else +- src_regno = REGNO (XEXP (srs_reg_insn, 0)); ++ int dest_regno = REGNO (SET_DEST (load_rtx_1)); ++ int base_regno = REGNO (get_base_reg (get_memref (ldp_insn, 
insn_type))); + +- /* Don't split such weird LDP. */ +- if (src_regno == dest_regno) +- return; +- +- rtx_insn *emited_offset_insn; +- if (!post_index) ++ /* In cases like ldp r1,r2,[r1[, #imm]] emit ldr r2,[r1[, #imm]] first. ++ For LDP with post-index don't split such instruction. */ ++ if (base_regno == dest_regno) + { +- emited_offset_insn = emit_insn (PATTERN (&offset_insn)); +- INSN_CODE (emited_offset_insn) = offset_insn_code; ++ if (insn_type == LDP) ++ std::swap (load_rtx_1, load_rtx_2); ++ else ++ return; + } + +- rtx_insn *emited_insn_1 = emit_insn (PATTERN (&mem_insn_1)); +- rtx_insn *emited_insn_2 = emit_insn (PATTERN (&mem_insn_2)); +- +- +- INSN_CODE (emited_insn_1) = mem_insn_1_code; +- INSN_CODE (emited_insn_2) = mem_insn_2_code; +- +- if (post_index) ++ /* Construct the instruction chain for subsequent emitting. */ ++ rtx_insn *insn_seq = make_insn_raw (load_rtx_1); ++ rtx_insn *load_insn_2 = make_insn_raw (load_rtx_2); ++ SET_NEXT_INSN (insn_seq) = load_insn_2; ++ SET_NEXT_INSN (load_insn_2) = NULL; ++ if (post_index_rtx) + { +- emited_offset_insn = emit_insn (PATTERN (&offset_insn)); +- INSN_CODE (emited_offset_insn) = offset_insn_code; ++ rtx_insn *post_index_insn = make_insn_raw (post_index_rtx); ++ SET_NEXT_INSN (load_insn_2) = post_index_insn; ++ SET_NEXT_INSN (post_index_insn) = NULL; + } +-} +- +-void +-pass_split_complex_instructions::split_ldp_stp (rtx_insn *insn) +-{ +- rtx_insn *prev_insn = PREV_INSN (insn); +- int number_of_sub_insns = XVECLEN (PATTERN (insn), 0); +- +- start_sequence (); + +- if (number_of_sub_insns == 2) +- split_simple_ldp (insn); +- else if (number_of_sub_insns == 3) +- split_ldp_with_offset (insn); +- else +- gcc_assert (false && "Broken complex insn vector"); +- +- rtx_insn *seq = get_insns (); +- unshare_all_rtl_in_chain (seq); +- end_sequence (); +- +- emit_insn_after_setloc (seq, prev_insn, INSN_LOCATION (insn)); +- delete_insn_and_edges (insn); ++ replace_insn (ldp_insn, insn_seq); + } + + void + 
pass_split_complex_instructions::split_ldp_ti (rtx_insn *insn) + { +- rtx_insn *prev_insn = PREV_INSN (insn); +- rtx_insn *load_insn_1 = make_insn_raw (copy_rtx (PATTERN (insn))); +- rtx_insn *load_insn_2 = make_insn_raw (copy_rtx (PATTERN (insn))); +- +- rtx reg_insn_1 = XEXP (PATTERN (load_insn_1), 0); +- rtx mem_insn_1 = XEXP (PATTERN (load_insn_1), 1); +- rtx mem_insn_2 = XEXP (PATTERN (load_insn_2), 1); +- +- PUT_MODE (mem_insn_1, DImode); +- PUT_MODE (mem_insn_2, DImode); +- +- int reg_no_1 = REGNO (reg_insn_1); ++ rtx pat = PATTERN (insn); ++ rtx memref = get_memref (insn, LDP_TI); ++ int unit_size = get_unit_size (memref, LDP_TI); ++ rtx base_reg = get_base_reg (memref); ++ rtx dest_reg = SET_DEST (pat); ++ ++ rtx reg_index_rtx = NULL; ++ rtx load_rtx_1 = NULL; ++ rtx load_rtx_2 = NULL; ++ bool post_index = false; ++ int offset = 0; + +- XEXP (PATTERN (load_insn_1), 0) = gen_rtx_REG (DImode, reg_no_1); +- XEXP (PATTERN (load_insn_2), 0) = gen_rtx_REG (DImode, reg_no_1 + 1); ++ rtx load_1_memref = gen_rtx_MEM (DImode, base_reg); + +- rtx load_insn_2_plus_expr = XEXP (XEXP (PATTERN (load_insn_2), 1), 0); +- if (GET_CODE (load_insn_2_plus_expr) == REG) ++ rtx addr_expr = XEXP (memref, 0); ++ if (GET_CODE (addr_expr) == PLUS) + { +- XEXP (XEXP (PATTERN (load_insn_2), 1), 0) +- = gen_rtx_PLUS (DImode, +- gen_rtx_REG (DImode, REGNO (load_insn_2_plus_expr)), +- GEN_INT (GET_MODE_SIZE (DImode))); ++ offset = INTVAL (XEXP (addr_expr, 1)); ++ XEXP (load_1_memref, 0) = gen_rtx_PLUS (DImode, base_reg, ++ GEN_INT (offset)); + } +- else +- { +- rtx load_insn_2_offset_expr +- = XEXP (XEXP (XEXP (PATTERN (load_insn_2), 1), 0), 1); + +- if (load_insn_2_offset_expr == NULL) +- return; +- +- if (GET_CODE (load_insn_2_offset_expr) == CONST_INT) +- { +- int load_insn_2_offset = XINT (load_insn_2_offset_expr, 0); +- XEXP (XEXP (XEXP (PATTERN (load_insn_2), 1), 0), 1) +- = GEN_INT (load_insn_2_offset + GET_MODE_SIZE (DImode)); +- } +- } +- +- start_sequence (); ++ rtx 
load_2_memref = gen_rtx_MEM (DImode, ++ gen_rtx_PLUS (DImode, base_reg, GEN_INT (offset + unit_size))); + +- int src_regno; +- rtx srs_reg_insn = XEXP (XEXP (PATTERN (load_insn_1), 1), 0); ++ load_rtx_1 = gen_rtx_SET (gen_rtx_REG (DImode, REGNO (dest_reg)), ++ load_1_memref); ++ load_rtx_2 = gen_rtx_SET (gen_rtx_REG (DImode, REGNO (dest_reg) + 1), ++ load_2_memref); + +- if (GET_CODE (srs_reg_insn) == REG) +- src_regno = REGNO (srs_reg_insn); +- else +- src_regno = REGNO (XEXP (srs_reg_insn, 0)); ++ if (GET_CODE (addr_expr) == PRE_INC || GET_CODE (addr_expr) == PRE_DEC ++ || GET_CODE (addr_expr) == POST_INC || GET_CODE (addr_expr) == POST_DEC) ++ { ++ /* The amount of increment or decrement is equal to size of ++ machine-mode of the containing MEMREF (see rtl.def). */ ++ int index_offset = GET_MODE_SIZE (GET_MODE (memref)).to_constant (); + +- /* in cases like ldp r1,r2,[r1] we emit ldr r2,[r1] first. */ +- if (src_regno == reg_no_1) +- std::swap (load_insn_1, load_insn_2); ++ if (GET_CODE (addr_expr) == PRE_DEC || GET_CODE (addr_expr) == POST_DEC) ++ index_offset = -index_offset; + +- rtx_insn *emited_load_insn_1 = emit_insn (PATTERN (load_insn_1)); +- rtx_insn *emited_load_insn_2 = emit_insn (PATTERN (load_insn_2)); ++ if (GET_CODE (addr_expr) == POST_INC || GET_CODE (addr_expr) == POST_DEC) ++ post_index = true; + +- INSN_CODE (emited_load_insn_1) +- = recog (PATTERN (emited_load_insn_1), emited_load_insn_1, 0); +- INSN_CODE (emited_load_insn_2) +- = recog (PATTERN (emited_load_insn_2), emited_load_insn_2, 0); ++ reg_index_rtx = gen_rtx_SET (base_reg, ++ gen_rtx_PLUS (DImode, base_reg, ++ GEN_INT (index_offset))); ++ } + +- rtx_insn *seq = get_insns (); +- unshare_all_rtl_in_chain (seq); +- end_sequence (); ++ /* In cases like ldp r1,r2,[r1] we emit ldr r2,[r1] first. 
*/ ++ if (REGNO (base_reg) == REGNO (dest_reg)) ++ std::swap (load_rtx_1, load_rtx_2); + +- emit_insn_after_setloc (seq, prev_insn, INSN_LOCATION (insn)); +- delete_insn_and_edges (insn); +-} ++ /* Construct the instruction chain for subsequent emitting. */ ++ rtx_insn *insn_seq = make_insn_raw (load_rtx_1); ++ rtx_insn *load_insn_2 = make_insn_raw (load_rtx_2); ++ SET_NEXT_INSN (insn_seq) = load_insn_2; ++ SET_NEXT_INSN (load_insn_2) = NULL; ++ if (post_index && reg_index_rtx) ++ { ++ rtx_insn *post_index_insn = make_insn_raw (reg_index_rtx); ++ SET_NEXT_INSN (load_insn_2) = post_index_insn; ++ SET_NEXT_INSN (post_index_insn) = NULL; ++ } ++ else if (!post_index && reg_index_rtx) ++ { ++ rtx_insn *pre_index = make_insn_raw (reg_index_rtx); ++ SET_NEXT_INSN (pre_index) = insn_seq; ++ insn_seq = pre_index; ++ } + +-void +-pass_split_complex_instructions::split_complex_insn (rtx_insn *insn) +-{ +- complex_instructions_t insn_type = get_insn_type (insn); +- /* TODO: Add splitting of STP instructions. */ +- if (insn_type == LDP || insn_type == STP) +- split_ldp_stp (insn); +- else if (insn_type == LDP_TI) +- split_ldp_ti (insn); +- else +- gcc_assert (false && "Unsupported type of insn to split"); ++ replace_insn (insn, insn_seq); + } + +-pass_split_complex_instructions::complex_instructions_t ++pass_split_complex_instructions::mem_access_insn_t + pass_split_complex_instructions::get_insn_type (rtx_insn *insn) + { + if (!INSN_P (insn)) + return UNDEFINED; + +- rtx pat = PATTERN (insn); +- int icode = recog (PATTERN (insn), insn, NULL); ++ int icode = INSN_CODE (insn); ++ if (icode == -1) ++ icode = recog (PATTERN (insn), insn, 0); ++ bool has_wb = false; ++ ++ if (targetm.is_ldp_insn (icode, &has_wb)) ++ return (has_wb ? LDP_WB : LDP); + +- if (GET_CODE (pat) == PARALLEL) ++ if (targetm.is_stp_insn (icode, &has_wb)) ++ return (has_wb ? 
STP_WB : STP); ++ ++ rtx set_insn = single_set (insn); ++ if (set_insn && (GET_MODE (SET_SRC (set_insn)) == E_TImode ++ || GET_MODE (SET_DEST (set_insn)) == E_TImode)) + { +- if (targetm.is_ldp_insn (icode)) +- { +- return LDP; +- } +- if (targetm.is_stp_insn (icode)) +- { +- return STP; +- } +- else +- { +- return UNDEFINED; +- } ++ if (MEM_P (SET_SRC (set_insn)) && REG_P (SET_DEST (set_insn))) ++ return LDP_TI; ++ if (MEM_P (SET_DEST (set_insn)) && REG_P (SET_SRC (set_insn))) ++ return STP_TI; + } +- rtx set_insn = single_set (insn); +- if (set_insn && GET_CODE (XEXP (set_insn, 1)) == MEM +- && GET_MODE (XEXP (set_insn, 1)) == E_TImode) +- return LDP_TI; ++ ++ if (set_insn && MEM_P (SET_DEST (set_insn)) && REG_P (SET_SRC (set_insn)) ++ && GET_MODE (SET_DEST (set_insn)) != BLKmode) ++ return STR; + + return UNDEFINED; + } +diff --git a/gcc/target.def b/gcc/target.def +index a3a50b474..8797a21d5 100644 +--- a/gcc/target.def ++++ b/gcc/target.def +@@ -2679,13 +2679,19 @@ modes and they have different conditional execution capability, such as ARM.", + + DEFHOOK + (is_ldp_insn, +- "Return true if icode is corresponding to any of the LDP instruction types.", +- bool, (int icode), NULL) ++ "Return true if @var{icode} is corresponding to any of the LDP instruction\n\ ++types. If @var{has_wb} is not NULL then its value is set to true if LDP\n\ ++contains post-index or pre-index operation.", ++ bool, (int icode, bool *has_wb), ++ NULL) + + DEFHOOK + (is_stp_insn, +- "Return true if icode is corresponding to any of the STP instruction types.", +- bool, (int icode), NULL) ++ "Return true if @var{icode} is corresponding to any of the STP instruction\n\ ++types. 
If @var{has_wb} is not NULL then its value is set to true if STP\n\ ++contains post-index or pre-index operation.", ++ bool, (int icode, bool *has_wb), ++ NULL) + + DEFHOOK + (gen_ccmp_first, +diff --git a/gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-dont-split.c b/gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-dont-split.c +index 3918d43f6..2d42231dc 100644 +--- a/gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-dont-split.c ++++ b/gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-dont-split.c +@@ -1,5 +1,5 @@ + /* { dg-do compile { target aarch64-*-* } } */ +-/* { dg-additional-options "-fsplit-ldp-stp" } */ ++/* { dg-additional-options "-O1 -fsplit-ldp-stp" } */ + /* + * Tests are: + * Patterns where LDP insns should NOT be split +@@ -15,6 +15,9 @@ simple_ldp_after_store () + (cnote 3 [bb 2] NOTE_INSN_BASIC_BLOCK) + (cinsn 228 (set (reg/i:DI sp) + (reg/i:DI x0))) ++ (cinsn 238 (set (reg/i:DI x1) ++ (reg/i:DI x0))) ++ + (cinsn 101 (set (mem/c:DI + (plus:DI (reg/f:DI sp) + (const_int 32))[1 S4 A32])(reg:DI x0))) +@@ -24,11 +27,27 @@ simple_ldp_after_store () + (set (reg:DI x30) + (mem:DI (plus:DI (reg/f:DI sp) + (const_int 16)) [1 S4 A32]))])) +- (cinsn 11 (use (reg/i:DI sp))) +- (cinsn 12 (use (reg/i:DI cc))) +- (cinsn 13 (use (reg/i:DI x29))) +- (cinsn 14 (use (reg/i:DI x30))) +- (cinsn 15 (use (reg/i:DI x0))) ++ (cinsn 11 (use (reg/i:DI x29))) ++ (cinsn 12 (use (reg/i:DI x30))) ++ ++ /* stp x0, x2, [x1]. */ ++ (cinsn 102 (parallel [ ++ (set (mem:DI (reg/f:DI x1) [1 S4 A32]) ++ (reg:DI x0)) ++ (set (mem:DI (plus:DI (reg/f:DI x1) (const_int 8)) [1 S4 A32]) ++ (reg:DI x2))])) ++ /* ldp x5, x6, [x1]. 
*/ ++ (cinsn 13 (parallel [ ++ (set (reg:DI x5) (mem:DI (reg/f:DI x1) [1 S4 A32])) ++ (set (reg:DI x6) (mem:DI (plus:DI (reg/f:DI x1) ++ (const_int 8)) [1 S4 A32])) ++ ])) ++ (cinsn 14 (use (reg/i:DI x5))) ++ (cinsn 15 (use (reg/i:DI x6))) ++ ++ (cinsn 100 (use (reg/i:DI sp))) ++ (cinsn 200 (use (reg/i:DI cc))) ++ (cinsn 300 (use (reg/i:DI x0))) + (edge-to exit (flags "FALLTHRU")) + ) ;; block 2 + ) ;; insn-chain +@@ -70,5 +89,5 @@ ldp_after_store_in_different_bb () + ) ;; function "ldp_after_store_in_different_bb" + } + +-/* Verify that the output code contains exactly 2 ldp. */ +-/* { dg-final { scan-assembler-times {ldp\t} 2 } } */ +\ No newline at end of file ++/* Verify that the output code contains exactly 3 ldp. */ ++/* { dg-final { scan-assembler-times {ldp\t} 3 } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-split-rearrange.c b/gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-split-rearrange.c +index 653c30f83..59ff82df9 100644 +--- a/gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-split-rearrange.c ++++ b/gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-split-rearrange.c +@@ -1,5 +1,5 @@ + /* { dg-do compile { target aarch64-*-* } } */ +-/* { dg-additional-options "-fsplit-ldp-stp" } */ ++/* { dg-additional-options "-O1 -fsplit-ldp-stp" } */ + /* + * Test is: + * Pattern where LDP insns should be split with rearrangement in order +diff --git a/gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-split.c b/gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-split.c +index dc9f26efb..e25762160 100644 +--- a/gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-split.c ++++ b/gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-split.c +@@ -13,48 +13,131 @@ simple_ldp_after_store () + (block 2 + (edge-from entry (flags "FALLTHRU")) + (cnote 3 [bb 2] NOTE_INSN_BASIC_BLOCK) ++ /* mov sp, x0. */ + (cinsn 228 (set (reg/i:DI sp) +- (reg/i:DI x0))) ++ (reg/i:DI x0))) ++ /* mov x1, x0. */ + (cinsn 238 (set (reg/i:DI x1) +- (reg/i:DI x0))) ++ (reg/i:DI x0))) + ++ /* str x0, [sp, 8]. 
*/ + (cinsn 101 (set (mem/c:DI + (plus:DI (reg/f:DI sp) + (const_int 8))[1 S4 A32])(reg:DI x0))) ++ /* ldp x29, x30, [sp, 8]. */ + (cinsn 10 (parallel [ + (set (reg:DI x29) + (mem:DI (plus:DI (reg/f:DI sp) (const_int 8)) [1 S4 A32])) + (set (reg:DI x30) + (mem:DI (plus:DI (reg/f:DI sp) + (const_int 16)) [1 S4 A32]))])) ++ (cinsn 11 (use (reg/i:DI x29))) ++ (cinsn 12 (use (reg/i:DI x30))) + ++ /* str x0, [x1, -16]. */ + (cinsn 102 (set (mem/c:DI (plus:DI (reg/f:DI x1) + (const_int -16)) [1 S4 A32]) + (reg:DI x0))) +- (cinsn 11 (parallel [ ++ /* ldp x3, x4, [x1, -16]. */ ++ (cinsn 13 (parallel [ + (set (reg:DI x3) + (mem:DI (plus:DI (reg/f:DI x1) (const_int -16)) [1 S4 A32])) + (set (reg:DI x4) + (mem:DI (plus:DI (reg/f:DI x1) (const_int -8)) [1 S4 A32])) + ])) ++ (cinsn 14 (use (reg/i:DI x3))) ++ (cinsn 15 (use (reg/i:DI x4))) + ++ /* str x0, [x1]. */ + (cinsn 103 (set (mem/c:DI (reg/f:DI x1) [1 S4 A32]) + (reg:DI x0))) +- (cinsn 12 (parallel [ ++ /* ldp x5, x6, [x1]. */ ++ (cinsn 16 (parallel [ + (set (reg:DI x5) (mem:DI (reg/f:DI x1) [1 S4 A32])) + (set (reg:DI x6) (mem:DI (plus:DI (reg/f:DI x1) + (const_int 8)) [1 S4 A32])) + ])) ++ (cinsn 17 (use (reg/i:DI x5))) ++ (cinsn 18 (use (reg/i:DI x6))) + +- (cinsn 13 (use (reg/i:DI sp))) +- (cinsn 14 (use (reg/i:DI cc))) +- (cinsn 15 (use (reg/i:DI x29))) +- (cinsn 16 (use (reg/i:DI x30))) +- (cinsn 17 (use (reg/i:DI x0))) +- (cinsn 18 (use (reg/i:DI x3))) +- (cinsn 19 (use (reg/i:DI x4))) +- (cinsn 20 (use (reg/i:DI x5))) +- (cinsn 21 (use (reg/i:DI x6))) ++ /* ldp x29, x30, [sp], 96. */ ++ (cinsn 19 (parallel [ ++ (set (reg/f:DI sp) ++ (plus:DI (reg/f:DI sp) (const_int 96))) ++ (set (reg:DI x29) ++ (mem:DI (reg/f:DI sp) [1 S4 A32])) ++ (set (reg:DI x30) ++ (mem:DI (plus:DI (reg/f:DI sp) ++ (const_int 8)) [1 S4 A32]))])) ++ (cinsn 20 (use (reg/i:DI x29))) ++ (cinsn 21 (use (reg/i:DI x30))) ++ ++ /* stp x0, x2, [x1, 128]. 
*/ ++ (cinsn 104 (parallel [ ++ (set (mem:DI (plus:DI (reg/f:DI x1) (const_int 128)) [1 S4 A32]) ++ (reg:DI x0)) ++ (set (mem:DI (plus:DI (reg/f:DI x1) (const_int 136)) [1 S4 A32]) ++ (reg:DI x2))])) ++ /* ldp x29, x30, [x1, 120]. */ ++ (cinsn 22 (parallel [ ++ (set (reg:DI x29) ++ (mem:DI (plus:DI (reg/f:DI x1) (const_int 120)) [1 S4 A32])) ++ (set (reg:DI x30) ++ (mem:DI (plus:DI (reg/f:DI x1) (const_int 128)) [1 S4 A32]))])) ++ (cinsn 23 (use (reg/i:DI x29))) ++ (cinsn 24 (use (reg/i:DI x30))) ++ ++ /* stp x0, x2, [x1, 128]. */ ++ (cinsn 105 (parallel [ ++ (set (mem:DI (plus:DI (reg/f:DI x1) (const_int 128)) [1 S4 A32]) ++ (reg:DI x0)) ++ (set (mem:DI (plus:DI (reg/f:DI x1) (const_int 136)) [1 S4 A32]) ++ (reg:DI x2))])) ++ /* ldp x3, x4, [x1, 136]. */ ++ (cinsn 25 (parallel [ ++ (set (reg:DI x3) ++ (mem:DI (plus:DI (reg/f:DI x1) (const_int 136)) [1 S4 A32])) ++ (set (reg:DI x4) ++ (mem:DI (plus:DI (reg/f:DI x1) (const_int 144)) [1 S4 A32])) ++ ])) ++ (cinsn 26 (use (reg/i:DI x3))) ++ (cinsn 27 (use (reg/i:DI x4))) ++ ++ /* stp w0, w2, [x1, 32]. */ ++ (cinsn 106 (parallel [ ++ (set (mem:SI (plus:DI (reg/f:DI x1) (const_int 32)) [1 S4 A32]) ++ (reg:SI x0)) ++ (set (mem:SI (plus:DI (reg/f:DI x1) (const_int 36)) [1 S4 A32]) ++ (reg:SI x2))])) ++ /* ldp x5, x6, [x1, 32]. */ ++ (cinsn 28 (parallel [ ++ (set (reg:DI x5) (mem:DI (plus:DI (reg/f:DI x1) ++ (const_int 32)) [1 S4 A32])) ++ (set (reg:DI x6) (mem:DI (plus:DI (reg/f:DI x1) ++ (const_int 40)) [1 S4 A32])) ++ ])) ++ (cinsn 29 (use (reg/i:DI x5))) ++ (cinsn 30 (use (reg/i:DI x6))) ++ ++ /* stp w0, w2, [x1, 40]. */ ++ (cinsn 107 (parallel [ ++ (set (mem:SI (plus:DI (reg/f:DI x1) (const_int 40)) [1 S4 A32]) ++ (reg:SI x0)) ++ (set (mem:SI (plus:DI (reg/f:DI x1) (const_int 44)) [1 S4 A32]) ++ (reg:SI x2))])) ++ /* ldp x5, x6, [x1, 32]. 
*/ ++ (cinsn 31 (parallel [ ++ (set (reg:DI x5) (mem:DI (plus:DI (reg/f:DI x1) ++ (const_int 32)) [1 S4 A32])) ++ (set (reg:DI x6) (mem:DI (plus:DI (reg/f:DI x1) ++ (const_int 40)) [1 S4 A32])) ++ ])) ++ (cinsn 32 (use (reg/i:DI x5))) ++ (cinsn 33 (use (reg/i:DI x6))) ++ ++ (cinsn 100 (use (reg/i:DI sp))) ++ (cinsn 200 (use (reg/i:DI cc))) ++ (cinsn 400 (use (reg/i:DI x0))) + (edge-to exit (flags "FALLTHRU")) + ) ;; block 2 + ) ;; insn-chain +@@ -69,43 +152,83 @@ ldp_ti_after_store () + (block 2 + (edge-from entry (flags "FALLTHRU")) + (cnote 3 [bb 2] NOTE_INSN_BASIC_BLOCK) ++ /* mov sp, x0. */ + (cinsn 228 (set (reg/i:DI sp) +- (reg/i:DI x0))) ++ (reg/i:DI x0))) ++ /* mov x2, x0. */ + (cinsn 238 (set (reg/i:DI x2) +- (reg/i:DI x0))) +- ++ (reg/i:DI x0))) ++ /* str x0, [sp, 136]. */ + (cinsn 101 (set (mem/c:DI + (plus:DI (reg/f:DI sp) + (const_int 136))[1 S4 A32])(reg:DI x0))) +- (insn 81 (set (reg:TI x0 [1 S4 A32]) ++ /* ldp x0, x1, [sp, 136]. */ ++ (cinsn 81 (set (reg:TI x0 [1 S4 A32]) + (mem/c:TI (plus:DI (reg/f:DI sp) +- (const_int 136 )) [1 S4 A32])) +- (expr_list:REG_EQUIV (mem/c:TI (plus:DI (reg/f:DI sfp) +- (const_int -24 )) [1 S4 A32]) +- (nil))) +- ++ (const_int 136)) [1 S4 A32]))) ++ /* str x0, [x2, -16]. */ + (cinsn 102 (set (mem/c:DI (plus:DI (reg/f:DI x2) +- (const_int -16)) [1 S4 A32]) ++ (const_int -16)) [1 S4 A32]) + (reg:DI x0))) +- (insn 82 (set (reg:TI x3 [1 S4 A32]) ++ /* ldp x3, x4, [x2, -16]. */ ++ (cinsn 82 (set (reg:TI x3 [1 S4 A32]) + (mem/c:TI (plus:DI (reg/f:DI x2) +- (const_int -16)) [1 S4 A32]))) +- ++ (const_int -16)) [1 S4 A32]))) ++ /* str x0, [x2]. */ + (cinsn 103 (set (mem/c:DI (reg/f:DI x2) [1 S4 A32]) + (reg:DI x0))) +- (insn 83 (set (reg:TI x5 [1 S4 A32]) ++ /* ldp x5, x6, [x2]. */ ++ (cinsn 83 (set (reg:TI x5 [1 S4 A32]) + (mem/c:TI (reg/f:DI x2) [1 S4 A32]))) + ++ /* stp x0, x1, [sp, -8]. */ ++ (cinsn 104 (set (mem:TI (plus:DI (reg/v/f:DI sp) ++ (const_int -8)) [1 S4 A32]) ++ (reg:TI x0))) ++ /* ldp x5, x6, [sp], -16. 
*/ ++ (cinsn 84 (set (reg/v:TI x5 [1 S4 A32]) ++ (mem:TI (post_dec:DI (reg/v/f:DI sp)) [1 S4 A32]))) ++ (cinsn 85 (use (reg/i:DI x5))) ++ (cinsn 86 (use (reg/i:DI x6))) ++ ++ /* stp x0, x1, [sp, 8]. */ ++ (cinsn 105 (set (mem:TI (plus:DI (reg/v/f:DI sp) ++ (const_int 8)) [1 S4 A32]) ++ (reg:TI x0))) ++ /* ldp x5, x6, [sp], -16. */ ++ (cinsn 87 (set (reg/v:TI x5 [1 S4 A32]) ++ (mem:TI (post_dec:DI (reg/v/f:DI sp)) [1 S4 A32]))) ++ (cinsn 88 (use (reg/i:DI x5))) ++ (cinsn 89 (use (reg/i:DI x6))) ++ ++ /* Intersects with insn 102. */ ++ /* ldp x2, x3, [x2, -16]!. */ ++ (cinsn 90 (set (reg/v:TI x2 [1 S4 A32]) ++ (mem:TI (pre_dec:DI (reg/v/f:DI x2)) [1 S4 A32]))) ++ (cinsn 91 (use (reg/i:DI x2))) ++ (cinsn 92 (use (reg/i:DI x3))) ++ ++ /* mov x2, x0. */ ++ (cinsn 248 (set (reg/i:DI x2) ++ (reg/i:DI x0))) ++ /* str x0, [x2, 16]. */ ++ (cinsn 106 (set (mem:DI (plus:DI (reg/v/f:DI x2) ++ (const_int 16)) [1 S4 A32]) ++ (reg:DI x0))) ++ /* ldp x3, x4, [x2, 16]!. */ ++ (cinsn 93 (set (reg/v:TI x3 [1 S4 A32]) ++ (mem:TI (pre_inc:DI (reg/v/f:DI x2)) [1 S4 A32]))) ++ (cinsn 94 (use (reg/i:DI x3))) ++ (cinsn 95 (use (reg/i:DI x4))) ++ + (cinsn 11 (use (reg/i:DI sp))) + (cinsn 12 (use (reg/i:DI cc))) + (cinsn 13 (use (reg/i:DI x29))) + (cinsn 14 (use (reg/i:DI x30))) + (cinsn 15 (use (reg/i:DI x0))) + (cinsn 16 (use (reg/i:DI x3))) +- (cinsn 17 (use (reg/i:DI x5))) + (cinsn 18 (use (reg/i:DI x1))) + (cinsn 19 (use (reg/i:DI x4))) +- (cinsn 20 (use (reg/i:DI x6))) + (edge-to exit (flags "FALLTHRU")) + ) ;; block 2 + ) ;; insn-chain +-- +2.33.0 + diff --git a/0044-Port-maxmin-patch-to-GCC-12.patch b/0044-Port-maxmin-patch-to-GCC-12.patch new file mode 100644 index 0000000000000000000000000000000000000000..2423c12ca70fdc1daa4d4d6cc9000e336238c541 --- /dev/null +++ b/0044-Port-maxmin-patch-to-GCC-12.patch @@ -0,0 +1,378 @@ +From a3013c074cd2ab5f71eb98a587a627f38c68656c Mon Sep 17 00:00:00 2001 +From: Diachkov Ilia +Date: Thu, 22 Feb 2024 17:07:24 +0800 +Subject: [PATCH 12/18] Port 
maxmin patch to GCC 12 + +--- + gcc/config/aarch64/aarch64-simd.md | 256 ++++++++++++++++++++++++++ + gcc/config/aarch64/predicates.md | 19 ++ + gcc/testsuite/gcc.dg/combine-maxmin.c | 46 +++++ + 3 files changed, 321 insertions(+) + create mode 100755 gcc/testsuite/gcc.dg/combine-maxmin.c + +diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md +index 82f73805f..de92802f5 100644 +--- a/gcc/config/aarch64/aarch64-simd.md ++++ b/gcc/config/aarch64/aarch64-simd.md +@@ -1138,6 +1138,82 @@ + [(set_attr "type" "neon_compare,neon_shift_imm")] + ) + ++;; Simplify the extension with following truncation for shift+neg operation. ++ ++(define_insn_and_split "*aarch64_sshr_neg_v8hi" ++ [(set (match_operand:V8HI 0 "register_operand" "=w") ++ (vec_concat:V8HI ++ (truncate:V4HI ++ (ashiftrt:V4SI ++ (neg:V4SI ++ (sign_extend:V4SI ++ (vec_select:V4HI ++ (match_operand:V8HI 1 "register_operand") ++ (match_operand:V8HI 3 "vect_par_cnst_lo_half")))) ++ (match_operand:V4SI 2 "maxmin_arith_shift_operand"))) ++ (truncate:V4HI ++ (ashiftrt:V4SI ++ (neg:V4SI ++ (sign_extend:V4SI ++ (vec_select:V4HI ++ (match_dup 1) ++ (match_operand:V8HI 4 "vect_par_cnst_hi_half")))) ++ (match_dup 2)))))] ++ "TARGET_SIMD" ++ "#" ++ "&& true" ++ [(set (match_operand:V8HI 0 "register_operand" "=w") ++ (ashiftrt:V8HI ++ (neg:V8HI ++ (match_operand:V8HI 1 "register_operand" "w")) ++ (match_operand:V8HI 2 "aarch64_simd_imm_minus_one")))] ++ { ++ /* Reduce the shift amount to smaller mode. */ ++ int val = INTVAL (CONST_VECTOR_ENCODED_ELT (operands[2], 0)) ++ - (GET_MODE_UNIT_BITSIZE (GET_MODE (operands[2])) / 2); ++ operands[2] = aarch64_simd_gen_const_vector_dup (V8HImode, val); ++ } ++ [(set_attr "type" "multiple")] ++) ++ ++;; The helper definition that allows combiner to use the previous pattern. 
++ ++(define_insn_and_split "*aarch64_sshr_neg_tmpv8hi" ++ [(set (match_operand:V8HI 0 "register_operand" "=w") ++ (vec_concat:V8HI ++ (truncate:V4HI ++ (ashiftrt:V4SI ++ (neg:V4SI ++ (match_operand:V4SI 1 "register_operand" "w")) ++ (match_operand:V4SI 2 "maxmin_arith_shift_operand"))) ++ (truncate:V4HI ++ (ashiftrt:V4SI ++ (neg:V4SI ++ (match_operand:V4SI 3 "register_operand" "w")) ++ (match_dup 2)))))] ++ "TARGET_SIMD" ++ "#" ++ "&& true" ++ [(set (match_operand:V4SI 1 "register_operand" "=w") ++ (ashiftrt:V4SI ++ (neg:V4SI ++ (match_dup 1)) ++ (match_operand:V4SI 2 "maxmin_arith_shift_operand"))) ++ (set (match_operand:V4SI 3 "register_operand" "=w") ++ (ashiftrt:V4SI ++ (neg:V4SI ++ (match_dup 3)) ++ (match_dup 2))) ++ (set (match_operand:V8HI 0 "register_operand" "=w") ++ (vec_concat:V8HI ++ (truncate:V4HI ++ (match_dup 1)) ++ (truncate:V4HI ++ (match_dup 3))))] ++ "" ++ [(set_attr "type" "multiple")] ++) ++ + (define_insn "*aarch64_simd_sra" + [(set (match_operand:VDQ_I 0 "register_operand" "=w") + (plus:VDQ_I +@@ -1714,6 +1790,26 @@ + } + ) + ++(define_insn "vec_pack_trunc_shifted_" ++ [(set (match_operand: 0 "register_operand" "=&w") ++ (vec_concat: ++ (truncate: ++ (ashiftrt:VQN (match_operand:VQN 1 "register_operand" "w") ++ (match_operand:VQN 2 "half_size_operand" "w"))) ++ (truncate: ++ (ashiftrt:VQN (match_operand:VQN 3 "register_operand" "w") ++ (match_operand:VQN 4 "half_size_operand" "w")))))] ++ "TARGET_SIMD" ++ { ++ if (BYTES_BIG_ENDIAN) ++ return "uzp2\\t%0., %3., %1."; ++ else ++ return "uzp2\\t%0., %1., %3."; ++ } ++ [(set_attr "type" "neon_permute") ++ (set_attr "length" "4")] ++) ++ + (define_insn "aarch64_shrn_insn_le" + [(set (match_operand: 0 "register_operand" "=w") + (vec_concat: +@@ -6652,6 +6748,166 @@ + [(set_attr "type" "neon_tst")] + ) + ++;; Simplify the extension with following truncation for cmtst-like operation. 
++ ++(define_insn_and_split "*aarch64_cmtst_arith_v8hi" ++ [(set (match_operand:V8HI 0 "register_operand" "=w") ++ (vec_concat:V8HI ++ (plus:V4HI ++ (truncate:V4HI ++ (eq:V4SI ++ (sign_extend:V4SI ++ (vec_select:V4HI ++ (and:V8HI ++ (match_operand:V8HI 1 "register_operand") ++ (match_operand:V8HI 2 "aarch64_bic_imm_for_maxmin")) ++ (match_operand:V8HI 3 "vect_par_cnst_lo_half"))) ++ (match_operand:V4SI 4 "aarch64_simd_or_scalar_imm_zero"))) ++ (match_operand:V4HI 5 "aarch64_simd_imm_minus_one")) ++ (plus:V4HI ++ (truncate:V4HI ++ (eq:V4SI ++ (sign_extend:V4SI ++ (vec_select:V4HI ++ (and:V8HI ++ (match_dup 1) ++ (match_dup 2)) ++ (match_operand:V8HI 6 "vect_par_cnst_hi_half"))) ++ (match_dup 4))) ++ (match_dup 5))))] ++ "TARGET_SIMD && !reload_completed" ++ "#" ++ "&& true" ++ [(set (match_operand:V8HI 6 "register_operand" "=w") ++ (match_operand:V8HI 2 "aarch64_bic_imm_for_maxmin")) ++ (set (match_operand:V8HI 0 "register_operand" "=w") ++ (plus:V8HI ++ (eq:V8HI ++ (and:V8HI ++ (match_operand:V8HI 1 "register_operand" "w") ++ (match_dup 6)) ++ (match_operand:V8HI 4 "aarch64_simd_imm_zero")) ++ (match_operand:V8HI 5 "aarch64_simd_imm_minus_one")))] ++ { ++ if (can_create_pseudo_p ()) ++ { ++ int val = INTVAL (CONST_VECTOR_ENCODED_ELT (operands[4], 0)); ++ operands[4] = aarch64_simd_gen_const_vector_dup (V8HImode, val); ++ int val2 = INTVAL (CONST_VECTOR_ENCODED_ELT (operands[5], 0)); ++ operands[5] = aarch64_simd_gen_const_vector_dup (V8HImode, val2); ++ ++ operands[6] = gen_reg_rtx (V8HImode); ++ } ++ else ++ FAIL; ++ } ++ [(set_attr "type" "neon_tst_q")] ++) ++ ++;; Three helper definitions that allow combiner to use the previous pattern. 
++ ++(define_insn_and_split "*aarch64_cmtst_arith_tmp_lo_v8hi" ++ [(set (match_operand:V4SI 0 "register_operand" "=w") ++ (neg:V4SI ++ (eq:V4SI ++ (sign_extend:V4SI ++ (vec_select:V4HI ++ (and:V8HI ++ (match_operand:V8HI 1 "register_operand") ++ (match_operand:V8HI 2 "aarch64_bic_imm_for_maxmin")) ++ (match_operand:V8HI 3 "vect_par_cnst_lo_half"))) ++ (match_operand:V4SI 4 "aarch64_simd_or_scalar_imm_zero"))))] ++ "TARGET_SIMD && !reload_completed" ++ "#" ++ "&& true" ++ [(set (match_operand:V8HI 5 "register_operand" "=w") ++ (and:V8HI ++ (match_operand:V8HI 1 "register_operand") ++ (match_operand:V8HI 2 "aarch64_bic_imm_for_maxmin"))) ++ (set (match_operand:V4SI 0 "register_operand" "=w") ++ (sign_extend:V4SI ++ (vec_select:V4HI ++ (match_dup 5) ++ (match_operand:V8HI 3 "vect_par_cnst_lo_half")))) ++ (set (match_dup 0) ++ (neg:V4SI ++ (eq:V4SI ++ (match_dup 0) ++ (match_operand:V4SI 4 "aarch64_simd_or_scalar_imm_zero"))))] ++ { ++ if (can_create_pseudo_p ()) ++ operands[5] = gen_reg_rtx (V8HImode); ++ else ++ FAIL; ++ } ++ [(set_attr "type" "multiple")] ++) ++ ++(define_insn_and_split "*aarch64_cmtst_arith_tmp_hi_v8hi" ++ [(set (match_operand:V4SI 0 "register_operand" "=w") ++ (neg:V4SI ++ (eq:V4SI ++ (sign_extend:V4SI ++ (vec_select:V4HI ++ (and:V8HI ++ (match_operand:V8HI 1 "register_operand") ++ (match_operand:V8HI 2 "aarch64_bic_imm_for_maxmin")) ++ (match_operand:V8HI 3 "vect_par_cnst_hi_half"))) ++ (match_operand:V4SI 4 "aarch64_simd_or_scalar_imm_zero"))))] ++ "TARGET_SIMD && !reload_completed" ++ "#" ++ "&& true" ++ [(set (match_operand:V8HI 5 "register_operand" "=w") ++ (and:V8HI ++ (match_operand:V8HI 1 "register_operand") ++ (match_operand:V8HI 2 "aarch64_bic_imm_for_maxmin"))) ++ (set (match_operand:V4SI 0 "register_operand" "=w") ++ (sign_extend:V4SI ++ (vec_select:V4HI ++ (match_dup 5) ++ (match_operand:V8HI 3 "vect_par_cnst_hi_half")))) ++ (set (match_dup 0) ++ (neg:V4SI ++ (eq:V4SI ++ (match_dup 0) ++ (match_operand:V4SI 4 
"aarch64_simd_or_scalar_imm_zero"))))] ++ { ++ if (can_create_pseudo_p ()) ++ operands[5] = gen_reg_rtx (V8HImode); ++ else ++ FAIL; ++ } ++ [(set_attr "type" "multiple")] ++) ++ ++(define_insn_and_split "*aarch64_cmtst_arith_tmpv8hi" ++ [(set (match_operand:V8HI 0 "register_operand" "=w") ++ (vec_concat:V8HI ++ (truncate:V4HI ++ (not:V4SI ++ (match_operand:V4SI 1 "register_operand" "w"))) ++ (truncate:V4HI ++ (not:V4SI ++ (match_operand:V4SI 2 "register_operand" "w")))))] ++ "TARGET_SIMD" ++ "#" ++ "&& true" ++ [(set (match_operand:V4SI 1 "register_operand" "=w") ++ (not:V4SI ++ (match_dup 1))) ++ (set (match_operand:V4SI 2 "register_operand" "=w") ++ (not:V4SI ++ (match_dup 2))) ++ (set (match_operand:V8HI 0 "register_operand" "=w") ++ (vec_concat:V8HI ++ (truncate:V4HI ++ (match_dup 1)) ++ (truncate:V4HI ++ (match_dup 2))))] ++ "" ++ [(set_attr "type" "multiple")] ++) ++ + (define_insn_and_split "aarch64_cmtstdi" + [(set (match_operand:DI 0 "register_operand" "=w,r") + (neg:DI +diff --git a/gcc/config/aarch64/predicates.md b/gcc/config/aarch64/predicates.md +index 07c14aacb..1b8496c07 100644 +--- a/gcc/config/aarch64/predicates.md ++++ b/gcc/config/aarch64/predicates.md +@@ -118,6 +118,25 @@ + (match_test "aarch64_simd_valid_immediate (op, NULL, + AARCH64_CHECK_ORR)")))) + ++(define_predicate "aarch64_bic_imm_for_maxmin" ++ (match_code "const_vector") ++{ ++ if (!aarch64_simd_valid_immediate (op, NULL, AARCH64_CHECK_BIC)) ++ return false; ++ op = unwrap_const_vec_duplicate (op); ++ unsigned int size = GET_MODE_UNIT_BITSIZE (mode); ++ return CONST_INT_P (op) ++ && ((~UINTVAL (op)) < (((long unsigned int) 1 << size) - 1)); ++}) ++ ++(define_predicate "maxmin_arith_shift_operand" ++ (match_code "const_vector") ++{ ++ op = unwrap_const_vec_duplicate (op); ++ unsigned int size = GET_MODE_UNIT_BITSIZE (mode) - 1; ++ return CONST_INT_P (op) && (UINTVAL (op) == size); ++}) ++ + (define_predicate "aarch64_reg_or_bic_imm" + (ior (match_operand 0 "register_operand") + (and 
(match_code "const_vector") +diff --git a/gcc/testsuite/gcc.dg/combine-maxmin.c b/gcc/testsuite/gcc.dg/combine-maxmin.c +new file mode 100755 +index 000000000..06bce7029 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/combine-maxmin.c +@@ -0,0 +1,46 @@ ++/* { dg-do compile { target aarch64-*-* } } */ ++/* { dg-options "-O3 -fdump-rtl-combine-all" } */ ++ ++/* The test checks usage of smax/smin insns for clip evaluation and ++ * uzp1/uzp2 insns for vector element narrowing. It's inspired by ++ * sources of x264 codec. */ ++ ++typedef unsigned char uint8_t; ++typedef long int intptr_t; ++typedef signed short int int16_t; ++ ++static __attribute__((always_inline)) inline uint8_t clip (int x ) ++{ ++ return ( (x & ~((1 << 8)-1)) ? (-x)>>31 & ((1 << 8)-1) : x ); ++} ++ ++void hf (uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src, ++ intptr_t stride, int width, int height, int16_t *buf) ++{ ++ const int pad = (8 > 9) ? (-10 * ((1 << 8)-1)) : 0; ++ for( int y = 0; y < height; y++ ) { ++ for( int x = -2; x < width+3; x++ ) { ++ int v = ((src)[x-2*stride] + (src)[x+3*stride] - 5*((src)[x-stride] ++ + (src)[x+2*stride]) + 20*((src)[x] + (src)[x+stride])); ++ dstv[x] = clip ( (v + 16) >> 5 ); ++ buf[x+2] = v + pad; ++ } ++ for( int x = 0; x < width; x++ ) ++ dstc[x] = clip ((((buf+2)[x-2*1] + (buf+2)[x+3*1] - 5*((buf+2)[x-1] ++ + (buf+2)[x+2*1]) + 20*((buf+2)[x] + (buf+2)[x+1])) ++ - 32*pad + 512) >> 10); ++ for( int x = 0; x < width; x++ ) ++ dsth[x] = clip ((((src)[x-2*1] + (src)[x+3*1] - 5*((src)[x-1] ++ + (src)[x+2*1]) + 20*((src)[x] + (src)[x+1])) ++ + 16) >> 5); ++ dsth += stride; ++ dstv += stride; ++ dstc += stride; ++ src += stride; ++ } ++} ++ ++/* { dg-final { scan-assembler-times {smax\t} 4 } } */ ++/* { dg-final { scan-assembler-times {smin\t} 4 } } */ ++/* { dg-final { scan-assembler-times {cmtst\t} 2 } } */ ++/* { dg-final { scan-assembler-times {uzp1\t} 6 } } */ +-- +2.33.0 + diff --git a/0045-Port-moving-minmask-pattern-to-gimple-to-GCC-12.patch 
b/0045-Port-moving-minmask-pattern-to-gimple-to-GCC-12.patch new file mode 100644 index 0000000000000000000000000000000000000000..a5a786f6fb6cc05c72ed413ea09775d4279f20a7 --- /dev/null +++ b/0045-Port-moving-minmask-pattern-to-gimple-to-GCC-12.patch @@ -0,0 +1,239 @@ +From 11da40d18e35219961226d40f11b0702b8649044 Mon Sep 17 00:00:00 2001 +From: Pronin Alexander 00812787 +Date: Thu, 22 Feb 2024 17:13:27 +0800 +Subject: [PATCH 13/18] Port moving minmask pattern to gimple to GCC 12 + +--- + gcc/common.opt | 4 + + gcc/match.pd | 104 ++++++++++++++++++++++++ + gcc/testsuite/gcc.dg/combine-maxmin-1.c | 15 ++++ + gcc/testsuite/gcc.dg/combine-maxmin-2.c | 14 ++++ + gcc/testsuite/gcc.dg/combine-maxmin.c | 19 +++-- + 5 files changed, 151 insertions(+), 5 deletions(-) + create mode 100644 gcc/testsuite/gcc.dg/combine-maxmin-1.c + create mode 100644 gcc/testsuite/gcc.dg/combine-maxmin-2.c + +diff --git a/gcc/common.opt b/gcc/common.opt +index 6c6fabb31..3a5004271 100644 +--- a/gcc/common.opt ++++ b/gcc/common.opt +@@ -1846,6 +1846,10 @@ fif-conversion-gimple + Common Var(flag_if_conversion_gimple) Optimization + Perform conversion of conditional jumps to branchless equivalents during gimple transformations. + ++fconvert-minmax ++Common Var(flag_convert_minmax) Optimization ++Convert saturating clipping to min max. ++ + fstack-reuse= + Common Joined RejectNegative Enum(stack_reuse_level) Var(flag_stack_reuse) Init(SR_ALL) Optimization + -fstack-reuse=[all|named_vars|none] Set stack reuse level for local variables. +diff --git a/gcc/match.pd b/gcc/match.pd +index 61866cb90..3a19e93b3 100644 +--- a/gcc/match.pd ++++ b/gcc/match.pd +@@ -8031,3 +8031,107 @@ and, + (plus:c@4 (op2:c @0 @1) + (plus:c@5 (double_size_mul_overflow_check_lo @0 @1 @3) (op3:c @0 @1)))) + (if (single_use (@4) && single_use (@5))))) ++ ++/* MinMax pattern matching helpers. More info on the transformation below. */ ++ ++/* Match (a & 0b11..100..0) pattern. 
*/ ++(match (minmax_cmp_arg @0 @1) ++ (bit_and @0 INTEGER_CST@1) ++ (if (wi::popcount (~wi::to_widest (@1) + 1) == 1))) ++ ++/* Match (inversed_sign_bit >> sign_bit_pos) pattern. ++ This statement is blocking for the transformation of unsigned integers. ++ Do type check here to avoid unnecessary duplications. */ ++(match (minmax_sat_arg @0) ++ (rshift (negate @0) INTEGER_CST@1) ++ (if (!TYPE_UNSIGNED (TREE_TYPE (@0)) ++ && wi::eq_p (wi::to_widest (@1), TYPE_PRECISION (TREE_TYPE (@0)) - 1)))) ++ ++/* Transform ((x & ~mask) ? (-x)>>31 & mask : x) to (min (max (x, 0), mask)). ++ The matched pattern can be described as saturated clipping. ++ ++ The pattern supports truncation via both casts and bit_and. ++ Also there are patterns for possible inverted conditions. */ ++(if (flag_convert_minmax) ++/* Truncation via casts. Unfortunately convert? cannot be applied here ++ because convert and cond take different number of arguments. */ ++ (simplify ++ (convert ++ (cond ++ (ne (minmax_cmp_arg @0 INTEGER_CST@1) integer_zerop) ++ (convert? (minmax_sat_arg @0)) ++ (convert? @0))) ++ (if (wi::geu_p (~wi::to_widest (@1) + 1, TYPE_PRECISION (type))) ++ (with { tree mask = build_int_cst (integer_type_node, ~tree_to_shwi (@1)); } ++ (convert (min (max @0 { integer_zero_node; }) ++ { mask; }))))) ++ (simplify ++ (cond ++ (ne (minmax_cmp_arg @0 INTEGER_CST@1) integer_zerop) ++ (convert? (minmax_sat_arg @0)) ++ (convert? @0)) ++ (if (wi::geu_p (~wi::to_widest (@1) + 1, TYPE_PRECISION (type))) ++ (with { tree mask = build_int_cst (integer_type_node, ~tree_to_shwi (@1)); } ++ (convert (min (max @0 { integer_zero_node; }) ++ { mask; }))))) ++ ++ (simplify ++ (convert ++ (cond ++ (eq (minmax_cmp_arg @0 INTEGER_CST@1) integer_zerop) ++ (convert? @0) ++ (convert? 
(minmax_sat_arg @0)))) ++ (if (wi::geu_p (~wi::to_widest (@1) + 1, TYPE_PRECISION (type))) ++ (with { tree mask = build_int_cst (integer_type_node, ~tree_to_shwi (@1)); } ++ (convert (min (max @0 { integer_zero_node; }) ++ { mask; }))))) ++ (simplify ++ (cond ++ (eq (minmax_cmp_arg @0 INTEGER_CST@1) integer_zerop) ++ (convert? @0) ++ (convert? (minmax_sat_arg @0))) ++ (if (wi::geu_p (~wi::to_widest (@1) + 1, TYPE_PRECISION (type))) ++ (with { tree mask = build_int_cst (integer_type_node, ~tree_to_shwi (@1)); } ++ (convert (min (max @0 { integer_zero_node; }) ++ { mask; }))))) ++ ++ /* Truncation via bit_and with mask. Same concerns on convert? here. */ ++ (simplify ++ (convert ++ (cond ++ (ne (minmax_cmp_arg @0 INTEGER_CST@1) integer_zerop) ++ (convert? (bit_and (minmax_sat_arg @0) INTEGER_CST@2)) ++ (convert? @0))) ++ (if (wi::to_widest (@2) == ~wi::to_widest (@1)) ++ (with { tree mask = build_int_cst (integer_type_node, ~tree_to_shwi (@1)); } ++ (convert (min (max @0 { integer_zero_node; }) ++ { mask; }))))) ++ (simplify ++ (cond ++ (ne (minmax_cmp_arg @0 INTEGER_CST@1) integer_zerop) ++ (convert? (bit_and (minmax_sat_arg @0) INTEGER_CST@2)) ++ (convert? @0)) ++ (if (wi::to_widest (@2) == ~wi::to_widest (@1)) ++ (with { tree mask = build_int_cst (integer_type_node, ~tree_to_shwi (@1)); } ++ (convert (min (max @0 { integer_zero_node; }) ++ { mask; }))))) ++ ++ (simplify ++ (convert ++ (cond ++ (eq (minmax_cmp_arg @0 INTEGER_CST@1) integer_zerop) ++ (convert? @0) ++ (convert? (bit_and (minmax_sat_arg @0) INTEGER_CST@2)))) ++ (if (wi::to_widest (@2) == ~wi::to_widest (@1)) ++ (with { tree mask = build_int_cst (integer_type_node, ~tree_to_shwi (@1)); } ++ (convert (min (max @0 { integer_zero_node; }) ++ { mask; }))))) ++ (simplify ++ (cond ++ (eq (minmax_cmp_arg @0 INTEGER_CST@1) integer_zerop) ++ (convert? @0) ++ (convert? 
(bit_and (minmax_sat_arg @0) INTEGER_CST@2))) ++ (if (wi::to_widest (@2) == ~wi::to_widest (@1)) ++ (with { tree mask = build_int_cst (integer_type_node, ~tree_to_shwi (@1)); } ++ (convert (min (max @0 { integer_zero_node; }) ++ { mask; })))))) +diff --git a/gcc/testsuite/gcc.dg/combine-maxmin-1.c b/gcc/testsuite/gcc.dg/combine-maxmin-1.c +new file mode 100644 +index 000000000..859ff7df8 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/combine-maxmin-1.c +@@ -0,0 +1,15 @@ ++/* { dg-do compile { target aarch64-*-* } } */ ++/* { dg-options "-O3 -fconvert-minmax" } */ ++ ++#include ++ ++__attribute__((noinline)) ++void test (int32_t *restrict a, int32_t *restrict x) ++{ ++ for (int i = 0; i < 4; i++) ++ a[i] = ((((-x[i]) >> 31) ^ x[i]) ++ & (-((int32_t)((x[i] & (~((1 << 8)-1))) == 0)))) ^ ((-x[i]) >> 31); ++} ++ ++/* { dg-final { scan-assembler-not {smax\t} } } */ ++/* { dg-final { scan-assembler-not {smin\t} } } */ +diff --git a/gcc/testsuite/gcc.dg/combine-maxmin-2.c b/gcc/testsuite/gcc.dg/combine-maxmin-2.c +new file mode 100644 +index 000000000..63d4d85b3 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/combine-maxmin-2.c +@@ -0,0 +1,14 @@ ++/* { dg-do compile { target aarch64-*-* } } */ ++/* { dg-options "-O3 -fconvert-minmax" } */ ++ ++#include ++ ++__attribute__((noinline)) ++void test (int8_t *restrict a, int32_t *restrict x) ++{ ++ for (int i = 0; i < 8; i++) ++ a[i] = ((x[i] & ~((1 << 9)-1)) ? 
(-x[i])>>31 & ((1 << 9)-1) : x[i]); ++} ++ ++/* { dg-final { scan-assembler-times {smax\t} 4 } } */ ++/* { dg-final { scan-assembler-times {smin\t} 4 } } */ +diff --git a/gcc/testsuite/gcc.dg/combine-maxmin.c b/gcc/testsuite/gcc.dg/combine-maxmin.c +index 06bce7029..a984fa560 100755 +--- a/gcc/testsuite/gcc.dg/combine-maxmin.c ++++ b/gcc/testsuite/gcc.dg/combine-maxmin.c +@@ -1,5 +1,5 @@ + /* { dg-do compile { target aarch64-*-* } } */ +-/* { dg-options "-O3 -fdump-rtl-combine-all" } */ ++/* { dg-options "-O3 -fconvert-minmax" } */ + + /* The test checks usage of smax/smin insns for clip evaluation and + * uzp1/uzp2 insns for vector element narrowing. It's inspired by +@@ -19,20 +19,26 @@ void hf (uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src, + { + const int pad = (8 > 9) ? (-10 * ((1 << 8)-1)) : 0; + for( int y = 0; y < height; y++ ) { ++ /* This loop is not being vectorized now. */ + for( int x = -2; x < width+3; x++ ) { + int v = ((src)[x-2*stride] + (src)[x+3*stride] - 5*((src)[x-stride] + + (src)[x+2*stride]) + 20*((src)[x] + (src)[x+stride])); + dstv[x] = clip ( (v + 16) >> 5 ); + buf[x+2] = v + pad; + } ++ ++ /* Produces two versions of the code: 3xUZP1/2xMAX/2xMIN + 1xUZP1/1xMAX/1xMIN. */ + for( int x = 0; x < width; x++ ) + dstc[x] = clip ((((buf+2)[x-2*1] + (buf+2)[x+3*1] - 5*((buf+2)[x-1] + + (buf+2)[x+2*1]) + 20*((buf+2)[x] + (buf+2)[x+1])) + - 32*pad + 512) >> 10); ++ ++ /* Priduces two versions of the code: 1xUZP1/2xMAX/2xMIN + 0xUZP1/1xMAX/1xMIN. 
*/ + for( int x = 0; x < width; x++ ) + dsth[x] = clip ((((src)[x-2*1] + (src)[x+3*1] - 5*((src)[x-1] + + (src)[x+2*1]) + 20*((src)[x] + (src)[x+1])) + + 16) >> 5); ++ + dsth += stride; + dstv += stride; + dstc += stride; +@@ -40,7 +46,10 @@ void hf (uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src, + } + } + +-/* { dg-final { scan-assembler-times {smax\t} 4 } } */ +-/* { dg-final { scan-assembler-times {smin\t} 4 } } */ +-/* { dg-final { scan-assembler-times {cmtst\t} 2 } } */ +-/* { dg-final { scan-assembler-times {uzp1\t} 6 } } */ ++/* Max is performed on 0 from signed values, match smax exactly. */ ++/* { dg-final { scan-assembler-times {smax\t} 6 } } */ ++/* Min is performed on signed val>0 and a mask, min sign doesn't matter. */ ++/* { dg-final { scan-assembler-times {[us]min\t} 6 } } */ ++/* All of the vectorized patterns are expected to be matched. */ ++/* { dg-final { scan-assembler-not {cmtst\t} } } */ ++/* { dg-final { scan-assembler-times {uzp1\t} 5 } } */ +-- +2.33.0 + diff --git a/0046-Add-new-pattern-to-pass-the-maxmin-tests.patch b/0046-Add-new-pattern-to-pass-the-maxmin-tests.patch new file mode 100644 index 0000000000000000000000000000000000000000..9ceba88090b58a20e5d3c4d2d6c70327cfbd9f47 --- /dev/null +++ b/0046-Add-new-pattern-to-pass-the-maxmin-tests.patch @@ -0,0 +1,65 @@ +From dbcb2630c426c8dd2117b5ce625da8422dd8cd65 Mon Sep 17 00:00:00 2001 +From: Diachkov Ilia +Date: Thu, 22 Feb 2024 17:20:17 +0800 +Subject: [PATCH 14/18] Add new pattern to pass the maxmin tests + +--- + gcc/match.pd | 24 ++++++++++++++++++++++++ + gcc/testsuite/gcc.dg/combine-maxmin.c | 2 +- + 2 files changed, 25 insertions(+), 1 deletion(-) + +diff --git a/gcc/match.pd b/gcc/match.pd +index 3a19e93b3..aee58e47b 100644 +--- a/gcc/match.pd ++++ b/gcc/match.pd +@@ -8038,6 +8038,10 @@ and, + (match (minmax_cmp_arg @0 @1) + (bit_and @0 INTEGER_CST@1) + (if (wi::popcount (~wi::to_widest (@1) + 1) == 1))) ++/* Match ((unsigned) a > 0b0..01..1) pattern. 
*/ ++(match (minmax_cmp_arg1 @0 @1) ++ (gt @0 INTEGER_CST@1) ++ (if (wi::popcount (wi::to_widest (@1) + 1) == 1))) + + /* Match (inversed_sign_bit >> sign_bit_pos) pattern. + This statement is blocking for the transformation of unsigned integers. +@@ -8095,6 +8099,26 @@ and, + (convert (min (max @0 { integer_zero_node; }) + { mask; }))))) + ++ (simplify ++ (convert ++ (cond ++ (minmax_cmp_arg1 (convert? @0) INTEGER_CST@1) ++ (convert? (minmax_sat_arg @0)) ++ (convert? @0))) ++ (if (wi::geu_p (wi::to_widest (@1) + 1, TYPE_PRECISION (type))) ++ (with { tree mask = build_int_cst (integer_type_node, tree_to_shwi (@1)); } ++ (convert (min (max (convert:integer_type_node @0) { integer_zero_node; }) ++ { mask; }))))) ++ (simplify ++ (cond ++ (minmax_cmp_arg1 (convert? @0) INTEGER_CST@1) ++ (convert? (minmax_sat_arg @0)) ++ (convert? @0)) ++ (if (wi::geu_p (wi::to_widest (@1) + 1, TYPE_PRECISION (type))) ++ (with { tree mask = build_int_cst (integer_type_node, tree_to_shwi (@1)); } ++ (convert (min (max (convert:integer_type_node @0) { integer_zero_node; }) ++ { mask; }))))) ++ + /* Truncation via bit_and with mask. Same concerns on convert? here. */ + (simplify + (convert +diff --git a/gcc/testsuite/gcc.dg/combine-maxmin.c b/gcc/testsuite/gcc.dg/combine-maxmin.c +index a984fa560..5c0c9cc49 100755 +--- a/gcc/testsuite/gcc.dg/combine-maxmin.c ++++ b/gcc/testsuite/gcc.dg/combine-maxmin.c +@@ -52,4 +52,4 @@ void hf (uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src, + /* { dg-final { scan-assembler-times {[us]min\t} 6 } } */ + /* All of the vectorized patterns are expected to be matched. 
*/ + /* { dg-final { scan-assembler-not {cmtst\t} } } */ +-/* { dg-final { scan-assembler-times {uzp1\t} 5 } } */ ++/* { dg-final { scan-assembler-times {uzp1\t} 2 } } */ +-- +2.33.0 + diff --git a/0047-AES-Implement-AES-pattern-matching.patch b/0047-AES-Implement-AES-pattern-matching.patch new file mode 100644 index 0000000000000000000000000000000000000000..cd983bf2c5ff86e3037b9f0963e6bdc3c9b77fc0 --- /dev/null +++ b/0047-AES-Implement-AES-pattern-matching.patch @@ -0,0 +1,3968 @@ +From 53d321d2fe08f69a29527be157d4bcaaefea04ab Mon Sep 17 00:00:00 2001 +From: Pronin Alexander 00812787 +Date: Wed, 6 Dec 2023 10:46:28 +0300 +Subject: [PATCH 15/18] [AES] Implement AES pattern matching + +--- + gcc/Makefile.in | 1 + + gcc/common.opt | 4 + + gcc/config/aarch64/aarch64.cc | 24 + + gcc/crypto-accel.cc | 2415 +++++++++++++++++ + gcc/doc/tm.texi | 29 + + gcc/doc/tm.texi.in | 12 + + gcc/passes.def | 1 + + gcc/rtl-matcher.h | 367 +++ + gcc/target.def | 41 + + .../gcc.target/aarch64/aes-decrypt.c | 478 ++++ + .../gcc.target/aarch64/aes-encrypt.c | 443 +++ + gcc/timevar.def | 1 + + gcc/tree-pass.h | 1 + + 13 files changed, 3817 insertions(+) + create mode 100644 gcc/crypto-accel.cc + create mode 100644 gcc/rtl-matcher.h + create mode 100644 gcc/testsuite/gcc.target/aarch64/aes-decrypt.c + create mode 100644 gcc/testsuite/gcc.target/aarch64/aes-encrypt.c + +diff --git a/gcc/Makefile.in b/gcc/Makefile.in +index 45705c1f3..876000bda 100644 +--- a/gcc/Makefile.in ++++ b/gcc/Makefile.in +@@ -1332,6 +1332,7 @@ OBJS = \ + cgraphunit.o \ + cgraphclones.o \ + combine.o \ ++ crypto-accel.o \ + combine-stack-adj.o \ + compare-elim.o \ + context.o \ +diff --git a/gcc/common.opt b/gcc/common.opt +index 3a5004271..1eb62ada5 100644 +--- a/gcc/common.opt ++++ b/gcc/common.opt +@@ -1129,6 +1129,10 @@ Common Var(flag_array_widen_compare) Optimization + Extends types for pointers to arrays to improve array comparsion performance. + In some extreme situations this may result in unsafe behavior. 
+ ++fcrypto-accel-aes ++Common Var(flag_crypto_accel_aes) Init(0) Optimization ++Perform crypto acceleration AES pattern matching. ++ + fauto-inc-dec + Common Var(flag_auto_inc_dec) Init(1) Optimization + Generate auto-inc/dec instructions. +diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc +index fa566dd80..9171d9d56 100644 +--- a/gcc/config/aarch64/aarch64.cc ++++ b/gcc/config/aarch64/aarch64.cc +@@ -27569,6 +27569,30 @@ is_aarch64_stp_insn (int icode, bool *has_wb) + #undef TARGET_IS_STP_INSN + #define TARGET_IS_STP_INSN is_aarch64_stp_insn + ++machine_mode ++aarch64_get_v16qi_mode () ++{ ++ return V16QImode; ++} ++ ++#undef TARGET_GET_V16QI_MODE ++#define TARGET_GET_V16QI_MODE aarch64_get_v16qi_mode ++ ++#undef TARGET_GEN_REV32V16QI ++#define TARGET_GEN_REV32V16QI gen_aarch64_rev32v16qi ++ ++#undef TARGET_GEN_AESEV16QI ++#define TARGET_GEN_AESEV16QI gen_aarch64_crypto_aesev16qi ++ ++#undef TARGET_GEN_AESDV16QI ++#define TARGET_GEN_AESDV16QI gen_aarch64_crypto_aesdv16qi ++ ++#undef TARGET_GEN_AESMCV16QI ++#define TARGET_GEN_AESMCV16QI gen_aarch64_crypto_aesmcv16qi ++ ++#undef TARGET_GEN_AESIMCV16QI ++#define TARGET_GEN_AESIMCV16QI gen_aarch64_crypto_aesimcv16qi ++ + #undef TARGET_STACK_PROTECT_GUARD + #define TARGET_STACK_PROTECT_GUARD aarch64_stack_protect_guard + +diff --git a/gcc/crypto-accel.cc b/gcc/crypto-accel.cc +new file mode 100644 +index 000000000..f4e810a6b +--- /dev/null ++++ b/gcc/crypto-accel.cc +@@ -0,0 +1,2415 @@ ++/* Crypto-pattern optimizer. ++ Copyright (C) 2003-2023 Free Software Foundation, Inc. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify it under ++the terms of the GNU General Public License as published by the Free ++Software Foundation; either version 3, or (at your option) any later ++version. 
++ ++GCC is distributed in the hope that it will be useful, but WITHOUT ANY ++WARRANTY; without even the implied warranty of MERCHANTABILITY or ++FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. */ ++ ++#include "config.h" ++#define INCLUDE_VECTOR ++#define INCLUDE_MAP ++#define INCLUDE_SET ++#define INCLUDE_ALGORITHM ++#include "system.h" ++#include "coretypes.h" ++#include "backend.h" ++#include "target.h" ++#include "rtl.h" ++#include "tree.h" ++#include "df.h" ++#include "memmodel.h" ++#include "optabs.h" ++#include "regs.h" ++#include "emit-rtl.h" ++#include "recog.h" ++#include "cfgrtl.h" ++#include "cfgcleanup.h" ++#include "expr.h" ++#include "tree-pass.h" ++#include "rtl-matcher.h" ++ ++/* Basic AES table descryption. */ ++struct aes_table ++{ ++ /* Number of elements per table. */ ++ static const unsigned int table_nelts = 256; ++ /* Number of tables. */ ++ static const unsigned int basic_tables_num = 4; ++ /* Number of rounds. */ ++ static const unsigned int rounds_num = 4; ++ /* Common ID for wrong table. */ ++ static const unsigned int BAD_TABLE = -1; ++ ++ typedef const unsigned int table_type[table_nelts]; ++ typedef table_type *table_map[basic_tables_num]; ++ ++ template ++ static bool is_basic_table (tree ctor, const T ethalon[table_nelts]) ++ { ++ if (TREE_CODE (ctor) != CONSTRUCTOR ++ ||CONSTRUCTOR_NELTS (ctor) != table_nelts) ++ return false; ++ ++ unsigned ix; ++ tree val; ++ FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (ctor), ix, val) ++ if (TREE_INT_CST_LOW (val) != ethalon[ix]) ++ return false; ++ return true; ++ } ++ ++ static unsigned check_table (tree ctor, ++ table_map tables) ++ { ++ for (unsigned i = 0; i < 4; ++i) ++ if (is_basic_table (ctor, *tables[i])) ++ return i; ++ return BAD_TABLE; ++ } ++}; ++ ++/* AES encryption info. 
*/ ++struct aes_encrypt_table : aes_table ++{ ++ typedef enum ++ { ++ TE0, ++ TE1, ++ TE2, ++ TE3, ++ BAD_TABLE = aes_table::BAD_TABLE ++ } table_entry; ++ ++ static table_type Te0; ++ static table_type Te1; ++ static table_type Te2; ++ static table_type Te3; ++ ++ static table_map tables; ++ static table_entry rounds[rounds_num]; ++ static table_entry final_rounds[rounds_num]; ++ ++ static table_entry get_table_id (tree ctor) ++ { ++ return static_cast (check_table (ctor, tables)); ++ } ++}; ++ ++/* AES decryption info. */ ++struct aes_decrypt_table : aes_table ++{ ++ typedef enum ++ { ++ TD0, ++ TD1, ++ TD2, ++ TD3, ++ TD4, ++ BAD_TABLE = aes_table::BAD_TABLE ++ } table_entry; ++ ++ static table_type Td0; ++ static table_type Td1; ++ static table_type Td2; ++ static table_type Td3; ++ ++ static table_map tables; ++ static table_entry rounds[rounds_num]; ++ static table_entry final_rounds[rounds_num]; ++ ++ static const unsigned char Td4[table_nelts]; ++ ++ /* TD4 requires special handler due to type shrinking optimizations. */ ++ static bool is_td4 (tree ctor) ++ { ++ if (is_basic_table (ctor, Td4)) ++ return true; ++ ++ if (TREE_CODE (ctor) != STRING_CST ++ || TREE_STRING_LENGTH (ctor) != table_nelts) ++ return false; ++ ++ const unsigned char *p ++ = (const unsigned char *) TREE_STRING_POINTER (ctor); ++ for (int i = 0; i < TREE_STRING_LENGTH (ctor); ++i) ++ if (p[i] != Td4[i]) ++ return false; ++ ++ return true; ++ } ++ ++ static table_entry get_table_id (tree ctor) ++ { ++ unsigned int res = check_table (ctor, tables); ++ if (res == aes_table::BAD_TABLE ++ && is_td4 (ctor)) ++ return TD4; ++ return static_cast (res); ++ } ++}; ++ ++/* Basic tables info. */ ++aes_encrypt_table::table_map aes_encrypt_table::tables ++ = { &Te0, &Te1, &Te2, &Te3 }; ++aes_decrypt_table::table_map aes_decrypt_table::tables ++ = { &Td0, &Td1, &Td2, &Td3 }; ++ ++/* Round tables permutations info. 
*/ ++aes_encrypt_table::table_entry aes_encrypt_table::rounds[] ++ = {TE0, TE1, TE2, TE3}; ++aes_decrypt_table::table_entry aes_decrypt_table::rounds[] ++ = {TD0, TD1, TD2, TD3}; ++aes_encrypt_table::table_entry aes_encrypt_table::final_rounds[] ++ = {TE2, TE3, TE0, TE1}; ++aes_decrypt_table::table_entry aes_decrypt_table::final_rounds[] ++ = {TD4, TD4, TD4, TD4}; ++ ++aes_encrypt_table::table_type aes_encrypt_table::Te0 = { ++ 0xc66363a5U, 0xf87c7c84U, 0xee777799U, 0xf67b7b8dU, ++ 0xfff2f20dU, 0xd66b6bbdU, 0xde6f6fb1U, 0x91c5c554U, ++ 0x60303050U, 0x02010103U, 0xce6767a9U, 0x562b2b7dU, ++ 0xe7fefe19U, 0xb5d7d762U, 0x4dababe6U, 0xec76769aU, ++ 0x8fcaca45U, 0x1f82829dU, 0x89c9c940U, 0xfa7d7d87U, ++ 0xeffafa15U, 0xb25959ebU, 0x8e4747c9U, 0xfbf0f00bU, ++ 0x41adadecU, 0xb3d4d467U, 0x5fa2a2fdU, 0x45afafeaU, ++ 0x239c9cbfU, 0x53a4a4f7U, 0xe4727296U, 0x9bc0c05bU, ++ 0x75b7b7c2U, 0xe1fdfd1cU, 0x3d9393aeU, 0x4c26266aU, ++ 0x6c36365aU, 0x7e3f3f41U, 0xf5f7f702U, 0x83cccc4fU, ++ 0x6834345cU, 0x51a5a5f4U, 0xd1e5e534U, 0xf9f1f108U, ++ 0xe2717193U, 0xabd8d873U, 0x62313153U, 0x2a15153fU, ++ 0x0804040cU, 0x95c7c752U, 0x46232365U, 0x9dc3c35eU, ++ 0x30181828U, 0x379696a1U, 0x0a05050fU, 0x2f9a9ab5U, ++ 0x0e070709U, 0x24121236U, 0x1b80809bU, 0xdfe2e23dU, ++ 0xcdebeb26U, 0x4e272769U, 0x7fb2b2cdU, 0xea75759fU, ++ 0x1209091bU, 0x1d83839eU, 0x582c2c74U, 0x341a1a2eU, ++ 0x361b1b2dU, 0xdc6e6eb2U, 0xb45a5aeeU, 0x5ba0a0fbU, ++ 0xa45252f6U, 0x763b3b4dU, 0xb7d6d661U, 0x7db3b3ceU, ++ 0x5229297bU, 0xdde3e33eU, 0x5e2f2f71U, 0x13848497U, ++ 0xa65353f5U, 0xb9d1d168U, 0x00000000U, 0xc1eded2cU, ++ 0x40202060U, 0xe3fcfc1fU, 0x79b1b1c8U, 0xb65b5bedU, ++ 0xd46a6abeU, 0x8dcbcb46U, 0x67bebed9U, 0x7239394bU, ++ 0x944a4adeU, 0x984c4cd4U, 0xb05858e8U, 0x85cfcf4aU, ++ 0xbbd0d06bU, 0xc5efef2aU, 0x4faaaae5U, 0xedfbfb16U, ++ 0x864343c5U, 0x9a4d4dd7U, 0x66333355U, 0x11858594U, ++ 0x8a4545cfU, 0xe9f9f910U, 0x04020206U, 0xfe7f7f81U, ++ 0xa05050f0U, 0x783c3c44U, 0x259f9fbaU, 0x4ba8a8e3U, ++ 0xa25151f3U, 0x5da3a3feU, 
0x804040c0U, 0x058f8f8aU, ++ 0x3f9292adU, 0x219d9dbcU, 0x70383848U, 0xf1f5f504U, ++ 0x63bcbcdfU, 0x77b6b6c1U, 0xafdada75U, 0x42212163U, ++ 0x20101030U, 0xe5ffff1aU, 0xfdf3f30eU, 0xbfd2d26dU, ++ 0x81cdcd4cU, 0x180c0c14U, 0x26131335U, 0xc3ecec2fU, ++ 0xbe5f5fe1U, 0x359797a2U, 0x884444ccU, 0x2e171739U, ++ 0x93c4c457U, 0x55a7a7f2U, 0xfc7e7e82U, 0x7a3d3d47U, ++ 0xc86464acU, 0xba5d5de7U, 0x3219192bU, 0xe6737395U, ++ 0xc06060a0U, 0x19818198U, 0x9e4f4fd1U, 0xa3dcdc7fU, ++ 0x44222266U, 0x542a2a7eU, 0x3b9090abU, 0x0b888883U, ++ 0x8c4646caU, 0xc7eeee29U, 0x6bb8b8d3U, 0x2814143cU, ++ 0xa7dede79U, 0xbc5e5ee2U, 0x160b0b1dU, 0xaddbdb76U, ++ 0xdbe0e03bU, 0x64323256U, 0x743a3a4eU, 0x140a0a1eU, ++ 0x924949dbU, 0x0c06060aU, 0x4824246cU, 0xb85c5ce4U, ++ 0x9fc2c25dU, 0xbdd3d36eU, 0x43acacefU, 0xc46262a6U, ++ 0x399191a8U, 0x319595a4U, 0xd3e4e437U, 0xf279798bU, ++ 0xd5e7e732U, 0x8bc8c843U, 0x6e373759U, 0xda6d6db7U, ++ 0x018d8d8cU, 0xb1d5d564U, 0x9c4e4ed2U, 0x49a9a9e0U, ++ 0xd86c6cb4U, 0xac5656faU, 0xf3f4f407U, 0xcfeaea25U, ++ 0xca6565afU, 0xf47a7a8eU, 0x47aeaee9U, 0x10080818U, ++ 0x6fbabad5U, 0xf0787888U, 0x4a25256fU, 0x5c2e2e72U, ++ 0x381c1c24U, 0x57a6a6f1U, 0x73b4b4c7U, 0x97c6c651U, ++ 0xcbe8e823U, 0xa1dddd7cU, 0xe874749cU, 0x3e1f1f21U, ++ 0x964b4bddU, 0x61bdbddcU, 0x0d8b8b86U, 0x0f8a8a85U, ++ 0xe0707090U, 0x7c3e3e42U, 0x71b5b5c4U, 0xcc6666aaU, ++ 0x904848d8U, 0x06030305U, 0xf7f6f601U, 0x1c0e0e12U, ++ 0xc26161a3U, 0x6a35355fU, 0xae5757f9U, 0x69b9b9d0U, ++ 0x17868691U, 0x99c1c158U, 0x3a1d1d27U, 0x279e9eb9U, ++ 0xd9e1e138U, 0xebf8f813U, 0x2b9898b3U, 0x22111133U, ++ 0xd26969bbU, 0xa9d9d970U, 0x078e8e89U, 0x339494a7U, ++ 0x2d9b9bb6U, 0x3c1e1e22U, 0x15878792U, 0xc9e9e920U, ++ 0x87cece49U, 0xaa5555ffU, 0x50282878U, 0xa5dfdf7aU, ++ 0x038c8c8fU, 0x59a1a1f8U, 0x09898980U, 0x1a0d0d17U, ++ 0x65bfbfdaU, 0xd7e6e631U, 0x844242c6U, 0xd06868b8U, ++ 0x824141c3U, 0x299999b0U, 0x5a2d2d77U, 0x1e0f0f11U, ++ 0x7bb0b0cbU, 0xa85454fcU, 0x6dbbbbd6U, 0x2c16163aU, ++}; ++ ++aes_encrypt_table::table_type 
aes_encrypt_table::Te1 = { ++ 0xa5c66363U, 0x84f87c7cU, 0x99ee7777U, 0x8df67b7bU, ++ 0x0dfff2f2U, 0xbdd66b6bU, 0xb1de6f6fU, 0x5491c5c5U, ++ 0x50603030U, 0x03020101U, 0xa9ce6767U, 0x7d562b2bU, ++ 0x19e7fefeU, 0x62b5d7d7U, 0xe64dababU, 0x9aec7676U, ++ 0x458fcacaU, 0x9d1f8282U, 0x4089c9c9U, 0x87fa7d7dU, ++ 0x15effafaU, 0xebb25959U, 0xc98e4747U, 0x0bfbf0f0U, ++ 0xec41adadU, 0x67b3d4d4U, 0xfd5fa2a2U, 0xea45afafU, ++ 0xbf239c9cU, 0xf753a4a4U, 0x96e47272U, 0x5b9bc0c0U, ++ 0xc275b7b7U, 0x1ce1fdfdU, 0xae3d9393U, 0x6a4c2626U, ++ 0x5a6c3636U, 0x417e3f3fU, 0x02f5f7f7U, 0x4f83ccccU, ++ 0x5c683434U, 0xf451a5a5U, 0x34d1e5e5U, 0x08f9f1f1U, ++ 0x93e27171U, 0x73abd8d8U, 0x53623131U, 0x3f2a1515U, ++ 0x0c080404U, 0x5295c7c7U, 0x65462323U, 0x5e9dc3c3U, ++ 0x28301818U, 0xa1379696U, 0x0f0a0505U, 0xb52f9a9aU, ++ 0x090e0707U, 0x36241212U, 0x9b1b8080U, 0x3ddfe2e2U, ++ 0x26cdebebU, 0x694e2727U, 0xcd7fb2b2U, 0x9fea7575U, ++ 0x1b120909U, 0x9e1d8383U, 0x74582c2cU, 0x2e341a1aU, ++ 0x2d361b1bU, 0xb2dc6e6eU, 0xeeb45a5aU, 0xfb5ba0a0U, ++ 0xf6a45252U, 0x4d763b3bU, 0x61b7d6d6U, 0xce7db3b3U, ++ 0x7b522929U, 0x3edde3e3U, 0x715e2f2fU, 0x97138484U, ++ 0xf5a65353U, 0x68b9d1d1U, 0x00000000U, 0x2cc1ededU, ++ 0x60402020U, 0x1fe3fcfcU, 0xc879b1b1U, 0xedb65b5bU, ++ 0xbed46a6aU, 0x468dcbcbU, 0xd967bebeU, 0x4b723939U, ++ 0xde944a4aU, 0xd4984c4cU, 0xe8b05858U, 0x4a85cfcfU, ++ 0x6bbbd0d0U, 0x2ac5efefU, 0xe54faaaaU, 0x16edfbfbU, ++ 0xc5864343U, 0xd79a4d4dU, 0x55663333U, 0x94118585U, ++ 0xcf8a4545U, 0x10e9f9f9U, 0x06040202U, 0x81fe7f7fU, ++ 0xf0a05050U, 0x44783c3cU, 0xba259f9fU, 0xe34ba8a8U, ++ 0xf3a25151U, 0xfe5da3a3U, 0xc0804040U, 0x8a058f8fU, ++ 0xad3f9292U, 0xbc219d9dU, 0x48703838U, 0x04f1f5f5U, ++ 0xdf63bcbcU, 0xc177b6b6U, 0x75afdadaU, 0x63422121U, ++ 0x30201010U, 0x1ae5ffffU, 0x0efdf3f3U, 0x6dbfd2d2U, ++ 0x4c81cdcdU, 0x14180c0cU, 0x35261313U, 0x2fc3ececU, ++ 0xe1be5f5fU, 0xa2359797U, 0xcc884444U, 0x392e1717U, ++ 0x5793c4c4U, 0xf255a7a7U, 0x82fc7e7eU, 0x477a3d3dU, ++ 0xacc86464U, 0xe7ba5d5dU, 0x2b321919U, 
0x95e67373U, ++ 0xa0c06060U, 0x98198181U, 0xd19e4f4fU, 0x7fa3dcdcU, ++ 0x66442222U, 0x7e542a2aU, 0xab3b9090U, 0x830b8888U, ++ 0xca8c4646U, 0x29c7eeeeU, 0xd36bb8b8U, 0x3c281414U, ++ 0x79a7dedeU, 0xe2bc5e5eU, 0x1d160b0bU, 0x76addbdbU, ++ 0x3bdbe0e0U, 0x56643232U, 0x4e743a3aU, 0x1e140a0aU, ++ 0xdb924949U, 0x0a0c0606U, 0x6c482424U, 0xe4b85c5cU, ++ 0x5d9fc2c2U, 0x6ebdd3d3U, 0xef43acacU, 0xa6c46262U, ++ 0xa8399191U, 0xa4319595U, 0x37d3e4e4U, 0x8bf27979U, ++ 0x32d5e7e7U, 0x438bc8c8U, 0x596e3737U, 0xb7da6d6dU, ++ 0x8c018d8dU, 0x64b1d5d5U, 0xd29c4e4eU, 0xe049a9a9U, ++ 0xb4d86c6cU, 0xfaac5656U, 0x07f3f4f4U, 0x25cfeaeaU, ++ 0xafca6565U, 0x8ef47a7aU, 0xe947aeaeU, 0x18100808U, ++ 0xd56fbabaU, 0x88f07878U, 0x6f4a2525U, 0x725c2e2eU, ++ 0x24381c1cU, 0xf157a6a6U, 0xc773b4b4U, 0x5197c6c6U, ++ 0x23cbe8e8U, 0x7ca1ddddU, 0x9ce87474U, 0x213e1f1fU, ++ 0xdd964b4bU, 0xdc61bdbdU, 0x860d8b8bU, 0x850f8a8aU, ++ 0x90e07070U, 0x427c3e3eU, 0xc471b5b5U, 0xaacc6666U, ++ 0xd8904848U, 0x05060303U, 0x01f7f6f6U, 0x121c0e0eU, ++ 0xa3c26161U, 0x5f6a3535U, 0xf9ae5757U, 0xd069b9b9U, ++ 0x91178686U, 0x5899c1c1U, 0x273a1d1dU, 0xb9279e9eU, ++ 0x38d9e1e1U, 0x13ebf8f8U, 0xb32b9898U, 0x33221111U, ++ 0xbbd26969U, 0x70a9d9d9U, 0x89078e8eU, 0xa7339494U, ++ 0xb62d9b9bU, 0x223c1e1eU, 0x92158787U, 0x20c9e9e9U, ++ 0x4987ceceU, 0xffaa5555U, 0x78502828U, 0x7aa5dfdfU, ++ 0x8f038c8cU, 0xf859a1a1U, 0x80098989U, 0x171a0d0dU, ++ 0xda65bfbfU, 0x31d7e6e6U, 0xc6844242U, 0xb8d06868U, ++ 0xc3824141U, 0xb0299999U, 0x775a2d2dU, 0x111e0f0fU, ++ 0xcb7bb0b0U, 0xfca85454U, 0xd66dbbbbU, 0x3a2c1616U, ++}; ++ ++aes_encrypt_table::table_type aes_encrypt_table::Te2 = { ++ 0x63a5c663U, 0x7c84f87cU, 0x7799ee77U, 0x7b8df67bU, ++ 0xf20dfff2U, 0x6bbdd66bU, 0x6fb1de6fU, 0xc55491c5U, ++ 0x30506030U, 0x01030201U, 0x67a9ce67U, 0x2b7d562bU, ++ 0xfe19e7feU, 0xd762b5d7U, 0xabe64dabU, 0x769aec76U, ++ 0xca458fcaU, 0x829d1f82U, 0xc94089c9U, 0x7d87fa7dU, ++ 0xfa15effaU, 0x59ebb259U, 0x47c98e47U, 0xf00bfbf0U, ++ 0xadec41adU, 0xd467b3d4U, 0xa2fd5fa2U, 
0xafea45afU, ++ 0x9cbf239cU, 0xa4f753a4U, 0x7296e472U, 0xc05b9bc0U, ++ 0xb7c275b7U, 0xfd1ce1fdU, 0x93ae3d93U, 0x266a4c26U, ++ 0x365a6c36U, 0x3f417e3fU, 0xf702f5f7U, 0xcc4f83ccU, ++ 0x345c6834U, 0xa5f451a5U, 0xe534d1e5U, 0xf108f9f1U, ++ 0x7193e271U, 0xd873abd8U, 0x31536231U, 0x153f2a15U, ++ 0x040c0804U, 0xc75295c7U, 0x23654623U, 0xc35e9dc3U, ++ 0x18283018U, 0x96a13796U, 0x050f0a05U, 0x9ab52f9aU, ++ 0x07090e07U, 0x12362412U, 0x809b1b80U, 0xe23ddfe2U, ++ 0xeb26cdebU, 0x27694e27U, 0xb2cd7fb2U, 0x759fea75U, ++ 0x091b1209U, 0x839e1d83U, 0x2c74582cU, 0x1a2e341aU, ++ 0x1b2d361bU, 0x6eb2dc6eU, 0x5aeeb45aU, 0xa0fb5ba0U, ++ 0x52f6a452U, 0x3b4d763bU, 0xd661b7d6U, 0xb3ce7db3U, ++ 0x297b5229U, 0xe33edde3U, 0x2f715e2fU, 0x84971384U, ++ 0x53f5a653U, 0xd168b9d1U, 0x00000000U, 0xed2cc1edU, ++ 0x20604020U, 0xfc1fe3fcU, 0xb1c879b1U, 0x5bedb65bU, ++ 0x6abed46aU, 0xcb468dcbU, 0xbed967beU, 0x394b7239U, ++ 0x4ade944aU, 0x4cd4984cU, 0x58e8b058U, 0xcf4a85cfU, ++ 0xd06bbbd0U, 0xef2ac5efU, 0xaae54faaU, 0xfb16edfbU, ++ 0x43c58643U, 0x4dd79a4dU, 0x33556633U, 0x85941185U, ++ 0x45cf8a45U, 0xf910e9f9U, 0x02060402U, 0x7f81fe7fU, ++ 0x50f0a050U, 0x3c44783cU, 0x9fba259fU, 0xa8e34ba8U, ++ 0x51f3a251U, 0xa3fe5da3U, 0x40c08040U, 0x8f8a058fU, ++ 0x92ad3f92U, 0x9dbc219dU, 0x38487038U, 0xf504f1f5U, ++ 0xbcdf63bcU, 0xb6c177b6U, 0xda75afdaU, 0x21634221U, ++ 0x10302010U, 0xff1ae5ffU, 0xf30efdf3U, 0xd26dbfd2U, ++ 0xcd4c81cdU, 0x0c14180cU, 0x13352613U, 0xec2fc3ecU, ++ 0x5fe1be5fU, 0x97a23597U, 0x44cc8844U, 0x17392e17U, ++ 0xc45793c4U, 0xa7f255a7U, 0x7e82fc7eU, 0x3d477a3dU, ++ 0x64acc864U, 0x5de7ba5dU, 0x192b3219U, 0x7395e673U, ++ 0x60a0c060U, 0x81981981U, 0x4fd19e4fU, 0xdc7fa3dcU, ++ 0x22664422U, 0x2a7e542aU, 0x90ab3b90U, 0x88830b88U, ++ 0x46ca8c46U, 0xee29c7eeU, 0xb8d36bb8U, 0x143c2814U, ++ 0xde79a7deU, 0x5ee2bc5eU, 0x0b1d160bU, 0xdb76addbU, ++ 0xe03bdbe0U, 0x32566432U, 0x3a4e743aU, 0x0a1e140aU, ++ 0x49db9249U, 0x060a0c06U, 0x246c4824U, 0x5ce4b85cU, ++ 0xc25d9fc2U, 0xd36ebdd3U, 0xacef43acU, 0x62a6c462U, ++ 
0x91a83991U, 0x95a43195U, 0xe437d3e4U, 0x798bf279U, ++ 0xe732d5e7U, 0xc8438bc8U, 0x37596e37U, 0x6db7da6dU, ++ 0x8d8c018dU, 0xd564b1d5U, 0x4ed29c4eU, 0xa9e049a9U, ++ 0x6cb4d86cU, 0x56faac56U, 0xf407f3f4U, 0xea25cfeaU, ++ 0x65afca65U, 0x7a8ef47aU, 0xaee947aeU, 0x08181008U, ++ 0xbad56fbaU, 0x7888f078U, 0x256f4a25U, 0x2e725c2eU, ++ 0x1c24381cU, 0xa6f157a6U, 0xb4c773b4U, 0xc65197c6U, ++ 0xe823cbe8U, 0xdd7ca1ddU, 0x749ce874U, 0x1f213e1fU, ++ 0x4bdd964bU, 0xbddc61bdU, 0x8b860d8bU, 0x8a850f8aU, ++ 0x7090e070U, 0x3e427c3eU, 0xb5c471b5U, 0x66aacc66U, ++ 0x48d89048U, 0x03050603U, 0xf601f7f6U, 0x0e121c0eU, ++ 0x61a3c261U, 0x355f6a35U, 0x57f9ae57U, 0xb9d069b9U, ++ 0x86911786U, 0xc15899c1U, 0x1d273a1dU, 0x9eb9279eU, ++ 0xe138d9e1U, 0xf813ebf8U, 0x98b32b98U, 0x11332211U, ++ 0x69bbd269U, 0xd970a9d9U, 0x8e89078eU, 0x94a73394U, ++ 0x9bb62d9bU, 0x1e223c1eU, 0x87921587U, 0xe920c9e9U, ++ 0xce4987ceU, 0x55ffaa55U, 0x28785028U, 0xdf7aa5dfU, ++ 0x8c8f038cU, 0xa1f859a1U, 0x89800989U, 0x0d171a0dU, ++ 0xbfda65bfU, 0xe631d7e6U, 0x42c68442U, 0x68b8d068U, ++ 0x41c38241U, 0x99b02999U, 0x2d775a2dU, 0x0f111e0fU, ++ 0xb0cb7bb0U, 0x54fca854U, 0xbbd66dbbU, 0x163a2c16U, ++}; ++ ++aes_encrypt_table::table_type aes_encrypt_table::Te3 = { ++ 0x6363a5c6U, 0x7c7c84f8U, 0x777799eeU, 0x7b7b8df6U, ++ 0xf2f20dffU, 0x6b6bbdd6U, 0x6f6fb1deU, 0xc5c55491U, ++ 0x30305060U, 0x01010302U, 0x6767a9ceU, 0x2b2b7d56U, ++ 0xfefe19e7U, 0xd7d762b5U, 0xababe64dU, 0x76769aecU, ++ 0xcaca458fU, 0x82829d1fU, 0xc9c94089U, 0x7d7d87faU, ++ 0xfafa15efU, 0x5959ebb2U, 0x4747c98eU, 0xf0f00bfbU, ++ 0xadadec41U, 0xd4d467b3U, 0xa2a2fd5fU, 0xafafea45U, ++ 0x9c9cbf23U, 0xa4a4f753U, 0x727296e4U, 0xc0c05b9bU, ++ 0xb7b7c275U, 0xfdfd1ce1U, 0x9393ae3dU, 0x26266a4cU, ++ 0x36365a6cU, 0x3f3f417eU, 0xf7f702f5U, 0xcccc4f83U, ++ 0x34345c68U, 0xa5a5f451U, 0xe5e534d1U, 0xf1f108f9U, ++ 0x717193e2U, 0xd8d873abU, 0x31315362U, 0x15153f2aU, ++ 0x04040c08U, 0xc7c75295U, 0x23236546U, 0xc3c35e9dU, ++ 0x18182830U, 0x9696a137U, 0x05050f0aU, 0x9a9ab52fU, ++ 
0x0707090eU, 0x12123624U, 0x80809b1bU, 0xe2e23ddfU, ++ 0xebeb26cdU, 0x2727694eU, 0xb2b2cd7fU, 0x75759feaU, ++ 0x09091b12U, 0x83839e1dU, 0x2c2c7458U, 0x1a1a2e34U, ++ 0x1b1b2d36U, 0x6e6eb2dcU, 0x5a5aeeb4U, 0xa0a0fb5bU, ++ 0x5252f6a4U, 0x3b3b4d76U, 0xd6d661b7U, 0xb3b3ce7dU, ++ 0x29297b52U, 0xe3e33eddU, 0x2f2f715eU, 0x84849713U, ++ 0x5353f5a6U, 0xd1d168b9U, 0x00000000U, 0xeded2cc1U, ++ 0x20206040U, 0xfcfc1fe3U, 0xb1b1c879U, 0x5b5bedb6U, ++ 0x6a6abed4U, 0xcbcb468dU, 0xbebed967U, 0x39394b72U, ++ 0x4a4ade94U, 0x4c4cd498U, 0x5858e8b0U, 0xcfcf4a85U, ++ 0xd0d06bbbU, 0xefef2ac5U, 0xaaaae54fU, 0xfbfb16edU, ++ 0x4343c586U, 0x4d4dd79aU, 0x33335566U, 0x85859411U, ++ 0x4545cf8aU, 0xf9f910e9U, 0x02020604U, 0x7f7f81feU, ++ 0x5050f0a0U, 0x3c3c4478U, 0x9f9fba25U, 0xa8a8e34bU, ++ 0x5151f3a2U, 0xa3a3fe5dU, 0x4040c080U, 0x8f8f8a05U, ++ 0x9292ad3fU, 0x9d9dbc21U, 0x38384870U, 0xf5f504f1U, ++ 0xbcbcdf63U, 0xb6b6c177U, 0xdada75afU, 0x21216342U, ++ 0x10103020U, 0xffff1ae5U, 0xf3f30efdU, 0xd2d26dbfU, ++ 0xcdcd4c81U, 0x0c0c1418U, 0x13133526U, 0xecec2fc3U, ++ 0x5f5fe1beU, 0x9797a235U, 0x4444cc88U, 0x1717392eU, ++ 0xc4c45793U, 0xa7a7f255U, 0x7e7e82fcU, 0x3d3d477aU, ++ 0x6464acc8U, 0x5d5de7baU, 0x19192b32U, 0x737395e6U, ++ 0x6060a0c0U, 0x81819819U, 0x4f4fd19eU, 0xdcdc7fa3U, ++ 0x22226644U, 0x2a2a7e54U, 0x9090ab3bU, 0x8888830bU, ++ 0x4646ca8cU, 0xeeee29c7U, 0xb8b8d36bU, 0x14143c28U, ++ 0xdede79a7U, 0x5e5ee2bcU, 0x0b0b1d16U, 0xdbdb76adU, ++ 0xe0e03bdbU, 0x32325664U, 0x3a3a4e74U, 0x0a0a1e14U, ++ 0x4949db92U, 0x06060a0cU, 0x24246c48U, 0x5c5ce4b8U, ++ 0xc2c25d9fU, 0xd3d36ebdU, 0xacacef43U, 0x6262a6c4U, ++ 0x9191a839U, 0x9595a431U, 0xe4e437d3U, 0x79798bf2U, ++ 0xe7e732d5U, 0xc8c8438bU, 0x3737596eU, 0x6d6db7daU, ++ 0x8d8d8c01U, 0xd5d564b1U, 0x4e4ed29cU, 0xa9a9e049U, ++ 0x6c6cb4d8U, 0x5656faacU, 0xf4f407f3U, 0xeaea25cfU, ++ 0x6565afcaU, 0x7a7a8ef4U, 0xaeaee947U, 0x08081810U, ++ 0xbabad56fU, 0x787888f0U, 0x25256f4aU, 0x2e2e725cU, ++ 0x1c1c2438U, 0xa6a6f157U, 0xb4b4c773U, 0xc6c65197U, ++ 0xe8e823cbU, 
0xdddd7ca1U, 0x74749ce8U, 0x1f1f213eU, ++ 0x4b4bdd96U, 0xbdbddc61U, 0x8b8b860dU, 0x8a8a850fU, ++ 0x707090e0U, 0x3e3e427cU, 0xb5b5c471U, 0x6666aaccU, ++ 0x4848d890U, 0x03030506U, 0xf6f601f7U, 0x0e0e121cU, ++ 0x6161a3c2U, 0x35355f6aU, 0x5757f9aeU, 0xb9b9d069U, ++ 0x86869117U, 0xc1c15899U, 0x1d1d273aU, 0x9e9eb927U, ++ 0xe1e138d9U, 0xf8f813ebU, 0x9898b32bU, 0x11113322U, ++ 0x6969bbd2U, 0xd9d970a9U, 0x8e8e8907U, 0x9494a733U, ++ 0x9b9bb62dU, 0x1e1e223cU, 0x87879215U, 0xe9e920c9U, ++ 0xcece4987U, 0x5555ffaaU, 0x28287850U, 0xdfdf7aa5U, ++ 0x8c8c8f03U, 0xa1a1f859U, 0x89898009U, 0x0d0d171aU, ++ 0xbfbfda65U, 0xe6e631d7U, 0x4242c684U, 0x6868b8d0U, ++ 0x4141c382U, 0x9999b029U, 0x2d2d775aU, 0x0f0f111eU, ++ 0xb0b0cb7bU, 0x5454fca8U, 0xbbbbd66dU, 0x16163a2cU, ++}; ++ ++aes_decrypt_table::table_type aes_decrypt_table::Td0 = { ++ 0x51f4a750U, 0x7e416553U, 0x1a17a4c3U, 0x3a275e96U, ++ 0x3bab6bcbU, 0x1f9d45f1U, 0xacfa58abU, 0x4be30393U, ++ 0x2030fa55U, 0xad766df6U, 0x88cc7691U, 0xf5024c25U, ++ 0x4fe5d7fcU, 0xc52acbd7U, 0x26354480U, 0xb562a38fU, ++ 0xdeb15a49U, 0x25ba1b67U, 0x45ea0e98U, 0x5dfec0e1U, ++ 0xc32f7502U, 0x814cf012U, 0x8d4697a3U, 0x6bd3f9c6U, ++ 0x038f5fe7U, 0x15929c95U, 0xbf6d7aebU, 0x955259daU, ++ 0xd4be832dU, 0x587421d3U, 0x49e06929U, 0x8ec9c844U, ++ 0x75c2896aU, 0xf48e7978U, 0x99583e6bU, 0x27b971ddU, ++ 0xbee14fb6U, 0xf088ad17U, 0xc920ac66U, 0x7dce3ab4U, ++ 0x63df4a18U, 0xe51a3182U, 0x97513360U, 0x62537f45U, ++ 0xb16477e0U, 0xbb6bae84U, 0xfe81a01cU, 0xf9082b94U, ++ 0x70486858U, 0x8f45fd19U, 0x94de6c87U, 0x527bf8b7U, ++ 0xab73d323U, 0x724b02e2U, 0xe31f8f57U, 0x6655ab2aU, ++ 0xb2eb2807U, 0x2fb5c203U, 0x86c57b9aU, 0xd33708a5U, ++ 0x302887f2U, 0x23bfa5b2U, 0x02036abaU, 0xed16825cU, ++ 0x8acf1c2bU, 0xa779b492U, 0xf307f2f0U, 0x4e69e2a1U, ++ 0x65daf4cdU, 0x0605bed5U, 0xd134621fU, 0xc4a6fe8aU, ++ 0x342e539dU, 0xa2f355a0U, 0x058ae132U, 0xa4f6eb75U, ++ 0x0b83ec39U, 0x4060efaaU, 0x5e719f06U, 0xbd6e1051U, ++ 0x3e218af9U, 0x96dd063dU, 0xdd3e05aeU, 0x4de6bd46U, ++ 0x91548db5U, 
0x71c45d05U, 0x0406d46fU, 0x605015ffU, ++ 0x1998fb24U, 0xd6bde997U, 0x894043ccU, 0x67d99e77U, ++ 0xb0e842bdU, 0x07898b88U, 0xe7195b38U, 0x79c8eedbU, ++ 0xa17c0a47U, 0x7c420fe9U, 0xf8841ec9U, 0x00000000U, ++ 0x09808683U, 0x322bed48U, 0x1e1170acU, 0x6c5a724eU, ++ 0xfd0efffbU, 0x0f853856U, 0x3daed51eU, 0x362d3927U, ++ 0x0a0fd964U, 0x685ca621U, 0x9b5b54d1U, 0x24362e3aU, ++ 0x0c0a67b1U, 0x9357e70fU, 0xb4ee96d2U, 0x1b9b919eU, ++ 0x80c0c54fU, 0x61dc20a2U, 0x5a774b69U, 0x1c121a16U, ++ 0xe293ba0aU, 0xc0a02ae5U, 0x3c22e043U, 0x121b171dU, ++ 0x0e090d0bU, 0xf28bc7adU, 0x2db6a8b9U, 0x141ea9c8U, ++ 0x57f11985U, 0xaf75074cU, 0xee99ddbbU, 0xa37f60fdU, ++ 0xf701269fU, 0x5c72f5bcU, 0x44663bc5U, 0x5bfb7e34U, ++ 0x8b432976U, 0xcb23c6dcU, 0xb6edfc68U, 0xb8e4f163U, ++ 0xd731dccaU, 0x42638510U, 0x13972240U, 0x84c61120U, ++ 0x854a247dU, 0xd2bb3df8U, 0xaef93211U, 0xc729a16dU, ++ 0x1d9e2f4bU, 0xdcb230f3U, 0x0d8652ecU, 0x77c1e3d0U, ++ 0x2bb3166cU, 0xa970b999U, 0x119448faU, 0x47e96422U, ++ 0xa8fc8cc4U, 0xa0f03f1aU, 0x567d2cd8U, 0x223390efU, ++ 0x87494ec7U, 0xd938d1c1U, 0x8ccaa2feU, 0x98d40b36U, ++ 0xa6f581cfU, 0xa57ade28U, 0xdab78e26U, 0x3fadbfa4U, ++ 0x2c3a9de4U, 0x5078920dU, 0x6a5fcc9bU, 0x547e4662U, ++ 0xf68d13c2U, 0x90d8b8e8U, 0x2e39f75eU, 0x82c3aff5U, ++ 0x9f5d80beU, 0x69d0937cU, 0x6fd52da9U, 0xcf2512b3U, ++ 0xc8ac993bU, 0x10187da7U, 0xe89c636eU, 0xdb3bbb7bU, ++ 0xcd267809U, 0x6e5918f4U, 0xec9ab701U, 0x834f9aa8U, ++ 0xe6956e65U, 0xaaffe67eU, 0x21bccf08U, 0xef15e8e6U, ++ 0xbae79bd9U, 0x4a6f36ceU, 0xea9f09d4U, 0x29b07cd6U, ++ 0x31a4b2afU, 0x2a3f2331U, 0xc6a59430U, 0x35a266c0U, ++ 0x744ebc37U, 0xfc82caa6U, 0xe090d0b0U, 0x33a7d815U, ++ 0xf104984aU, 0x41ecdaf7U, 0x7fcd500eU, 0x1791f62fU, ++ 0x764dd68dU, 0x43efb04dU, 0xccaa4d54U, 0xe49604dfU, ++ 0x9ed1b5e3U, 0x4c6a881bU, 0xc12c1fb8U, 0x4665517fU, ++ 0x9d5eea04U, 0x018c355dU, 0xfa877473U, 0xfb0b412eU, ++ 0xb3671d5aU, 0x92dbd252U, 0xe9105633U, 0x6dd64713U, ++ 0x9ad7618cU, 0x37a10c7aU, 0x59f8148eU, 0xeb133c89U, ++ 0xcea927eeU, 0xb761c935U, 
0xe11ce5edU, 0x7a47b13cU, ++ 0x9cd2df59U, 0x55f2733fU, 0x1814ce79U, 0x73c737bfU, ++ 0x53f7cdeaU, 0x5ffdaa5bU, 0xdf3d6f14U, 0x7844db86U, ++ 0xcaaff381U, 0xb968c43eU, 0x3824342cU, 0xc2a3405fU, ++ 0x161dc372U, 0xbce2250cU, 0x283c498bU, 0xff0d9541U, ++ 0x39a80171U, 0x080cb3deU, 0xd8b4e49cU, 0x6456c190U, ++ 0x7bcb8461U, 0xd532b670U, 0x486c5c74U, 0xd0b85742U, ++}; ++ ++aes_decrypt_table::table_type aes_decrypt_table::Td1 = { ++ 0x5051f4a7U, 0x537e4165U, 0xc31a17a4U, 0x963a275eU, ++ 0xcb3bab6bU, 0xf11f9d45U, 0xabacfa58U, 0x934be303U, ++ 0x552030faU, 0xf6ad766dU, 0x9188cc76U, 0x25f5024cU, ++ 0xfc4fe5d7U, 0xd7c52acbU, 0x80263544U, 0x8fb562a3U, ++ 0x49deb15aU, 0x6725ba1bU, 0x9845ea0eU, 0xe15dfec0U, ++ 0x02c32f75U, 0x12814cf0U, 0xa38d4697U, 0xc66bd3f9U, ++ 0xe7038f5fU, 0x9515929cU, 0xebbf6d7aU, 0xda955259U, ++ 0x2dd4be83U, 0xd3587421U, 0x2949e069U, 0x448ec9c8U, ++ 0x6a75c289U, 0x78f48e79U, 0x6b99583eU, 0xdd27b971U, ++ 0xb6bee14fU, 0x17f088adU, 0x66c920acU, 0xb47dce3aU, ++ 0x1863df4aU, 0x82e51a31U, 0x60975133U, 0x4562537fU, ++ 0xe0b16477U, 0x84bb6baeU, 0x1cfe81a0U, 0x94f9082bU, ++ 0x58704868U, 0x198f45fdU, 0x8794de6cU, 0xb7527bf8U, ++ 0x23ab73d3U, 0xe2724b02U, 0x57e31f8fU, 0x2a6655abU, ++ 0x07b2eb28U, 0x032fb5c2U, 0x9a86c57bU, 0xa5d33708U, ++ 0xf2302887U, 0xb223bfa5U, 0xba02036aU, 0x5ced1682U, ++ 0x2b8acf1cU, 0x92a779b4U, 0xf0f307f2U, 0xa14e69e2U, ++ 0xcd65daf4U, 0xd50605beU, 0x1fd13462U, 0x8ac4a6feU, ++ 0x9d342e53U, 0xa0a2f355U, 0x32058ae1U, 0x75a4f6ebU, ++ 0x390b83ecU, 0xaa4060efU, 0x065e719fU, 0x51bd6e10U, ++ 0xf93e218aU, 0x3d96dd06U, 0xaedd3e05U, 0x464de6bdU, ++ 0xb591548dU, 0x0571c45dU, 0x6f0406d4U, 0xff605015U, ++ 0x241998fbU, 0x97d6bde9U, 0xcc894043U, 0x7767d99eU, ++ 0xbdb0e842U, 0x8807898bU, 0x38e7195bU, 0xdb79c8eeU, ++ 0x47a17c0aU, 0xe97c420fU, 0xc9f8841eU, 0x00000000U, ++ 0x83098086U, 0x48322bedU, 0xac1e1170U, 0x4e6c5a72U, ++ 0xfbfd0effU, 0x560f8538U, 0x1e3daed5U, 0x27362d39U, ++ 0x640a0fd9U, 0x21685ca6U, 0xd19b5b54U, 0x3a24362eU, ++ 0xb10c0a67U, 0x0f9357e7U, 
0xd2b4ee96U, 0x9e1b9b91U, ++ 0x4f80c0c5U, 0xa261dc20U, 0x695a774bU, 0x161c121aU, ++ 0x0ae293baU, 0xe5c0a02aU, 0x433c22e0U, 0x1d121b17U, ++ 0x0b0e090dU, 0xadf28bc7U, 0xb92db6a8U, 0xc8141ea9U, ++ 0x8557f119U, 0x4caf7507U, 0xbbee99ddU, 0xfda37f60U, ++ 0x9ff70126U, 0xbc5c72f5U, 0xc544663bU, 0x345bfb7eU, ++ 0x768b4329U, 0xdccb23c6U, 0x68b6edfcU, 0x63b8e4f1U, ++ 0xcad731dcU, 0x10426385U, 0x40139722U, 0x2084c611U, ++ 0x7d854a24U, 0xf8d2bb3dU, 0x11aef932U, 0x6dc729a1U, ++ 0x4b1d9e2fU, 0xf3dcb230U, 0xec0d8652U, 0xd077c1e3U, ++ 0x6c2bb316U, 0x99a970b9U, 0xfa119448U, 0x2247e964U, ++ 0xc4a8fc8cU, 0x1aa0f03fU, 0xd8567d2cU, 0xef223390U, ++ 0xc787494eU, 0xc1d938d1U, 0xfe8ccaa2U, 0x3698d40bU, ++ 0xcfa6f581U, 0x28a57adeU, 0x26dab78eU, 0xa43fadbfU, ++ 0xe42c3a9dU, 0x0d507892U, 0x9b6a5fccU, 0x62547e46U, ++ 0xc2f68d13U, 0xe890d8b8U, 0x5e2e39f7U, 0xf582c3afU, ++ 0xbe9f5d80U, 0x7c69d093U, 0xa96fd52dU, 0xb3cf2512U, ++ 0x3bc8ac99U, 0xa710187dU, 0x6ee89c63U, 0x7bdb3bbbU, ++ 0x09cd2678U, 0xf46e5918U, 0x01ec9ab7U, 0xa8834f9aU, ++ 0x65e6956eU, 0x7eaaffe6U, 0x0821bccfU, 0xe6ef15e8U, ++ 0xd9bae79bU, 0xce4a6f36U, 0xd4ea9f09U, 0xd629b07cU, ++ 0xaf31a4b2U, 0x312a3f23U, 0x30c6a594U, 0xc035a266U, ++ 0x37744ebcU, 0xa6fc82caU, 0xb0e090d0U, 0x1533a7d8U, ++ 0x4af10498U, 0xf741ecdaU, 0x0e7fcd50U, 0x2f1791f6U, ++ 0x8d764dd6U, 0x4d43efb0U, 0x54ccaa4dU, 0xdfe49604U, ++ 0xe39ed1b5U, 0x1b4c6a88U, 0xb8c12c1fU, 0x7f466551U, ++ 0x049d5eeaU, 0x5d018c35U, 0x73fa8774U, 0x2efb0b41U, ++ 0x5ab3671dU, 0x5292dbd2U, 0x33e91056U, 0x136dd647U, ++ 0x8c9ad761U, 0x7a37a10cU, 0x8e59f814U, 0x89eb133cU, ++ 0xeecea927U, 0x35b761c9U, 0xede11ce5U, 0x3c7a47b1U, ++ 0x599cd2dfU, 0x3f55f273U, 0x791814ceU, 0xbf73c737U, ++ 0xea53f7cdU, 0x5b5ffdaaU, 0x14df3d6fU, 0x867844dbU, ++ 0x81caaff3U, 0x3eb968c4U, 0x2c382434U, 0x5fc2a340U, ++ 0x72161dc3U, 0x0cbce225U, 0x8b283c49U, 0x41ff0d95U, ++ 0x7139a801U, 0xde080cb3U, 0x9cd8b4e4U, 0x906456c1U, ++ 0x617bcb84U, 0x70d532b6U, 0x74486c5cU, 0x42d0b857U, ++}; ++ ++aes_decrypt_table::table_type 
aes_decrypt_table::Td2 = { ++ 0xa75051f4U, 0x65537e41U, 0xa4c31a17U, 0x5e963a27U, ++ 0x6bcb3babU, 0x45f11f9dU, 0x58abacfaU, 0x03934be3U, ++ 0xfa552030U, 0x6df6ad76U, 0x769188ccU, 0x4c25f502U, ++ 0xd7fc4fe5U, 0xcbd7c52aU, 0x44802635U, 0xa38fb562U, ++ 0x5a49deb1U, 0x1b6725baU, 0x0e9845eaU, 0xc0e15dfeU, ++ 0x7502c32fU, 0xf012814cU, 0x97a38d46U, 0xf9c66bd3U, ++ 0x5fe7038fU, 0x9c951592U, 0x7aebbf6dU, 0x59da9552U, ++ 0x832dd4beU, 0x21d35874U, 0x692949e0U, 0xc8448ec9U, ++ 0x896a75c2U, 0x7978f48eU, 0x3e6b9958U, 0x71dd27b9U, ++ 0x4fb6bee1U, 0xad17f088U, 0xac66c920U, 0x3ab47dceU, ++ 0x4a1863dfU, 0x3182e51aU, 0x33609751U, 0x7f456253U, ++ 0x77e0b164U, 0xae84bb6bU, 0xa01cfe81U, 0x2b94f908U, ++ 0x68587048U, 0xfd198f45U, 0x6c8794deU, 0xf8b7527bU, ++ 0xd323ab73U, 0x02e2724bU, 0x8f57e31fU, 0xab2a6655U, ++ 0x2807b2ebU, 0xc2032fb5U, 0x7b9a86c5U, 0x08a5d337U, ++ 0x87f23028U, 0xa5b223bfU, 0x6aba0203U, 0x825ced16U, ++ 0x1c2b8acfU, 0xb492a779U, 0xf2f0f307U, 0xe2a14e69U, ++ 0xf4cd65daU, 0xbed50605U, 0x621fd134U, 0xfe8ac4a6U, ++ 0x539d342eU, 0x55a0a2f3U, 0xe132058aU, 0xeb75a4f6U, ++ 0xec390b83U, 0xefaa4060U, 0x9f065e71U, 0x1051bd6eU, ++ 0x8af93e21U, 0x063d96ddU, 0x05aedd3eU, 0xbd464de6U, ++ 0x8db59154U, 0x5d0571c4U, 0xd46f0406U, 0x15ff6050U, ++ 0xfb241998U, 0xe997d6bdU, 0x43cc8940U, 0x9e7767d9U, ++ 0x42bdb0e8U, 0x8b880789U, 0x5b38e719U, 0xeedb79c8U, ++ 0x0a47a17cU, 0x0fe97c42U, 0x1ec9f884U, 0x00000000U, ++ 0x86830980U, 0xed48322bU, 0x70ac1e11U, 0x724e6c5aU, ++ 0xfffbfd0eU, 0x38560f85U, 0xd51e3daeU, 0x3927362dU, ++ 0xd9640a0fU, 0xa621685cU, 0x54d19b5bU, 0x2e3a2436U, ++ 0x67b10c0aU, 0xe70f9357U, 0x96d2b4eeU, 0x919e1b9bU, ++ 0xc54f80c0U, 0x20a261dcU, 0x4b695a77U, 0x1a161c12U, ++ 0xba0ae293U, 0x2ae5c0a0U, 0xe0433c22U, 0x171d121bU, ++ 0x0d0b0e09U, 0xc7adf28bU, 0xa8b92db6U, 0xa9c8141eU, ++ 0x198557f1U, 0x074caf75U, 0xddbbee99U, 0x60fda37fU, ++ 0x269ff701U, 0xf5bc5c72U, 0x3bc54466U, 0x7e345bfbU, ++ 0x29768b43U, 0xc6dccb23U, 0xfc68b6edU, 0xf163b8e4U, ++ 0xdccad731U, 0x85104263U, 0x22401397U, 
0x112084c6U, ++ 0x247d854aU, 0x3df8d2bbU, 0x3211aef9U, 0xa16dc729U, ++ 0x2f4b1d9eU, 0x30f3dcb2U, 0x52ec0d86U, 0xe3d077c1U, ++ 0x166c2bb3U, 0xb999a970U, 0x48fa1194U, 0x642247e9U, ++ 0x8cc4a8fcU, 0x3f1aa0f0U, 0x2cd8567dU, 0x90ef2233U, ++ 0x4ec78749U, 0xd1c1d938U, 0xa2fe8ccaU, 0x0b3698d4U, ++ 0x81cfa6f5U, 0xde28a57aU, 0x8e26dab7U, 0xbfa43fadU, ++ 0x9de42c3aU, 0x920d5078U, 0xcc9b6a5fU, 0x4662547eU, ++ 0x13c2f68dU, 0xb8e890d8U, 0xf75e2e39U, 0xaff582c3U, ++ 0x80be9f5dU, 0x937c69d0U, 0x2da96fd5U, 0x12b3cf25U, ++ 0x993bc8acU, 0x7da71018U, 0x636ee89cU, 0xbb7bdb3bU, ++ 0x7809cd26U, 0x18f46e59U, 0xb701ec9aU, 0x9aa8834fU, ++ 0x6e65e695U, 0xe67eaaffU, 0xcf0821bcU, 0xe8e6ef15U, ++ 0x9bd9bae7U, 0x36ce4a6fU, 0x09d4ea9fU, 0x7cd629b0U, ++ 0xb2af31a4U, 0x23312a3fU, 0x9430c6a5U, 0x66c035a2U, ++ 0xbc37744eU, 0xcaa6fc82U, 0xd0b0e090U, 0xd81533a7U, ++ 0x984af104U, 0xdaf741ecU, 0x500e7fcdU, 0xf62f1791U, ++ 0xd68d764dU, 0xb04d43efU, 0x4d54ccaaU, 0x04dfe496U, ++ 0xb5e39ed1U, 0x881b4c6aU, 0x1fb8c12cU, 0x517f4665U, ++ 0xea049d5eU, 0x355d018cU, 0x7473fa87U, 0x412efb0bU, ++ 0x1d5ab367U, 0xd25292dbU, 0x5633e910U, 0x47136dd6U, ++ 0x618c9ad7U, 0x0c7a37a1U, 0x148e59f8U, 0x3c89eb13U, ++ 0x27eecea9U, 0xc935b761U, 0xe5ede11cU, 0xb13c7a47U, ++ 0xdf599cd2U, 0x733f55f2U, 0xce791814U, 0x37bf73c7U, ++ 0xcdea53f7U, 0xaa5b5ffdU, 0x6f14df3dU, 0xdb867844U, ++ 0xf381caafU, 0xc43eb968U, 0x342c3824U, 0x405fc2a3U, ++ 0xc372161dU, 0x250cbce2U, 0x498b283cU, 0x9541ff0dU, ++ 0x017139a8U, 0xb3de080cU, 0xe49cd8b4U, 0xc1906456U, ++ 0x84617bcbU, 0xb670d532U, 0x5c74486cU, 0x5742d0b8U, ++}; ++ ++aes_decrypt_table::table_type aes_decrypt_table::Td3 = { ++ 0xf4a75051U, 0x4165537eU, 0x17a4c31aU, 0x275e963aU, ++ 0xab6bcb3bU, 0x9d45f11fU, 0xfa58abacU, 0xe303934bU, ++ 0x30fa5520U, 0x766df6adU, 0xcc769188U, 0x024c25f5U, ++ 0xe5d7fc4fU, 0x2acbd7c5U, 0x35448026U, 0x62a38fb5U, ++ 0xb15a49deU, 0xba1b6725U, 0xea0e9845U, 0xfec0e15dU, ++ 0x2f7502c3U, 0x4cf01281U, 0x4697a38dU, 0xd3f9c66bU, ++ 0x8f5fe703U, 0x929c9515U, 0x6d7aebbfU, 
0x5259da95U, ++ 0xbe832dd4U, 0x7421d358U, 0xe0692949U, 0xc9c8448eU, ++ 0xc2896a75U, 0x8e7978f4U, 0x583e6b99U, 0xb971dd27U, ++ 0xe14fb6beU, 0x88ad17f0U, 0x20ac66c9U, 0xce3ab47dU, ++ 0xdf4a1863U, 0x1a3182e5U, 0x51336097U, 0x537f4562U, ++ 0x6477e0b1U, 0x6bae84bbU, 0x81a01cfeU, 0x082b94f9U, ++ 0x48685870U, 0x45fd198fU, 0xde6c8794U, 0x7bf8b752U, ++ 0x73d323abU, 0x4b02e272U, 0x1f8f57e3U, 0x55ab2a66U, ++ 0xeb2807b2U, 0xb5c2032fU, 0xc57b9a86U, 0x3708a5d3U, ++ 0x2887f230U, 0xbfa5b223U, 0x036aba02U, 0x16825cedU, ++ 0xcf1c2b8aU, 0x79b492a7U, 0x07f2f0f3U, 0x69e2a14eU, ++ 0xdaf4cd65U, 0x05bed506U, 0x34621fd1U, 0xa6fe8ac4U, ++ 0x2e539d34U, 0xf355a0a2U, 0x8ae13205U, 0xf6eb75a4U, ++ 0x83ec390bU, 0x60efaa40U, 0x719f065eU, 0x6e1051bdU, ++ 0x218af93eU, 0xdd063d96U, 0x3e05aeddU, 0xe6bd464dU, ++ 0x548db591U, 0xc45d0571U, 0x06d46f04U, 0x5015ff60U, ++ 0x98fb2419U, 0xbde997d6U, 0x4043cc89U, 0xd99e7767U, ++ 0xe842bdb0U, 0x898b8807U, 0x195b38e7U, 0xc8eedb79U, ++ 0x7c0a47a1U, 0x420fe97cU, 0x841ec9f8U, 0x00000000U, ++ 0x80868309U, 0x2bed4832U, 0x1170ac1eU, 0x5a724e6cU, ++ 0x0efffbfdU, 0x8538560fU, 0xaed51e3dU, 0x2d392736U, ++ 0x0fd9640aU, 0x5ca62168U, 0x5b54d19bU, 0x362e3a24U, ++ 0x0a67b10cU, 0x57e70f93U, 0xee96d2b4U, 0x9b919e1bU, ++ 0xc0c54f80U, 0xdc20a261U, 0x774b695aU, 0x121a161cU, ++ 0x93ba0ae2U, 0xa02ae5c0U, 0x22e0433cU, 0x1b171d12U, ++ 0x090d0b0eU, 0x8bc7adf2U, 0xb6a8b92dU, 0x1ea9c814U, ++ 0xf1198557U, 0x75074cafU, 0x99ddbbeeU, 0x7f60fda3U, ++ 0x01269ff7U, 0x72f5bc5cU, 0x663bc544U, 0xfb7e345bU, ++ 0x4329768bU, 0x23c6dccbU, 0xedfc68b6U, 0xe4f163b8U, ++ 0x31dccad7U, 0x63851042U, 0x97224013U, 0xc6112084U, ++ 0x4a247d85U, 0xbb3df8d2U, 0xf93211aeU, 0x29a16dc7U, ++ 0x9e2f4b1dU, 0xb230f3dcU, 0x8652ec0dU, 0xc1e3d077U, ++ 0xb3166c2bU, 0x70b999a9U, 0x9448fa11U, 0xe9642247U, ++ 0xfc8cc4a8U, 0xf03f1aa0U, 0x7d2cd856U, 0x3390ef22U, ++ 0x494ec787U, 0x38d1c1d9U, 0xcaa2fe8cU, 0xd40b3698U, ++ 0xf581cfa6U, 0x7ade28a5U, 0xb78e26daU, 0xadbfa43fU, ++ 0x3a9de42cU, 0x78920d50U, 0x5fcc9b6aU, 0x7e466254U, ++ 
0x8d13c2f6U, 0xd8b8e890U, 0x39f75e2eU, 0xc3aff582U, ++ 0x5d80be9fU, 0xd0937c69U, 0xd52da96fU, 0x2512b3cfU, ++ 0xac993bc8U, 0x187da710U, 0x9c636ee8U, 0x3bbb7bdbU, ++ 0x267809cdU, 0x5918f46eU, 0x9ab701ecU, 0x4f9aa883U, ++ 0x956e65e6U, 0xffe67eaaU, 0xbccf0821U, 0x15e8e6efU, ++ 0xe79bd9baU, 0x6f36ce4aU, 0x9f09d4eaU, 0xb07cd629U, ++ 0xa4b2af31U, 0x3f23312aU, 0xa59430c6U, 0xa266c035U, ++ 0x4ebc3774U, 0x82caa6fcU, 0x90d0b0e0U, 0xa7d81533U, ++ 0x04984af1U, 0xecdaf741U, 0xcd500e7fU, 0x91f62f17U, ++ 0x4dd68d76U, 0xefb04d43U, 0xaa4d54ccU, 0x9604dfe4U, ++ 0xd1b5e39eU, 0x6a881b4cU, 0x2c1fb8c1U, 0x65517f46U, ++ 0x5eea049dU, 0x8c355d01U, 0x877473faU, 0x0b412efbU, ++ 0x671d5ab3U, 0xdbd25292U, 0x105633e9U, 0xd647136dU, ++ 0xd7618c9aU, 0xa10c7a37U, 0xf8148e59U, 0x133c89ebU, ++ 0xa927eeceU, 0x61c935b7U, 0x1ce5ede1U, 0x47b13c7aU, ++ 0xd2df599cU, 0xf2733f55U, 0x14ce7918U, 0xc737bf73U, ++ 0xf7cdea53U, 0xfdaa5b5fU, 0x3d6f14dfU, 0x44db8678U, ++ 0xaff381caU, 0x68c43eb9U, 0x24342c38U, 0xa3405fc2U, ++ 0x1dc37216U, 0xe2250cbcU, 0x3c498b28U, 0x0d9541ffU, ++ 0xa8017139U, 0x0cb3de08U, 0xb4e49cd8U, 0x56c19064U, ++ 0xcb84617bU, 0x32b670d5U, 0x6c5c7448U, 0xb85742d0U, ++}; ++ ++const unsigned char aes_decrypt_table::Td4[table_nelts] = { ++ 0x52U, 0x09U, 0x6aU, 0xd5U, 0x30U, 0x36U, 0xa5U, 0x38U, ++ 0xbfU, 0x40U, 0xa3U, 0x9eU, 0x81U, 0xf3U, 0xd7U, 0xfbU, ++ 0x7cU, 0xe3U, 0x39U, 0x82U, 0x9bU, 0x2fU, 0xffU, 0x87U, ++ 0x34U, 0x8eU, 0x43U, 0x44U, 0xc4U, 0xdeU, 0xe9U, 0xcbU, ++ 0x54U, 0x7bU, 0x94U, 0x32U, 0xa6U, 0xc2U, 0x23U, 0x3dU, ++ 0xeeU, 0x4cU, 0x95U, 0x0bU, 0x42U, 0xfaU, 0xc3U, 0x4eU, ++ 0x08U, 0x2eU, 0xa1U, 0x66U, 0x28U, 0xd9U, 0x24U, 0xb2U, ++ 0x76U, 0x5bU, 0xa2U, 0x49U, 0x6dU, 0x8bU, 0xd1U, 0x25U, ++ 0x72U, 0xf8U, 0xf6U, 0x64U, 0x86U, 0x68U, 0x98U, 0x16U, ++ 0xd4U, 0xa4U, 0x5cU, 0xccU, 0x5dU, 0x65U, 0xb6U, 0x92U, ++ 0x6cU, 0x70U, 0x48U, 0x50U, 0xfdU, 0xedU, 0xb9U, 0xdaU, ++ 0x5eU, 0x15U, 0x46U, 0x57U, 0xa7U, 0x8dU, 0x9dU, 0x84U, ++ 0x90U, 0xd8U, 0xabU, 0x00U, 0x8cU, 0xbcU, 0xd3U, 0x0aU, ++ 0xf7U, 
0xe4U, 0x58U, 0x05U, 0xb8U, 0xb3U, 0x45U, 0x06U, ++ 0xd0U, 0x2cU, 0x1eU, 0x8fU, 0xcaU, 0x3fU, 0x0fU, 0x02U, ++ 0xc1U, 0xafU, 0xbdU, 0x03U, 0x01U, 0x13U, 0x8aU, 0x6bU, ++ 0x3aU, 0x91U, 0x11U, 0x41U, 0x4fU, 0x67U, 0xdcU, 0xeaU, ++ 0x97U, 0xf2U, 0xcfU, 0xceU, 0xf0U, 0xb4U, 0xe6U, 0x73U, ++ 0x96U, 0xacU, 0x74U, 0x22U, 0xe7U, 0xadU, 0x35U, 0x85U, ++ 0xe2U, 0xf9U, 0x37U, 0xe8U, 0x1cU, 0x75U, 0xdfU, 0x6eU, ++ 0x47U, 0xf1U, 0x1aU, 0x71U, 0x1dU, 0x29U, 0xc5U, 0x89U, ++ 0x6fU, 0xb7U, 0x62U, 0x0eU, 0xaaU, 0x18U, 0xbeU, 0x1bU, ++ 0xfcU, 0x56U, 0x3eU, 0x4bU, 0xc6U, 0xd2U, 0x79U, 0x20U, ++ 0x9aU, 0xdbU, 0xc0U, 0xfeU, 0x78U, 0xcdU, 0x5aU, 0xf4U, ++ 0x1fU, 0xddU, 0xa8U, 0x33U, 0x88U, 0x07U, 0xc7U, 0x31U, ++ 0xb1U, 0x12U, 0x10U, 0x59U, 0x27U, 0x80U, 0xecU, 0x5fU, ++ 0x60U, 0x51U, 0x7fU, 0xa9U, 0x19U, 0xb5U, 0x4aU, 0x0dU, ++ 0x2dU, 0xe5U, 0x7aU, 0x9fU, 0x93U, 0xc9U, 0x9cU, 0xefU, ++ 0xa0U, 0xe0U, 0x3bU, 0x4dU, 0xaeU, 0x2aU, 0xf5U, 0xb0U, ++ 0xc8U, 0xebU, 0xbbU, 0x3cU, 0x83U, 0x53U, 0x99U, 0x61U, ++ 0x17U, 0x2bU, 0x04U, 0x7eU, 0xbaU, 0x77U, 0xd6U, 0x26U, ++ 0xe1U, 0x69U, 0x14U, 0x63U, 0x55U, 0x21U, 0x0cU, 0x7dU, ++}; ++ ++/* In-round shifts info. */ ++static const unsigned HOST_WIDE_INT shift_csts[4] = {24, 16, 8, 0}; ++ ++/* Check if the pattern is plus-const. Helper for memref analysis. */ ++static bool ++plus_const_int_p (rtx op) ++{ ++ return GET_CODE (op) == PLUS && CONST_INT_P (XEXP (op, 1)); ++} ++ ++/* Obtain info about memory access. */ ++static bool ++decompose_mem (rtx mem, rtx &base, unsigned HOST_WIDE_INT &offset) ++{ ++ address_info info; ++ decompose_mem_address (&info, mem); ++ if (!info.base) ++ return false; ++ ++ base = *info.base; ++ ++ rtx op = XEXP (mem, 0); ++ if (plus_const_int_p (op)) ++ offset = UINTVAL (XEXP (op, 1)); ++ /* TODO: WRONG IN GENERAL CASE: we cannot guarantee that the offsets were not ++ changed. 
*/ ++ else if ((GET_CODE (op) == PRE_MODIFY && plus_const_int_p (XEXP (op, 1))) ++ || REG_P (op)) ++ offset = 0; ++ else ++ return false; ++ ++ return true; ++} ++ ++/* Check if the regs in stmt are same as the provided ones. */ ++static bool ++cmp_regs_in_stmt (rtx stmt, rtx lhs, rtx rhs) ++{ ++ return (XEXP (stmt, 0) == lhs) && (XEXP (stmt, 1) == rhs); ++} ++ ++/* AES key info. Inhereted from mem_term_info to be used inside ++ matchers without any unnecessary casts. */ ++struct aes_key : mem_term_info ++{ ++ aes_key () ++ {} ++ aes_key (void *) ++ : mem_term_info (NULL, NULL_RTX) ++ {} ++ aes_key (const mem_term_info &m) ++ : mem_term_info (m) ++ {} ++ ++ /* Check if the key has the same base pointer origin as another one. ++ This check is required due to some possible CSE optimizations applied on ++ pointers before this pass. */ ++ bool has_same_origin (const aes_key &other, rtx_insn *use_point) const ++ { ++ /* Simple case: the pointer is same. */ ++ if (src == other.src) ++ return true; ++ ++ if (!use_point) ++ return false; ++ ++ basic_block curr_bb = BLOCK_FOR_INSN (use_point); ++ if (!single_pred_p (curr_bb) ++ || modified_between_p (src, BB_HEAD (curr_bb), use_point) ++ || modified_between_p (other.src, BB_HEAD (curr_bb), use_point)) ++ return false; ++ ++ edge e = single_pred_edge (curr_bb); ++ rtx_insn *jump = BB_END (e->src); ++ if (!any_condjump_p (jump)) ++ return false; ++ ++ basic_block from_bb = BLOCK_FOR_INSN (jump); ++ if (EDGE_COUNT (from_bb->succs) != 2) ++ return false; ++ ++ /* Need proof that the sources are equal: try to get it from ++ terminating condition. 
*/ ++ rtx cond = XEXP (SET_SRC (pc_set (jump)), 0); ++ rtx_code code = GET_CODE (cond); ++ if (!((code == EQ && EDGE_SUCC (from_bb, 0) == e) ++ || (code == NE && EDGE_SUCC (from_bb, 1) == e))) ++ return false; ++ ++ rtx arg1 = XEXP (cond, 0); ++ if (XEXP (cond, 1) != CONST0_RTX (GET_MODE (arg1)) ++ || COMPARISON_P (arg1)) ++ return false; ++ ++ rtx_insn *cmp_insn = get_single_def_insn (jump, arg1); ++ rtx cmp; ++ if (!cmp_insn || !(cmp = get_single_set_op (cmp_insn))) ++ return false; ++ ++ if (!(cmp_regs_in_stmt (cmp, src, other.src) ++ || cmp_regs_in_stmt (cmp, other.src, src))) ++ return false; ++ ++ return true; ++ } ++}; ++ ++/* AES basic state input info. Inhereted from mem_term_info ++ to use it in matchers without any unnecessary casts. */ ++struct state_input_info : mem_term_info ++{ ++ state_input_info () ++ {} ++ state_input_info (const aes_key &k) ++ : mem_term_info (k), is_key (true) ++ {} ++ state_input_info (const mem_term_info &m) ++ : mem_term_info (m), is_key (false) ++ {} ++ ++ bool is_key; ++ ++ bool verify (const state_input_info *prev) const ++ { ++ if (!prev) ++ return true; ++ ++ return BLOCK_FOR_INSN (loc) == BLOCK_FOR_INSN (prev->loc); ++ } ++}; ++ ++/* Memory matcher to filter only suitable memory instructions. */ ++struct mem_matcher : matcher_term ++{ ++ static bool match (rtx_insn *insn, holder_type &m) ++ { ++ rtx src = get_single_set_op (insn); ++ return src && match (src, insn, m); ++ } ++ ++ static bool match (rtx src, rtx_insn *insn, holder_type &m) ++ { ++ if (!MEM_P (src)) ++ return false; ++ ++ mem_term_info info (NULL, NULL_RTX); ++ if (!decompose_mem (src, info.src, info.offset)) ++ return false; ++ ++ info.loc = insn; ++ m[0] = info; ++ return true; ++ } ++}; ++ ++/* AES entry input info. Enhanced from state input due to ideological ++ similarities. 
*/ ++struct input_info : state_input_info ++{ ++ input_info () ++ {} ++ input_info (const mem_term_info &m, unsigned HOST_WIDE_INT shift_cst) ++ : state_input_info (m), shift_cst (shift_cst) ++ {} ++ input_info (const aes_key &k) ++ : state_input_info (k) ++ {} ++ ++ unsigned HOST_WIDE_INT shift_cst; ++ ++ /* Input info is sorted by references offsets. */ ++ bool operator < (const input_info &rhs) const ++ { ++ return offset < rhs.offset; ++ } ++ ++ std::pair input () const ++ { ++ return std::make_pair (src, offset); ++ } ++ ++ bool verify (const input_info *prev, unsigned i) const ++ { ++ if (!state_input_info::verify (prev)) ++ return false; ++ ++ /* Previous state should reference the previous element ++ of the same buffer. */ ++ if (prev && (src != prev->src || offset != prev->offset + 1)) ++ return false; ++ ++ /* State should use the corresponding shift constant. */ ++ return shift_csts[i] == shift_cst; ++ } ++ ++ static bool finalize (rtx_insn *insn, input_info *m) ++ { ++ typedef unop_matcher zext_matcher; ++ ++ zext_matcher::holder_type zext; ++ if (zext_matcher::match (insn, zext)) ++ { ++ *m = input_info (zext[0], 0); ++ return true; ++ } ++ ++ typedef binop_matcher > ++ shifted_variant; ++ shifted_variant::holder_type lsh; ++ if (!shifted_variant::match (insn, lsh)) ++ return false; ++ ++ gcc_assert (CONST_INT_P (lsh[1].src)); ++ *m = input_info (lsh[0], UINTVAL (lsh[1].src)); ++ return true; ++ } ++}; ++ ++/* Check if the corresponding constants combinations may be used for ++ AES table access. */ ++static bool ++verify_table_access (unsigned HOST_WIDE_INT shift_cst, ++ unsigned HOST_WIDE_INT and_cst = 0xFF, ++ bool and_present = true) ++{ ++ if (and_cst != 0xFF) ++ return false; ++ ++ switch (shift_cst) ++ { ++ case 0: ++ case 8: ++ case 16: ++ return and_present; ++ case 24: ++ return true; ++ default: ++ return false; ++ } ++} ++ ++/* AES table reference description. 
*/ ++template ++struct aes_table_ref ++{ ++ rtx_insn *insn; ++ rtx_insn *output_insn; ++ unsigned HOST_WIDE_INT lsr_cst; ++ rtx reg; ++ rtx output; ++ typename TABLE_T::table_entry itable; ++ bool is_final; ++ ++ bool verify (unsigned i) const ++ { ++ typename TABLE_T::table_entry (ðalon)[TABLE_T::rounds_num] ++ = is_final ? TABLE_T::final_rounds : TABLE_T::rounds; ++ return lsr_cst == shift_csts[i] && itable == ethalon[i]; ++ } ++}; ++ ++/* Check the minimal requirements of the pattern to be a table reference ++ and wrap the table id getter function. */ ++template ++static typename T::table_entry ++check_table (rtx mem) ++{ ++ tree expr = MEM_EXPR (mem); ++ if (!expr || TREE_CODE (expr) != ARRAY_REF) ++ return T::BAD_TABLE; ++ ++ tree decl = TREE_OPERAND (expr, 0); ++ if (!decl || !DECL_P (decl) || !TREE_READONLY (decl)) ++ return T::BAD_TABLE; ++ ++ tree ctor = DECL_INITIAL (decl); ++ if (!ctor) ++ return T::BAD_TABLE; ++ ++ return T::get_table_id (ctor); ++} ++ ++/* Simplified memory info. Used for simplier table ref analysis. */ ++struct simplified_mem_info ++{ ++ rtx base_reg; ++ rtx index; ++}; ++ ++/* Try to obtain table reference info. */ ++static bool ++decompose_tref_mem_address (simplified_mem_info &info, rtx mem) ++{ ++ address_info addr_info; ++ decompose_mem_address (&addr_info, mem); ++ if (!addr_info.base || !addr_info.index) ++ return false; ++ ++ info.base_reg = *addr_info.base; ++ info.index = *addr_info.index; ++ ++ if (!REG_P (info.base_reg)) ++ return false; ++ ++ if (addr_info.mode == SImode) ++ { ++ if (GET_CODE (info.index) != MULT) ++ return false; ++ ++ rtx cst = XEXP (info.index, 1); ++ if (!CONST_INT_P (cst) || UINTVAL (cst) != 4) ++ return false; ++ ++ info.index = XEXP (info.index, 0); ++ return true; ++ } ++ ++ return (addr_info.mode == QImode); ++} ++ ++/* Find the possible final output instruction. 
*/ ++template ++static rtx_insn * ++get_possible_final_output (rtx_insn *insn, rtx reg, ++ unsigned HOST_WIDE_INT shift_cst, ++ typename TABLE_T::table_entry itable); ++ ++/* Specialize the function for AES encryption. The output is AND instruction ++ with propper constant. */ ++template<> ++rtx_insn * ++get_possible_final_output (rtx_insn *insn, rtx reg, ++ unsigned HOST_WIDE_INT shift_cst, ++ aes_encrypt_table::table_entry) ++{ ++ rtx_insn *out = get_single_use_insn (insn, reg); ++ if (!out) ++ return NULL; ++ ++ rtx cst_val = get_op_const_cst (out); ++ if (!cst_val) ++ return NULL; ++ ++ unsigned HOST_WIDE_INT ethalon; ++ switch (shift_cst) ++ { ++ case 24: ++ ethalon = 0xffffffffff000000; ++ break; ++ case 16: ++ ethalon = 0xff0000; ++ break; ++ case 8: ++ ethalon = 0xff00; ++ break; ++ case 0: ++ ethalon = 0xff; ++ break; ++ default: ++ gcc_unreachable (); ++ } ++ ++ return UINTVAL (cst_val) == ethalon ? out : NULL; ++} ++ ++/* Specialize the function for AES decryption. The output is ASHIFT instruction ++ with propper constant or direct reference to TD4 table. ++ ++ TODO: TD4 check might be done here for all the cases. However, now it is not ++ done here to make decryption and encryption matching ++ more general in common. */ ++template<> ++rtx_insn * ++get_possible_final_output (rtx_insn *insn, rtx reg, ++ unsigned HOST_WIDE_INT shift_cst, ++ aes_decrypt_table::table_entry it) ++{ ++ rtx_insn *out = get_single_use_insn (insn, reg); ++ if (!out) ++ return NULL; ++ ++ rtx cst_val = get_op_const_cst (out); ++ if (!cst_val) ++ // no shift case ++ return it == aes_decrypt_table::TD4 ? insn : NULL; ++ ++ return UINTVAL (cst_val) == shift_cst ? out : NULL; ++} ++ ++typedef arg_op_matcher reg_matcher; ++ ++/* Helper that matches suitable AES table references. */ ++template ++class tref_matcher ++{ ++ /* (reg >> cst) matcher. Helper. */ ++ typedef binop_matcher > table_access; ++ /* zext (reg >> cst) matcher. Used for TABLE[(val >> 24)] variant. 
*/ ++ typedef unop_matcher direct; ++ /* zext ((reg >> cst1) & cst2) matcher. Used for ++ TABLE[(val >> (16|8)) & 0xff] variant. */ ++ typedef unop_matcher > > shifted; ++ /* zext (reg & cst) matcher. Used for TABLE[val & 0xff] variant. */ ++ typedef unop_matcher > > noshift; ++ ++ std::map table_alias; ++ ++ bool finalize (aes_table_ref &tref, ++ minimal_term_info &input_info, ++ minimal_term_info *shift_info = NULL, ++ minimal_term_info *mask_info = NULL) ++ { ++ gcc_assert (REG_P (input_info.src)); ++ gcc_assert (!shift_info || CONST_INT_P (shift_info->src)); ++ gcc_assert (!mask_info || CONST_INT_P (mask_info->src)); ++ ++ unsigned HOST_WIDE_INT shift ++ = shift_info ? UINTVAL (shift_info->src) : 0; ++ unsigned HOST_WIDE_INT mask ++ = mask_info ? UINTVAL (mask_info->src) : 0xFF; ++ if (!verify_table_access (shift, mask, mask_info)) ++ return false; ++ ++ tref.insn = input_info.loc; ++ tref.reg = input_info.src; ++ tref.lsr_cst = shift; ++ return true; ++ } ++ ++ bool match (rtx_insn *insn, rtx index, aes_table_ref &tref) ++ { ++ direct::holder_type direct_res; ++ if (direct::match (index, insn, direct_res)) ++ return finalize (tref, direct_res[0], &direct_res[1]); ++ ++ shifted::holder_type shifted_res; ++ if (shifted::match (index, insn, shifted_res)) ++ return finalize (tref, shifted_res[0], ++ &shifted_res[1], &shifted_res[2]); ++ ++ noshift::holder_type noshift_res; ++ return noshift::match (index, insn, noshift_res) ++ && finalize (tref, noshift_res[0], NULL, &noshift_res[1]); ++ } ++ ++public: ++ bool match (rtx_insn *insn, aes_table_ref &tref) ++ { ++ rtx mem = get_single_set_op (insn); ++ if (!mem && (mem = get_single_set_op (insn))) ++ mem = XEXP (mem, 0); ++ ++ rtx dst = get_single_set_dst (insn); ++ if (!mem || !MEM_P (mem) || !dst || GET_MODE (dst) != SImode) ++ return false; ++ ++ simplified_mem_info info; ++ if (!decompose_tref_mem_address (info, mem) ++ || !match (insn, info.index, tref)) ++ return false; ++ ++ typename TABLE_T::table_entry 
itable; ++ if (!table_alias.count (info.base_reg)) ++ { ++ itable = check_table (mem); ++ if (itable == TABLE_T::BAD_TABLE) ++ return false; ++ table_alias[info.base_reg] = itable; ++ } ++ else ++ itable = table_alias.at (info.base_reg); ++ ++ if (rtx_insn *out = get_possible_final_output (insn, dst, ++ tref.lsr_cst, ++ itable)) ++ { ++ tref.is_final = true; ++ tref.output_insn = out; ++ tref.output = NULL_RTX; ++ } ++ else ++ { ++ tref.is_final = false; ++ tref.output_insn = insn; ++ tref.output = dst; ++ } ++ ++ tref.itable = itable; ++ return true; ++ } ++}; ++ ++/* AES stage description. Required for some specializations ++ for curtain rounds. */ ++typedef enum { INPUT, MIDDLE, FINAL } aes_stage; ++ ++/* AES entity description. It can be both round or state inside round. ++ It provides interface for unified analysis between blocks of 4 parts: ++ round -> 4 states -> 4 * 4 arguments. */ ++template ++struct aes_entity ++{ ++ aes_key key; ++ std::set entries; ++ rtx_insn *loc; ++ ++ aes_entity () ++ : key (NULL), loc (NULL) ++ {} ++ ++ /* Push new entry to the entity. */ ++ bool push_entry (const ENTRY_T &v) ++ { ++ if (entries.size () == 4) ++ return false; ++ ++ entries.insert (v); ++ return true; ++ } ++ ++ /* The entities are sorted by key offset. */ ++ bool operator < (const aes_entity &rhs) const ++ { ++ return key.offset < rhs.key.offset; ++ } ++ ++ /* Verify that all of the entries are correct within their positions inside ++ the entity. */ ++ bool finalize () ++ { ++ if (entries.size () != 4) ++ return false; ++ ++ unsigned i = 0; ++ const ENTRY_T *prev = NULL; ++ for (typename std::set::iterator it = entries.begin (); ++ it != entries.end (); prev = &*it++, ++i) ++ if (!it->verify (prev, i)) ++ return false; ++ ++ loc = entries.begin ()->loc; ++ return true; ++ } ++}; ++ ++/* Check the correctness of input regs permutations. 
*/ ++template ++static bool ++check_input_regs (const std::vector &curr, ++ const std::vector &prev); ++ ++/* Specialize the function for AES encryption. */ ++template<> ++bool ++check_input_regs (const std::vector &curr, ++ const std::vector &prev) ++{ ++ gcc_assert (curr.size () == 4 && prev.size () == 4); ++ unsigned idx[4] = { 1, 2, 3, 0 }; ++ for (int i = 0; i < 4; ++i) ++ if (curr[i] != prev[idx[i]]) ++ return false; ++ return true; ++} ++ ++/* Specialize the function for AES decryption. */ ++template<> ++bool ++check_input_regs (const std::vector &curr, ++ const std::vector &prev) ++{ ++ gcc_assert (curr.size () == 4 && prev.size () == 4); ++ unsigned idx[4] = { 3, 0, 1, 2 }; ++ for (int i = 0; i < 4; ++i) ++ if (curr[i] != prev[idx[i]]) ++ return false; ++ return true; ++} ++ ++/* Basic descryption of state input. */ ++template ++struct state_input ++{ ++ typedef std::vector type; ++ ++ static void finalize (type &in, rtx v) ++ { ++ in.push_back (v); ++ } ++ ++ template ++ static bool verify (const type &lhs, const type &rhs) ++ { ++ return check_input_regs (lhs, rhs); ++ } ++}; ++ ++/* Input round state uses special input. */ ++template<> ++struct state_input ++{ ++ typedef std::pair type; ++ ++ static void finalize (type &in, const type &v) ++ { ++ in = v; ++ // Order is inverted ++ in.second -= 3; ++ } ++ ++ template ++ static bool verify (const type &lhs, const type &rhs) ++ { ++ return lhs.first == rhs.first ++ && lhs.second == rhs.second + 4; ++ } ++}; ++ ++/* Basic descryption of state output. */ ++template ++struct state_output ++{ ++ typedef rtx type; ++ ++ static bool verify (const type &, const type &) ++ { ++ return true; ++ } ++}; ++ ++/* Final round state generates special output. */ ++template<> ++struct state_output ++{ ++ typedef std::pair type; ++ ++ static bool verify (const type &lhs, const type &rhs) ++ { ++ return lhs.first == rhs.first ++ && lhs.second == rhs.second + 4; ++ } ++}; ++ ++/* Basic descryption of round input. 
*/ ++template ++struct round_input ++{ ++ typedef std::vector type; ++}; ++ ++/* Input round uses special input just as its state. */ ++template<> ++struct round_input ++{ ++ typedef std::pair type; ++}; ++ ++/* Basic descryption of round output. */ ++template ++struct round_output ++{ ++ typedef std::vector type; ++ ++ template ++ static void finalize (type &out, const T &v) ++ { ++ gcc_assert (v.size () == 4); ++ for (typename T::const_iterator it = v.begin (); it != v.end (); ++it) ++ out.push_back (it->output); ++ } ++ ++ template ++ static void reorder (type &) ++ {} ++}; ++ ++/* Reorder output for AES decryption: the order is changed compared to ++ AES encryption. */ ++template<> ++template<> ++void round_output::reorder (type &out) ++{ ++ gcc_assert (out.size () == 4); ++ std::swap (out[1], out[3]); ++} ++ ++template<> ++template<> ++void round_output::reorder (type &out) ++{ ++ round_output::reorder (out); ++} ++ ++/* Final round generates special output. */ ++template<> ++struct round_output : state_output ++{ ++ template ++ static void finalize (type &out, const T &v) ++ { ++ gcc_assert (v.size () == 4); ++ out = v.begin ()->output; ++ } ++ ++ template ++ static void reorder (type &) ++ {} ++}; ++ ++/* AES state descryption. */ ++template ++struct aes_state : aes_entity ++{ ++ typedef aes_entity base_entity; ++ ++ typename state_input::type input; ++ typename state_output::type output; ++ ++ aes_state () ++ : base_entity () ++ {} ++ ++ void set_output (const typename state_output::type &o) ++ { ++ output = o; ++ } ++ ++ bool push_entry (const ENTRY_T &v) ++ { ++ if (!v.is_key) ++ return base_entity::push_entry (v); ++ ++ if (this->key.src) ++ return false; ++ ++ this->key = v; ++ return true; ++ } ++ ++ /* Verify if the state is correct within its position in round. 
*/ ++ bool verify (const aes_state *prev, unsigned) const ++ { ++ if (!prev) ++ return true; ++ ++ if (!this->key.has_same_origin (prev->key, this->loc) ++ || this->key.offset != prev->key.offset + 4 ++ || BLOCK_FOR_INSN (this->loc) != BLOCK_FOR_INSN (prev->loc)) ++ return false; ++ ++ return state_input::template verify (input, prev->input) ++ && state_output::verify (output, prev->output); ++ } ++ ++ /* Check if the entries of the state are correct and finalize stored info. */ ++ bool finalize () ++ { ++ if (!base_entity::finalize ()) ++ return false; ++ ++ for (typename std::set::iterator it = this->entries.begin (); ++ it != this->entries.end (); ++it) ++ state_input::finalize (input, it->input ()); ++ ++ return true; ++ } ++}; ++ ++/* AES round descryption. */ ++template ++struct aes_round : aes_entity, STAGE, K> ++{ ++ typedef aes_entity, STAGE, K> base_entity; ++ ++ typename round_input::type input; ++ typename round_output::type output; ++ ++ /* Check if the states are correct and finalize stored info. */ ++ bool finalize () ++ { ++ if (!base_entity::finalize ()) ++ return false; ++ ++ input = this->entries.begin ()->input; ++ this->key = this->entries.begin ()->key; ++ ++ round_output::finalize (output, this->entries); ++ round_output::template reorder (output); ++ ++ return true; ++ } ++}; ++ ++template ++class aes_optimizer; ++ ++/* AES round input info. Used to find and store info about ++ table references. ++ ++ Must be inited and finalized before and after usage. 
*/ ++template ++struct round_input_info : state_input_info ++{ ++ typedef typename aes_optimizer::table_ref_map tref_map; ++ ++ round_input_info () ++ {} ++ round_input_info (rtx_insn *insn, const aes_table_ref *tref) ++ : state_input_info (mem_term_info (insn, NULL_RTX)), tref (tref) ++ {} ++ round_input_info (const aes_key &k) ++ : state_input_info (k) ++ {} ++ ++ rtx input () const ++ { ++ return tref->reg; ++ } ++ ++ rtx output () const ++ { ++ return tref->output; ++ } ++ ++ /* Table references are sorted by shift constants. ++ TODO: probably sort by key offset? */ ++ bool operator < (const round_input_info &rhs) const ++ { ++ return tref->lsr_cst > rhs.tref->lsr_cst; ++ } ++ ++ bool verify (const round_input_info *prev, unsigned i) const ++ { ++ return state_input_info::verify (prev) && tref->verify (i); ++ } ++ ++ static bool finalize (rtx_insn *insn, round_input_info *m) ++ { ++ if (checked_p->count (insn)) ++ return false; ++ ++ typename tref_map::const_iterator it = table_refs_p->find (insn); ++ if (it == table_refs_p->end ()) ++ return false; ++ ++ m[0] = round_input_info (insn, &it->second); ++ return true; ++ } ++ ++ const aes_table_ref *tref; ++ ++ static const tref_map *table_refs_p; ++ static const std::set *checked_p; ++ ++ /* Store lookup table references. */ ++ static void init (const tref_map &t, const std::set &c) ++ { ++ gcc_assert (!table_refs_p && !checked_p); ++ table_refs_p = &t; ++ checked_p = &c; ++ } ++ ++ /* Remove lookup table references. */ ++ static void fin () ++ { ++ gcc_assert (table_refs_p && checked_p); ++ table_refs_p = NULL; ++ checked_p = NULL; ++ } ++}; ++ ++template ++const typename aes_optimizer::table_ref_map * ++round_input_info::table_refs_p = NULL; ++ ++template ++const std::set * ++round_input_info::checked_p = NULL; ++ ++/* AES encryption/decryption optimizer. */ ++template ++class aes_optimizer ++{ ++public: ++ typedef std::map > table_ref_map; ++ ++ /* AES states typedefs. 
*/ ++ typedef aes_state aes_input_state; ++ typedef aes_state, MIDDLE, T> aes_body_state; ++ typedef aes_state, FINAL, T> aes_final_state; ++ ++ /* AES rounds typedefs. */ ++ typedef aes_round aes_input_round; ++ typedef aes_round, MIDDLE, T> aes_body_round; ++ typedef aes_round, FINAL, T> aes_final_round; ++ ++ bool run (); ++ ++private: ++ bool collect_aes_lookup_tables (); ++ bool form_rounds (); ++ bool find_aes_init_round (); ++ bool collect_state (rtx_insn * insn, aes_body_state &state, ++ std::set &checked); ++ bool find_aes_rounds (); ++ bool collect_final_round (rtx_insn *insn, aes_final_state &state, ++ std::set &checked); ++ bool find_aes_final_round (); ++ bool check_aes_pattern (); ++ void erase_unused_rounds (std::set *> &used); ++ ++ bool gen_aes_code (); ++ bool gen_init_round (); ++ bool gen_round (const aes_body_round &round); ++ bool gen_final_round (); ++ ++ rtx gen_or_get_vreg (const std::vector &vec); ++ rtx get_vreg (const std::vector &vec); ++ rtx gen_vreg (const std::vector &vec); ++ ++ table_ref_map table_refs; ++ table_ref_map final_table_refs; ++ ++ aes_input_round input_round; ++ std::map, aes_body_round> rounds; ++ aes_final_round final_round; ++ ++ std::map, rtx> vec_regs; ++ std::vector to_delete; ++}; ++ ++/* Find all the AES table references in function. */ ++template ++bool ++aes_optimizer::collect_aes_lookup_tables () ++{ ++ basic_block bb; ++ rtx_insn *insn; ++ ++ tref_matcher m; ++ FOR_EACH_BB_FN (bb, cfun) ++ FOR_BB_INSNS (bb, insn) ++ { ++ aes_table_ref tref; ++ if (!m.match (insn, tref)) ++ continue; ++ ++ if (!tref.is_final) ++ table_refs[insn] = tref; ++ else ++ final_table_refs[tref.output_insn] = tref; ++ } ++ ++ return !table_refs.empty () && !final_table_refs.empty (); ++} ++ ++/* Helper function to match all the permutations of five arg ++ calculations. */ ++template ++struct five_args_calc_matcher ++{ ++ /* Helper for matching (op1 * op2). 
*/ ++ typedef binop_matcher two_args_block; ++ /* Helper for matching (op1 * (op2 * op3)). */ ++ typedef binop_matcher three_args_block; ++ /* Helper for matching ((op1 * op2) * (op3 * op4)). */ ++ typedef binop_matcher opt_four_args_block; ++ /* Helper for matching (op1 * (op2 * (op3 * op4))). */ ++ typedef binop_matcher linear_four_args_block; ++ ++ /* Match the (op1 * ((op2 * op3) * (op4 * op5))) variant. */ ++ typedef binop_matcher opt_op_term; ++ /* Match the ((op1 * op2) * (op3 * (op4 * op5))) variant. */ ++ typedef binop_matcher three_op_two; ++ /* Match the (op1 * (op2 * (op3 * (op4 * op5)))) variant. */ ++ typedef binop_matcher fully_linear; ++ ++ static const int holder_size = fully_linear::holder_size; ++ static const int op_num = fully_linear::op_num; ++ typedef typename fully_linear::term_type term_type; ++ typedef typename fully_linear::holder_type holder_type; ++ ++ static rtx_insn* match (rtx_insn *insn, holder_type &m, unsigned depth = 1) ++ { ++ for (rtx dst = get_single_set_dst (insn); depth && insn && dst; ++ insn = get_single_use_insn (insn, dst), ++ dst = insn ? get_single_set_dst (insn) : NULL_RTX, ++ --depth) ++ if (opt_op_term::match (insn, m) || three_op_two::match (insn, m) ++ || fully_linear::match (insn, m)) ++ return insn; ++ return NULL; ++ } ++}; ++ ++/* Match the AES key. */ ++struct key_matcher : matcher_term ++{ ++ static bool match (rtx_insn *insn, holder_type &m) ++ { ++ mem_matcher::holder_type info; ++ if (!mem_matcher::match (insn, info)) ++ return false; ++ ++ m[0] = info[0]; ++ return true; ++ } ++}; ++ ++/* Matcher term for state input. 
*/ ++template ++struct state_input_term : matcher_term ++{ ++ typedef typename matcher_term::holder_type holder_type; ++ ++ static bool match (rtx, rtx_insn *, holder_type &) ++ { ++ return false; ++ } ++ ++ static bool match (rtx_insn *insn, holder_type &m) ++ { ++ key_matcher::holder_type k; ++ if (key_matcher::match (insn, k)) ++ { ++ m[0] = k[0]; ++ return true; ++ } ++ ++ return matcher_term::term_type::finalize (insn, m); ++ } ++}; ++ ++/* Fill state from args. */ ++template ++static bool ++finalize_input (const T (&args)[5], STATE &state) ++{ ++ for (unsigned i = 0; i < 5; ++i) ++ if (!state.push_entry (args[i])) ++ return false; ++ ++ return state.finalize (); ++} ++ ++/* Construct input state. */ ++template ++static bool ++form_input (rtx_insn *insn, T &state) ++{ ++ typedef five_args_calc_matcher > ++ matcher; ++ ++ matcher::holder_type m; ++ if (!matcher::match (insn, m) || !finalize_input (m, state)) ++ return false; ++ ++ /* TODO: probably should not be set here. */ ++ state.set_output (SET_DEST (single_set (insn))); ++ return true; ++} ++ ++/* Get definitions chain for the reg being used in the insn. */ ++static df_link * ++get_defs (rtx_insn *insn, rtx reg) ++{ ++ df_link *ref_chain = get_def_chain (insn, reg); ++ gcc_assert (ref_chain); ++ ++ for (df_link *ref_link = ref_chain; ref_link; ref_link = ref_link->next) ++ if (!check_def_chain_ref (ref_link->ref, reg)) ++ return NULL; ++ ++ return ref_chain; ++} ++ ++/* Find AES init round. To do this, find the table references that depends on ++ two definitions. One of them is our input. 
*/ ++template ++bool ++aes_optimizer::find_aes_init_round () ++{ ++ std::set checked; ++ ++ for (typename table_ref_map::iterator it = table_refs.begin (), ++ end = table_refs.end (); it != end; ++it) ++ for (df_link *def = get_defs (it->second.insn, it->second.reg); ++ def; def = def->next) ++ { ++ rtx_insn *def_insn = DF_REF_INSN (def->ref); ++ if (checked.count (def_insn)) ++ continue; ++ ++ aes_input_state input_state; ++ if (form_input (def_insn, input_state) ++ && !input_round.push_entry (input_state)) ++ return false; ++ ++ checked.insert (def_insn); ++ } ++ ++ return input_round.finalize (); ++} ++ ++/* Collect AES inner state. */ ++template ++bool ++aes_optimizer::collect_state (rtx_insn *insn, aes_body_state &state, ++ std::set &checked) ++{ ++ typedef round_input_info term_info; ++ typedef five_args_calc_matcher > matcher; ++ ++ typename matcher::holder_type m; ++ term_info::init (table_refs, checked); ++ rtx_insn *match_entry = matcher::match (insn, m, 3); ++ term_info::fin (); ++ ++ if (!match_entry || !finalize_input (m, state)) ++ return false; ++ ++ /* TODO: probably should not be set here. */ ++ state.set_output (SET_DEST (single_set (match_entry))); ++ for (unsigned i = 0; i < 5; ++i) ++ if (!m[i].is_key) ++ checked.insert (m[i].tref->output_insn); ++ ++ return true; ++} ++ ++/* Simple sorter to link rounds by their registers. */ ++struct reg_comp ++{ ++ bool operator () (rtx lhs, rtx rhs) const ++ { ++ return REGNO (lhs) < REGNO (rhs); ++ } ++}; ++ ++/* Find AES inner rounds. 
*/ ++template ++bool ++aes_optimizer::find_aes_rounds () ++{ ++ typedef std::set input_key; ++ ++ std::set checked; ++ std::map candidate_rounds; ++ for (typename table_ref_map::iterator it = table_refs.begin (), ++ end = table_refs.end (); it != end; ++it) ++ { ++ rtx_insn *insn = it->first; ++ if (checked.count (insn)) ++ continue; ++ ++ rtx_insn *use = get_single_use_insn (insn, SET_DEST (single_set (insn))); ++ if (!use) ++ continue; ++ ++ aes_body_state state; ++ if (!collect_state (use, state, checked)) ++ continue; ++ ++ /* Sort the input so we can found the corresponding state. */ ++ input_key input (state.input.begin (), state.input.end ()); ++ candidate_rounds[input].push_entry (state); ++ } ++ ++ for (typename std::map::iterator ++ it = candidate_rounds.begin (); ++ it != candidate_rounds.end (); ++it) ++ if (it->second.finalize ()) ++ rounds[it->second.input] = it->second; ++ ++ return !rounds.empty (); ++} ++ ++template ++struct final_state_matcher; ++ ++/* AES encrypt matcher requires additional check on key calculations ++ due to possible optimizations. */ ++template<> ++struct final_state_matcher ++{ ++ typedef round_input_info term_info; ++ typedef five_args_calc_matcher, IOR, true> ++ matcher; ++ typedef typename matcher::term_type ++ holder_type[matcher::holder_size - matcher::op_num]; ++ ++ static rtx_insn *match (rtx_insn *insn, holder_type &m, unsigned depth) ++ { ++ matcher::holder_type inner_m; ++ rtx_insn *res = matcher::match (insn, inner_m, depth); ++ if (!res) ++ return NULL; ++ ++ /* Run pre-order traversal of the operands to check the correctness ++ of key usage. 
*/ ++ gcc_assert (inner_m[0].is_op); ++ unsigned pos = 0; ++ if (!check_key_calculations (inner_m, pos)) ++ return NULL; ++ gcc_assert (pos == (matcher::holder_size - 1)); ++ ++ unsigned idx = 0; ++ for (unsigned i = 0; i < matcher::holder_size; ++i) ++ if (!inner_m[i].is_op) ++ m[idx++] = inner_m[i]; ++ ++ gcc_assert (idx == 5); ++ return res; ++ } ++ ++ static bool check_key_calculations (const matcher::holder_type &m, ++ unsigned &idx, ++ bool failure_on_key = false) ++ { ++ gcc_assert (idx < matcher::holder_size); ++ if (!m[idx].is_op) ++ return !(failure_on_key && m[idx].is_key); ++ ++ failure_on_key |= (GET_CODE (m[idx].src) == IOR); ++ return check_key_calculations (m, ++idx, failure_on_key) ++ && check_key_calculations (m, ++idx, failure_on_key); ++ } ++}; ++ ++ ++/* The final state is simple wrapper since no additional checks are required ++ here. */ ++template<> ++struct final_state_matcher ++{ ++ typedef round_input_info term_info; ++ typedef five_args_calc_matcher > matcher; ++ typedef typename matcher::holder_type holder_type; ++ ++ static rtx_insn *match (rtx_insn *insn, holder_type &m, unsigned depth) ++ { ++ return matcher::match (insn, m, depth); ++ } ++}; ++ ++/* Match the AES final state. 
*/ ++template ++bool ++aes_optimizer::collect_final_round (rtx_insn *insn, aes_final_state &state, ++ std::set &checked) ++{ ++ typedef final_state_matcher matcher_wrapper; ++ ++ typename matcher_wrapper::holder_type m; ++ matcher_wrapper::term_info::init (final_table_refs, checked); ++ rtx_insn *match_entry = matcher_wrapper::match (insn, m, 3); ++ matcher_wrapper::term_info::fin (); ++ ++ rtx dst; ++ if (!match_entry || !(dst = get_single_set_dst (match_entry)) ++ || !finalize_input (m, state)) ++ return false; ++ ++ rtx src; ++ if (!(match_entry = get_single_use_insn (match_entry, dst)) ++ || !(check_simple_op (match_entry, src, dst)) ++ || !dst) ++ return false; ++ ++ std::pair output; ++ if (!(match_entry = get_single_use_insn (match_entry, dst)) ++ || !(dst = get_single_set_dst (match_entry)) ++ || !decompose_mem (dst, output.first, output.second)) ++ return false; ++ ++ to_delete.push_back (match_entry); ++ state.set_output (output); ++ for (unsigned i = 0; i < 5; ++i) ++ if (!m[i].is_key) ++ checked.insert (m[i].tref->output_insn); ++ ++ return true; ++} ++ ++/* Find the final round. 
*/ ++template ++bool ++aes_optimizer::find_aes_final_round () ++{ ++ std::set checked; ++ for (typename table_ref_map::iterator it = final_table_refs.begin (), ++ end = final_table_refs.end (); it != end; ++it) ++ { ++ rtx_insn *insn = it->first; ++ ++ if (checked.count (insn)) ++ continue; ++ ++ rtx_insn *use = get_single_use_insn (insn, SET_DEST (single_set (insn))); ++ if (!use) ++ continue; ++ ++ aes_final_state state; ++ if (collect_final_round (use, state, checked)) ++ final_round.push_entry (state); ++ } ++ ++ return final_round.finalize (); ++} ++ ++template ++bool ++aes_optimizer::form_rounds () ++{ ++ return find_aes_final_round () ++ && find_aes_init_round () ++ && find_aes_rounds (); ++} ++ ++template ++void ++aes_optimizer::erase_unused_rounds (std::set *> &used) ++{ ++ if (used.size () == rounds.size ()) ++ return; ++ ++ for (typename std::map, aes_body_round>::iterator ++ it = rounds.begin (), next = it, ++ end = rounds.end (); it != end; it = next) ++ { ++ ++next; ++ if (!used.count (&it->first)) ++ rounds.erase (it); ++ } ++} ++ ++/* Find round starts and link them together. 
*/ ++template ++bool ++aes_optimizer::check_aes_pattern () ++{ ++ std::set *> checked; ++ ++ typename std::map, aes_body_round>::iterator fit ++ = rounds.find (input_round.output); ++ ++ bool to_final = false; ++ while (fit != rounds.end () && !checked.count (&fit->first)) ++ { ++ checked.insert (&fit->first); ++ ++ if (fit->second.output == final_round.input) ++ to_final = true; ++ ++ fit = rounds.find (fit->second.output); ++ } ++ ++ if (!to_final) ++ return false; ++ ++ erase_unused_rounds (checked); ++ ++ return true; ++} ++ ++static bool ++gen_insns (const rtx patterns[4], rtx_insn *loc) ++{ ++ start_sequence (); ++ for (unsigned i = 0; i < 4; ++i) ++ { ++ rtx_insn *insn = emit_insn (patterns[i]); ++ if (recog_memoized (insn) < 0) ++ { ++ end_sequence (); ++ return false; ++ } ++ } ++ ++ rtx_insn *seq = get_insns (); ++ end_sequence (); ++ emit_insn_after (seq, loc); ++ ++ return true; ++} ++ ++static rtx ++gen_offset_access (rtx base, unsigned HOST_WIDE_INT offset) ++{ ++ if (!offset) ++ return base; ++ ++ machine_mode mode = GET_MODE (base); ++ return gen_rtx_PLUS (mode, base, gen_rtx_CONST_INT (mode, offset)); ++} ++ ++template ++rtx ++aes_optimizer::get_vreg (const std::vector &vec) ++{ ++ std::map, rtx>::iterator fit = vec_regs.find (vec); ++ if (fit != vec_regs.end ()) ++ return fit->second; ++ ++ return 0; ++} ++ ++template ++rtx ++aes_optimizer::gen_vreg (const std::vector &vec) ++{ ++ machine_mode vmode = targetm.get_v16qi_mode (); ++ rtx vreg = gen_reg_rtx (vmode); ++ vec_regs.insert (std::make_pair (vec, vreg)); ++ ++ return vreg; ++} ++ ++template ++rtx ++aes_optimizer::gen_or_get_vreg (const std::vector &vec) ++{ ++ rtx vreg = get_vreg (vec); ++ if (!vreg) ++ vreg = gen_vreg (vec); ++ ++ return vreg; ++} ++ ++template ++static rtx ++gen_aes_single_round (rtx vout, rtx vreg, rtx vkey); ++template ++static rtx ++gen_aes_mix_columns (rtx vreg, rtx vin); ++ ++template<> ++rtx ++gen_aes_single_round (rtx vout, rtx vreg, rtx vkey) ++{ ++ return 
targetm.gen_aesev16qi (vout, vreg, vkey); ++} ++ ++template<> ++rtx ++gen_aes_mix_columns (rtx vreg, rtx vin) ++{ ++ return targetm.gen_aesmcv16qi (vreg, vin); ++} ++ ++template<> ++rtx ++gen_aes_single_round (rtx vout, rtx vreg, rtx vkey) ++{ ++ return targetm.gen_aesdv16qi (vout, vreg, vkey); ++} ++ ++template<> ++rtx ++gen_aes_mix_columns (rtx vreg, rtx vin) ++{ ++ return targetm.gen_aesimcv16qi (vreg, vin); ++} ++ ++template ++bool ++aes_optimizer::gen_init_round () ++{ ++ rtx_insn *loc = input_round.loc; ++ ++ machine_mode vmode = targetm.get_v16qi_mode (); ++ ++ rtx vreg = gen_reg_rtx (vmode); ++ rtx vkey = gen_reg_rtx (vmode); ++ rtx vout = gen_vreg (input_round.output); ++ ++ rtx buf = input_round.input.first; ++ rtx key = gen_offset_access (input_round.key.src, input_round.key.offset); ++ ++ rtx vload_pat = gen_rtx_SET (vreg, ++ gen_rtx_MEM (vmode, buf)); ++ rtx vkey_load_pat = gen_rtx_SET (vkey, ++ gen_rtx_MEM (vmode, key)); ++ rtx vrev_pat = targetm.gen_rev32v16qi (vkey, vkey); ++ rtx vaes_pat = gen_aes_single_round (vout, vreg, vkey); ++ ++ const rtx patterns[4] = {vload_pat, vkey_load_pat, vrev_pat, vaes_pat}; ++ ++ return gen_insns (patterns, loc); ++} ++ ++template ++bool ++aes_optimizer::gen_round (const aes_body_round &round) ++{ ++ rtx_insn *loc = round.loc; ++ ++ machine_mode vmode = targetm.get_v16qi_mode (); ++ ++ rtx vreg = gen_reg_rtx (vmode); ++ rtx vkey = gen_reg_rtx (vmode); ++ rtx vin = gen_or_get_vreg (round.input); ++ rtx vout = gen_or_get_vreg (round.output); ++ ++ rtx key = gen_offset_access (round.key.src, round.key.offset); ++ ++ rtx vkey_load_pat = gen_rtx_SET (vkey, ++ gen_rtx_MEM (vmode, key)); ++ rtx vrev_pat = targetm.gen_rev32v16qi (vkey, vkey); ++ rtx vmix_pat = gen_aes_mix_columns (vreg, vin); ++ rtx vaes_pat = gen_aes_single_round (vout, vreg, vkey); ++ ++ const rtx patterns[4] = {vkey_load_pat, vrev_pat, vmix_pat, vaes_pat}; ++ ++ return gen_insns (patterns, loc); ++} ++ ++template ++bool ++aes_optimizer::gen_final_round 
() ++{ ++ rtx_insn *loc = final_round.loc; ++ ++ machine_mode vmode = targetm.get_v16qi_mode (); ++ ++ rtx vreg = gen_reg_rtx (vmode); ++ rtx vkey = gen_reg_rtx (vmode); ++ rtx vin = get_vreg (final_round.input); ++ ++ gcc_assert (vin); ++ ++ rtx buf = final_round.output.first; ++ rtx key = gen_offset_access (final_round.key.src, final_round.key.offset); ++ ++ rtx vkey_load_pat = gen_rtx_SET (vkey, ++ gen_rtx_MEM (vmode, key)); ++ rtx vrev_pat = targetm.gen_rev32v16qi (vkey, vkey); ++ rtx vxor_pat = gen_rtx_SET (vreg, gen_rtx_XOR (vmode, vin, vkey)); ++ rtx vstore_pat = gen_rtx_SET (gen_rtx_MEM (vmode, buf), vreg); ++ ++ const rtx patterns[4] = {vkey_load_pat, vrev_pat, vxor_pat, vstore_pat}; ++ ++ return gen_insns (patterns, loc); ++} ++ ++template ++bool ++aes_optimizer::gen_aes_code () ++{ ++ if (!gen_init_round ()) ++ return false; ++ ++ for (typename std::map, aes_body_round>::iterator ++ it = rounds.begin (), end = rounds.end (); it != end; ++it) ++ { ++ if (!gen_round (it->second)) ++ return false; ++ } ++ ++ if (!gen_final_round ()) ++ return false; ++ ++ for (std::vector::iterator it = to_delete.begin (), ++ end = to_delete.end (); it != end; ++it) ++ SET_INSN_DELETED (*it); ++ ++ return true; ++} ++ ++template ++bool ++aes_optimizer::run () ++{ ++ return collect_aes_lookup_tables () ++ && form_rounds () ++ && check_aes_pattern () ++ && gen_aes_code (); ++} ++ ++static unsigned int ++crypto_acceleration () ++{ ++ aes_optimizer enc; ++ aes_optimizer dec; ++ enc.run (); ++ dec.run (); ++ ++ return 0; ++} ++ ++static void ++init_df () ++{ ++ df_set_flags (DF_RD_PRUNE_DEAD_DEFS); ++ df_chain_add_problem (DF_UD_CHAIN + DF_DU_CHAIN); ++ df_mir_add_problem (); ++ df_live_add_problem (); ++ df_live_set_all_dirty (); ++ df_analyze (); ++ df_set_flags (DF_DEFER_INSN_RESCAN); ++} ++ ++namespace { ++ ++const pass_data pass_data_crypto_accel = ++{ ++ RTL_PASS, // type ++ "crypto_accel", // name ++ OPTGROUP_NONE, // optinfo_flags ++ TV_CRYPTO_ACCEL, // tv_id ++ 
PROP_cfglayout, // properties_required ++ 0, // properties_provided ++ 0, // properties_destroyed ++ 0, // todo_flags_start ++ TODO_df_finish, // todo_flags_finish ++}; ++ ++class pass_crypto_accel : public rtl_opt_pass ++{ ++public: ++ pass_crypto_accel (gcc::context *ctxt) ++ : rtl_opt_pass (pass_data_crypto_accel, ctxt) ++ {} ++ ++ /* opt_pass methods: */ ++ virtual bool gate (function *) ++ { ++ if (flag_crypto_accel_aes <= 0) ++ return false; ++ return targetm.get_v16qi_mode ++ && targetm.gen_rev32v16qi ++ && targetm.gen_aesev16qi ++ && targetm.gen_aesmcv16qi; ++ } ++ ++ virtual unsigned int execute (function *) ++ { ++ init_df (); ++ return crypto_acceleration (); ++ } ++}; // class pass_crypto_accel ++ ++} // anon namespace ++ ++rtl_opt_pass * ++make_pass_crypto_accel (gcc::context *ctxt) ++{ ++ return new pass_crypto_accel (ctxt); ++} +diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi +index 3b6e90bf2..2aba523bb 100644 +--- a/gcc/doc/tm.texi ++++ b/gcc/doc/tm.texi +@@ -12125,6 +12125,35 @@ types. If @var{has_wb} is not NULL then its value is set to true if STP + contains post-index or pre-index operation. + @end deftypefn + ++@deftypefn {Target Hook} machine_mode TARGET_GET_V16QI_MODE () ++This function get the 16 byte elements vector mode if target supports this. ++@end deftypefn ++ ++@deftypefn {Target Hook} rtx TARGET_GEN_REV32V16QI (rtx @var{dest}, rtx @var{src}) ++This function generate the byte reverse instruction ++ of 16 byte elements vector if target supports this. ++@end deftypefn ++ ++@deftypefn {Target Hook} rtx TARGET_GEN_AESEV16QI (rtx @var{dest}, rtx @var{src1}, rtx @var{src2}) ++This function generate the AES encryption instruction ++ of 16 byte elements vector if target supports this. ++@end deftypefn ++ ++@deftypefn {Target Hook} rtx TARGET_GEN_AESDV16QI (rtx @var{dest}, rtx @var{src1}, rtx @var{src2}) ++This function generate the AES decryption instruction ++ of 16 byte elements vector if target supports this. 
++@end deftypefn ++ ++@deftypefn {Target Hook} rtx TARGET_GEN_AESMCV16QI (rtx @var{dest}, rtx @var{src}) ++This function generate the AES mix columns instruction ++ of 16 byte elements vector if target supports this. ++@end deftypefn ++ ++@deftypefn {Target Hook} rtx TARGET_GEN_AESIMCV16QI (rtx @var{dest}, rtx @var{src}) ++This function generate the AES inversed mix columns instruction ++ of 16 byte elements vector if target supports this. ++@end deftypefn ++ + @deftypefn {Target Hook} bool TARGET_CANNOT_MODIFY_JUMPS_P (void) + This target hook returns @code{true} past the point in which new jump + instructions could be created. On machines that require a register for +diff --git a/gcc/doc/tm.texi.in b/gcc/doc/tm.texi.in +index 6ff60e562..817d586ff 100644 +--- a/gcc/doc/tm.texi.in ++++ b/gcc/doc/tm.texi.in +@@ -7981,6 +7981,18 @@ lists. + + @hook TARGET_IS_STP_INSN + ++@hook TARGET_GET_V16QI_MODE ++ ++@hook TARGET_GEN_REV32V16QI ++ ++@hook TARGET_GEN_AESEV16QI ++ ++@hook TARGET_GEN_AESDV16QI ++ ++@hook TARGET_GEN_AESMCV16QI ++ ++@hook TARGET_GEN_AESIMCV16QI ++ + @hook TARGET_CANNOT_MODIFY_JUMPS_P + + @hook TARGET_HAVE_CONDITIONAL_EXECUTION +diff --git a/gcc/passes.def b/gcc/passes.def +index a30e05688..b7d4f7b4e 100644 +--- a/gcc/passes.def ++++ b/gcc/passes.def +@@ -475,6 +475,7 @@ along with GCC; see the file COPYING3. If not see + NEXT_PASS (pass_rtl_fwprop_addr); + NEXT_PASS (pass_inc_dec); + NEXT_PASS (pass_initialize_regs); ++ NEXT_PASS (pass_crypto_accel); + NEXT_PASS (pass_ud_rtl_dce); + NEXT_PASS (pass_combine); + NEXT_PASS (pass_if_after_combine); +diff --git a/gcc/rtl-matcher.h b/gcc/rtl-matcher.h +new file mode 100644 +index 000000000..6aed8d98d +--- /dev/null ++++ b/gcc/rtl-matcher.h +@@ -0,0 +1,367 @@ ++/* Helpers for RTL pattern matchers. ++ Copyright (C) 2003-2023 Free Software Foundation, Inc. ++ ++This file is part of GCC. 
++ ++GCC is free software; you can redistribute it and/or modify it under ++the terms of the GNU General Public License as published by the Free ++Software Foundation; either version 3, or (at your option) any later ++version. ++ ++GCC is distributed in the hope that it will be useful, but WITHOUT ANY ++WARRANTY; without even the implied warranty of MERCHANTABILITY or ++FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. */ ++ ++#ifndef GCC_RTL_MATCHER_H ++#define GCC_RTL_MATCHER_H ++ ++#include "config.h" ++#include "system.h" ++#include "rtl.h" ++#include "df.h" ++ ++/* Get definitions chain for the reg being used in insn. */ ++static df_link * ++get_def_chain (rtx_insn *insn, rtx reg) ++{ ++ df_ref use; ++ FOR_EACH_INSN_USE (use, insn) ++ { ++ rtx use_reg = DF_REF_REG (use); ++ if (GET_CODE (use_reg) == SUBREG) ++ { ++ if (REGNO (SUBREG_REG (use_reg)) == REGNO (reg)) ++ return NULL; ++ } ++ else ++ { ++ gcc_assert (REG_P (use_reg)); ++ if (REGNO (use_reg) == REGNO (reg)) ++ return DF_REF_CHAIN (use); ++ } ++ } ++ ++ return NULL; ++} ++ ++/* Check if the reg is not global and actually modified in the ref. */ ++static bool ++check_def_chain_ref (df_ref ref, rtx reg) ++{ ++ if (!ref || !DF_REF_INSN_INFO (ref)) ++ return false; ++ ++ return !global_regs[REGNO (reg)] ++ || set_of (reg, DF_REF_INSN (ref)); ++} ++ ++/* Get the single def instruction of the reg being used in the insn. */ ++static rtx_insn * ++get_single_def_insn (rtx_insn *insn, rtx reg) ++{ ++ if (!REG_P (reg)) ++ return NULL; ++ ++ df_link *ref_chain = get_def_chain (insn, reg); ++ gcc_assert (ref_chain); ++ ++ if (!ref_chain || ref_chain->next ++ || !check_def_chain_ref (ref_chain->ref, reg)) ++ return NULL; ++ ++ return DF_REF_INSN (ref_chain->ref); ++} ++ ++/* Get the single user instruction of the reg being set in the insn. 
*/ ++static rtx_insn * ++get_single_use_insn (rtx_insn *insn, rtx reg) ++{ ++ df_ref def; ++ struct df_link *ref_chain; ++ ++ if (!REG_P (reg)) ++ return NULL; ++ ++ FOR_EACH_INSN_DEF (def, insn) ++ if (REGNO (DF_REF_REG (def)) == REGNO (reg)) ++ break; ++ ++ gcc_assert (def && "Broken def-use analysis chain."); ++ ++ ref_chain = DF_REF_CHAIN (def); ++ ++ if (!ref_chain || ref_chain->next || !ref_chain->ref) ++ return NULL; ++ ++ return DF_REF_INSN (ref_chain->ref); ++} ++ ++/* Get the rtx pattern of suitable opcode from single set instruction. */ ++template ++static rtx ++get_single_set_op (rtx_insn *insn) ++{ ++ rtx pat = single_set (insn); ++ if (!pat) ++ return NULL_RTX; ++ ++ rtx src = SET_SRC (pat); ++ if (GET_CODE (src) != OP1 && GET_CODE (src) != OP2) ++ return NULL_RTX; ++ ++ return src; ++} ++ ++/* Get the rtx pattern of suitable opcode from single set instruction. */ ++template ++static rtx ++get_single_set_op (rtx_insn *insn) ++{ ++ return get_single_set_op (insn); ++} ++ ++/* Get the rtx constant from single set instruction of suitable opcode. */ ++template ++static rtx ++get_op_const_cst (rtx_insn *insn) ++{ ++ rtx src = get_single_set_op (insn); ++ if (!src) ++ return NULL_RTX; ++ ++ rtx cst = XEXP (src, 1); ++ return CONST_INT_P (cst) ? cst : NULL_RTX; ++} ++ ++/* Get the rtx destination from single set instruction of suitable opcode. */ ++template ++static rtx ++get_single_set_dst (rtx_insn *insn) ++{ ++ rtx pat = single_set (insn); ++ if (!pat) ++ return NULL_RTX; ++ ++ rtx dst = SET_DEST (pat); ++ if (GET_CODE (dst) != OP) ++ return NULL_RTX; ++ ++ return dst; ++} ++ ++/* Get the rtx destination from single set instruction. */ ++static rtx ++get_single_set_dst (rtx_insn *insn) ++{ ++ rtx pat = single_set (insn); ++ if (!pat) ++ return NULL_RTX; ++ ++ return SET_DEST (pat); ++} ++ ++/* Check if the instruction is single set of suitable opcode. ++ Also gather its source and destination patterns. 
*/ ++template ++static bool ++check_simple_op (rtx_insn *insn, rtx &src, rtx &dst) ++{ ++ rtx pat = single_set (insn); ++ if (!pat) ++ return false; ++ ++ src = SET_SRC (pat); ++ dst = SET_DEST (pat); ++ ++ if (GET_CODE (src) != OP) ++ return false; ++ ++ return true; ++} ++ ++/* Minimal term info of the RTL matcher. All of the custom matchers should ++ inherit from it. ++ ++ It stores information about matched pattern, instruction ++ of its location and predicate if the matched term represents operator ++ inside the matched tree. */ ++struct minimal_term_info ++{ ++ minimal_term_info () ++ {} ++ minimal_term_info (rtx_insn *loc, rtx src, bool is_op = false) ++ : loc (loc), src (src), is_op (is_op) ++ {} ++ ++ rtx_insn *loc; ++ rtx src; ++ bool is_op; ++}; ++ ++/* Term info for memory matcher. */ ++struct mem_term_info : minimal_term_info ++{ ++ mem_term_info () ++ {} ++ mem_term_info (rtx_insn *loc, rtx src, unsigned HOST_WIDE_INT offset = 0) ++ : minimal_term_info (loc, src), offset (offset) ++ {} ++ ++ unsigned HOST_WIDE_INT offset; ++}; ++ ++/* A wrapper being used to turn a term into a matcher-like entity. */ ++template ++struct matcher_term ++{ ++ /* Required storage size information of the matcher. */ ++ static const int holder_size = 1; ++ static const int op_num = 0; ++ typedef T term_type; ++ typedef term_type holder_type[holder_size]; ++}; ++ ++/* Simple matcher of patterns of suitable opcode. */ ++template ++struct arg_op_matcher : matcher_term ++{ ++ typedef typename matcher_term::holder_type holder_type; ++ ++ static bool match (rtx_insn *, holder_type &) ++ { ++ return false; ++ } ++ ++ static bool match (rtx src, rtx_insn *insn, holder_type &m) ++ { ++ if (GET_CODE (src) != ARGOP) ++ return false; ++ ++ static_cast (m[0]) = minimal_term_info (insn, src); ++ return true; ++ } ++}; ++ ++/* Simple matcher of integer constants. */ ++template ++struct int_cst_matcher : arg_op_matcher ++{}; ++ ++/* Unary operator matcher. 
*/ ++template ++struct unop_matcher ++{ ++ /* Required storage size information of the matcher. */ ++ static const int holder_size = ARG::holder_size + store_op; ++ static const int op_num = ARG::op_num + store_op; ++ typedef typename ARG::term_type term_type; ++ typedef term_type holder_type[holder_size]; ++ ++ static bool match (rtx_insn *insn, holder_type &m) ++ { ++ rtx src = get_single_set_op (insn); ++ return src && match (src, insn, m); ++ } ++ ++ static bool match (rtx src, rtx_insn *insn, holder_type &m) ++ { ++ if (REG_P (src)) ++ { ++ insn = get_single_def_insn (insn, src); ++ if (insn && (src = single_set (insn))) ++ src = SET_SRC (src); ++ } ++ ++ if (!src || !insn || (GET_CODE (src) != OP1 && GET_CODE (src) != OP2)) ++ return false; ++ ++ /* Store current operation if needed. */ ++ if (store_op) ++ static_cast (m[0]) = minimal_term_info (insn, src, ++ true); ++ ++ rtx op = XEXP (src, 0); ++ rtx_insn *def = get_single_def_insn (insn, op); ++ typename ARG::holder_type &m_arg ++ = (typename ARG::holder_type &) *(m + store_op); ++ return (def && ARG::match (def, m_arg)) || ARG::match (op, insn, m_arg); ++ } ++}; ++ ++/* Binary operator matcher. */ ++template ++struct binop_matcher ++{ ++ /* Required storage size information of the matcher. */ ++ static const int holder_size = LHS::holder_size + RHS::holder_size + store_op; ++ static const int op_num = LHS::op_num + RHS::op_num + store_op; ++ typedef typename LHS::term_type term_type; ++ typedef term_type holder_type[holder_size]; ++ ++ static bool match (rtx_insn *insn, holder_type &m) ++ { ++ rtx src = get_single_set_op (insn); ++ return src && match (src, insn, m); ++ } ++ ++ static bool match (rtx src, rtx_insn *insn, holder_type &m) ++ { ++ if (GET_CODE (src) != OP1 && GET_CODE (src) != OP2) ++ return false; ++ ++ /* Store current operation if needed. 
*/ ++ if (store_op) ++ static_cast (m[0]) = minimal_term_info (insn, src, ++ true); ++ ++ rtx lhs_op = XEXP (src, 0); ++ rtx rhs_op = XEXP (src, 1); ++ rtx_insn *lhs_def = get_single_def_insn (insn, lhs_op); ++ rtx_insn *rhs_def = get_single_def_insn (insn, rhs_op); ++ ++ return match (lhs_def, rhs_def, lhs_op, rhs_op, insn, m) ++ || (COMMUTATIVE && match (rhs_def, lhs_def, rhs_op, lhs_op, insn, m)); ++ } ++ ++private: ++ static bool match (rtx_insn *lhs_def, rtx_insn *rhs_def, ++ rtx lhs_op, rtx rhs_op, rtx_insn *insn, ++ holder_type &m) ++ { ++ /* Force template instantiation error on non-matching types. */ ++ gcc_assert ((typename LHS::term_type *) NULL ++ == (typename RHS::term_type *) NULL); ++ ++ /* Obtain locations in the storage. */ ++ typename LHS::holder_type &m_lhs ++ = (typename LHS::holder_type &) *(m + store_op); ++ typename RHS::holder_type &m_rhs ++ = (typename RHS::holder_type &) *(m + store_op ++ + LHS::holder_size); ++ ++ /* Try match both instructions. */ ++ if (lhs_def && rhs_def && LHS::match (lhs_def, m_lhs) ++ && RHS::match (rhs_def, m_rhs)) ++ return true; ++ /* Try match instruction and pattern. */ ++ else if (lhs_def && LHS::match (lhs_def, m_lhs) ++ && RHS::match (rhs_op, insn, m_rhs)) ++ return true; ++ /* Try match pattern and instruction. */ ++ else if (rhs_def && LHS::match (lhs_op, insn, m_lhs) ++ && RHS::match (rhs_def, m_rhs)) ++ return true; ++ /* Try match both patterns. 
*/ ++ else ++ return LHS::match (lhs_op, insn, m_lhs) ++ && RHS::match (rhs_op, insn, m_rhs); ++ } ++}; ++ ++#endif // GCC_RTL_MATCHER_H +diff --git a/gcc/target.def b/gcc/target.def +index 8797a21d5..c9bb2b4c2 100644 +--- a/gcc/target.def ++++ b/gcc/target.def +@@ -2693,6 +2693,47 @@ contains post-index or pre-index operation.", + bool, (int icode, bool *has_wb), + NULL) + ++DEFHOOK ++(get_v16qi_mode, ++ "This function get the 16 byte elements vector mode if target supports this.", ++ machine_mode, (), ++ NULL) ++ ++DEFHOOK ++(gen_rev32v16qi, ++ "This function generate the byte reverse instruction\n\ ++ of 16 byte elements vector if target supports this.", ++ rtx, (rtx dest, rtx src), ++ NULL) ++ ++DEFHOOK ++(gen_aesev16qi, ++ "This function generate the AES encryption instruction\n\ ++ of 16 byte elements vector if target supports this.", ++ rtx, (rtx dest, rtx src1, rtx src2), ++ NULL) ++ ++DEFHOOK ++(gen_aesdv16qi, ++ "This function generate the AES decryption instruction\n\ ++ of 16 byte elements vector if target supports this.", ++ rtx, (rtx dest, rtx src1, rtx src2), ++ NULL) ++ ++DEFHOOK ++(gen_aesmcv16qi, ++ "This function generate the AES mix columns instruction\n\ ++ of 16 byte elements vector if target supports this.", ++ rtx, (rtx dest, rtx src), ++ NULL) ++ ++DEFHOOK ++(gen_aesimcv16qi, ++ "This function generate the AES inversed mix columns instruction\n\ ++ of 16 byte elements vector if target supports this.", ++ rtx, (rtx dest, rtx src), ++ NULL) ++ + DEFHOOK + (gen_ccmp_first, + "This function prepares to emit a comparison insn for the first compare in a\n\ +diff --git a/gcc/testsuite/gcc.target/aarch64/aes-decrypt.c b/gcc/testsuite/gcc.target/aarch64/aes-decrypt.c +new file mode 100644 +index 000000000..966ec5532 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/aarch64/aes-decrypt.c +@@ -0,0 +1,478 @@ ++/* { dg-do run } */ ++/* { dg-options "-O3 -fno-inline --save-temps -fcrypto-accel-aes -march=armv8.2-a+lse+crypto" } */ ++ ++#include ++#include 
++#include ++#include ++#include ++ ++typedef uint8_t u8; ++typedef uint32_t u32; ++ ++static const u32 Td0[256] = { ++ 0x51f4a750U, 0x7e416553U, 0x1a17a4c3U, 0x3a275e96U, ++ 0x3bab6bcbU, 0x1f9d45f1U, 0xacfa58abU, 0x4be30393U, ++ 0x2030fa55U, 0xad766df6U, 0x88cc7691U, 0xf5024c25U, ++ 0x4fe5d7fcU, 0xc52acbd7U, 0x26354480U, 0xb562a38fU, ++ 0xdeb15a49U, 0x25ba1b67U, 0x45ea0e98U, 0x5dfec0e1U, ++ 0xc32f7502U, 0x814cf012U, 0x8d4697a3U, 0x6bd3f9c6U, ++ 0x038f5fe7U, 0x15929c95U, 0xbf6d7aebU, 0x955259daU, ++ 0xd4be832dU, 0x587421d3U, 0x49e06929U, 0x8ec9c844U, ++ 0x75c2896aU, 0xf48e7978U, 0x99583e6bU, 0x27b971ddU, ++ 0xbee14fb6U, 0xf088ad17U, 0xc920ac66U, 0x7dce3ab4U, ++ 0x63df4a18U, 0xe51a3182U, 0x97513360U, 0x62537f45U, ++ 0xb16477e0U, 0xbb6bae84U, 0xfe81a01cU, 0xf9082b94U, ++ 0x70486858U, 0x8f45fd19U, 0x94de6c87U, 0x527bf8b7U, ++ 0xab73d323U, 0x724b02e2U, 0xe31f8f57U, 0x6655ab2aU, ++ 0xb2eb2807U, 0x2fb5c203U, 0x86c57b9aU, 0xd33708a5U, ++ 0x302887f2U, 0x23bfa5b2U, 0x02036abaU, 0xed16825cU, ++ 0x8acf1c2bU, 0xa779b492U, 0xf307f2f0U, 0x4e69e2a1U, ++ 0x65daf4cdU, 0x0605bed5U, 0xd134621fU, 0xc4a6fe8aU, ++ 0x342e539dU, 0xa2f355a0U, 0x058ae132U, 0xa4f6eb75U, ++ 0x0b83ec39U, 0x4060efaaU, 0x5e719f06U, 0xbd6e1051U, ++ 0x3e218af9U, 0x96dd063dU, 0xdd3e05aeU, 0x4de6bd46U, ++ 0x91548db5U, 0x71c45d05U, 0x0406d46fU, 0x605015ffU, ++ 0x1998fb24U, 0xd6bde997U, 0x894043ccU, 0x67d99e77U, ++ 0xb0e842bdU, 0x07898b88U, 0xe7195b38U, 0x79c8eedbU, ++ 0xa17c0a47U, 0x7c420fe9U, 0xf8841ec9U, 0x00000000U, ++ 0x09808683U, 0x322bed48U, 0x1e1170acU, 0x6c5a724eU, ++ 0xfd0efffbU, 0x0f853856U, 0x3daed51eU, 0x362d3927U, ++ 0x0a0fd964U, 0x685ca621U, 0x9b5b54d1U, 0x24362e3aU, ++ 0x0c0a67b1U, 0x9357e70fU, 0xb4ee96d2U, 0x1b9b919eU, ++ 0x80c0c54fU, 0x61dc20a2U, 0x5a774b69U, 0x1c121a16U, ++ 0xe293ba0aU, 0xc0a02ae5U, 0x3c22e043U, 0x121b171dU, ++ 0x0e090d0bU, 0xf28bc7adU, 0x2db6a8b9U, 0x141ea9c8U, ++ 0x57f11985U, 0xaf75074cU, 0xee99ddbbU, 0xa37f60fdU, ++ 0xf701269fU, 0x5c72f5bcU, 0x44663bc5U, 0x5bfb7e34U, ++ 
0x8b432976U, 0xcb23c6dcU, 0xb6edfc68U, 0xb8e4f163U, ++ 0xd731dccaU, 0x42638510U, 0x13972240U, 0x84c61120U, ++ 0x854a247dU, 0xd2bb3df8U, 0xaef93211U, 0xc729a16dU, ++ 0x1d9e2f4bU, 0xdcb230f3U, 0x0d8652ecU, 0x77c1e3d0U, ++ 0x2bb3166cU, 0xa970b999U, 0x119448faU, 0x47e96422U, ++ 0xa8fc8cc4U, 0xa0f03f1aU, 0x567d2cd8U, 0x223390efU, ++ 0x87494ec7U, 0xd938d1c1U, 0x8ccaa2feU, 0x98d40b36U, ++ 0xa6f581cfU, 0xa57ade28U, 0xdab78e26U, 0x3fadbfa4U, ++ 0x2c3a9de4U, 0x5078920dU, 0x6a5fcc9bU, 0x547e4662U, ++ 0xf68d13c2U, 0x90d8b8e8U, 0x2e39f75eU, 0x82c3aff5U, ++ 0x9f5d80beU, 0x69d0937cU, 0x6fd52da9U, 0xcf2512b3U, ++ 0xc8ac993bU, 0x10187da7U, 0xe89c636eU, 0xdb3bbb7bU, ++ 0xcd267809U, 0x6e5918f4U, 0xec9ab701U, 0x834f9aa8U, ++ 0xe6956e65U, 0xaaffe67eU, 0x21bccf08U, 0xef15e8e6U, ++ 0xbae79bd9U, 0x4a6f36ceU, 0xea9f09d4U, 0x29b07cd6U, ++ 0x31a4b2afU, 0x2a3f2331U, 0xc6a59430U, 0x35a266c0U, ++ 0x744ebc37U, 0xfc82caa6U, 0xe090d0b0U, 0x33a7d815U, ++ 0xf104984aU, 0x41ecdaf7U, 0x7fcd500eU, 0x1791f62fU, ++ 0x764dd68dU, 0x43efb04dU, 0xccaa4d54U, 0xe49604dfU, ++ 0x9ed1b5e3U, 0x4c6a881bU, 0xc12c1fb8U, 0x4665517fU, ++ 0x9d5eea04U, 0x018c355dU, 0xfa877473U, 0xfb0b412eU, ++ 0xb3671d5aU, 0x92dbd252U, 0xe9105633U, 0x6dd64713U, ++ 0x9ad7618cU, 0x37a10c7aU, 0x59f8148eU, 0xeb133c89U, ++ 0xcea927eeU, 0xb761c935U, 0xe11ce5edU, 0x7a47b13cU, ++ 0x9cd2df59U, 0x55f2733fU, 0x1814ce79U, 0x73c737bfU, ++ 0x53f7cdeaU, 0x5ffdaa5bU, 0xdf3d6f14U, 0x7844db86U, ++ 0xcaaff381U, 0xb968c43eU, 0x3824342cU, 0xc2a3405fU, ++ 0x161dc372U, 0xbce2250cU, 0x283c498bU, 0xff0d9541U, ++ 0x39a80171U, 0x080cb3deU, 0xd8b4e49cU, 0x6456c190U, ++ 0x7bcb8461U, 0xd532b670U, 0x486c5c74U, 0xd0b85742U, ++}; ++ ++static const u32 Td1[256] = { ++ 0x5051f4a7U, 0x537e4165U, 0xc31a17a4U, 0x963a275eU, ++ 0xcb3bab6bU, 0xf11f9d45U, 0xabacfa58U, 0x934be303U, ++ 0x552030faU, 0xf6ad766dU, 0x9188cc76U, 0x25f5024cU, ++ 0xfc4fe5d7U, 0xd7c52acbU, 0x80263544U, 0x8fb562a3U, ++ 0x49deb15aU, 0x6725ba1bU, 0x9845ea0eU, 0xe15dfec0U, ++ 0x02c32f75U, 0x12814cf0U, 
0xa38d4697U, 0xc66bd3f9U, ++ 0xe7038f5fU, 0x9515929cU, 0xebbf6d7aU, 0xda955259U, ++ 0x2dd4be83U, 0xd3587421U, 0x2949e069U, 0x448ec9c8U, ++ 0x6a75c289U, 0x78f48e79U, 0x6b99583eU, 0xdd27b971U, ++ 0xb6bee14fU, 0x17f088adU, 0x66c920acU, 0xb47dce3aU, ++ 0x1863df4aU, 0x82e51a31U, 0x60975133U, 0x4562537fU, ++ 0xe0b16477U, 0x84bb6baeU, 0x1cfe81a0U, 0x94f9082bU, ++ 0x58704868U, 0x198f45fdU, 0x8794de6cU, 0xb7527bf8U, ++ 0x23ab73d3U, 0xe2724b02U, 0x57e31f8fU, 0x2a6655abU, ++ 0x07b2eb28U, 0x032fb5c2U, 0x9a86c57bU, 0xa5d33708U, ++ 0xf2302887U, 0xb223bfa5U, 0xba02036aU, 0x5ced1682U, ++ 0x2b8acf1cU, 0x92a779b4U, 0xf0f307f2U, 0xa14e69e2U, ++ 0xcd65daf4U, 0xd50605beU, 0x1fd13462U, 0x8ac4a6feU, ++ 0x9d342e53U, 0xa0a2f355U, 0x32058ae1U, 0x75a4f6ebU, ++ 0x390b83ecU, 0xaa4060efU, 0x065e719fU, 0x51bd6e10U, ++ 0xf93e218aU, 0x3d96dd06U, 0xaedd3e05U, 0x464de6bdU, ++ 0xb591548dU, 0x0571c45dU, 0x6f0406d4U, 0xff605015U, ++ 0x241998fbU, 0x97d6bde9U, 0xcc894043U, 0x7767d99eU, ++ 0xbdb0e842U, 0x8807898bU, 0x38e7195bU, 0xdb79c8eeU, ++ 0x47a17c0aU, 0xe97c420fU, 0xc9f8841eU, 0x00000000U, ++ 0x83098086U, 0x48322bedU, 0xac1e1170U, 0x4e6c5a72U, ++ 0xfbfd0effU, 0x560f8538U, 0x1e3daed5U, 0x27362d39U, ++ 0x640a0fd9U, 0x21685ca6U, 0xd19b5b54U, 0x3a24362eU, ++ 0xb10c0a67U, 0x0f9357e7U, 0xd2b4ee96U, 0x9e1b9b91U, ++ 0x4f80c0c5U, 0xa261dc20U, 0x695a774bU, 0x161c121aU, ++ 0x0ae293baU, 0xe5c0a02aU, 0x433c22e0U, 0x1d121b17U, ++ 0x0b0e090dU, 0xadf28bc7U, 0xb92db6a8U, 0xc8141ea9U, ++ 0x8557f119U, 0x4caf7507U, 0xbbee99ddU, 0xfda37f60U, ++ 0x9ff70126U, 0xbc5c72f5U, 0xc544663bU, 0x345bfb7eU, ++ 0x768b4329U, 0xdccb23c6U, 0x68b6edfcU, 0x63b8e4f1U, ++ 0xcad731dcU, 0x10426385U, 0x40139722U, 0x2084c611U, ++ 0x7d854a24U, 0xf8d2bb3dU, 0x11aef932U, 0x6dc729a1U, ++ 0x4b1d9e2fU, 0xf3dcb230U, 0xec0d8652U, 0xd077c1e3U, ++ 0x6c2bb316U, 0x99a970b9U, 0xfa119448U, 0x2247e964U, ++ 0xc4a8fc8cU, 0x1aa0f03fU, 0xd8567d2cU, 0xef223390U, ++ 0xc787494eU, 0xc1d938d1U, 0xfe8ccaa2U, 0x3698d40bU, ++ 0xcfa6f581U, 0x28a57adeU, 0x26dab78eU, 
0xa43fadbfU, ++ 0xe42c3a9dU, 0x0d507892U, 0x9b6a5fccU, 0x62547e46U, ++ 0xc2f68d13U, 0xe890d8b8U, 0x5e2e39f7U, 0xf582c3afU, ++ 0xbe9f5d80U, 0x7c69d093U, 0xa96fd52dU, 0xb3cf2512U, ++ 0x3bc8ac99U, 0xa710187dU, 0x6ee89c63U, 0x7bdb3bbbU, ++ 0x09cd2678U, 0xf46e5918U, 0x01ec9ab7U, 0xa8834f9aU, ++ 0x65e6956eU, 0x7eaaffe6U, 0x0821bccfU, 0xe6ef15e8U, ++ 0xd9bae79bU, 0xce4a6f36U, 0xd4ea9f09U, 0xd629b07cU, ++ 0xaf31a4b2U, 0x312a3f23U, 0x30c6a594U, 0xc035a266U, ++ 0x37744ebcU, 0xa6fc82caU, 0xb0e090d0U, 0x1533a7d8U, ++ 0x4af10498U, 0xf741ecdaU, 0x0e7fcd50U, 0x2f1791f6U, ++ 0x8d764dd6U, 0x4d43efb0U, 0x54ccaa4dU, 0xdfe49604U, ++ 0xe39ed1b5U, 0x1b4c6a88U, 0xb8c12c1fU, 0x7f466551U, ++ 0x049d5eeaU, 0x5d018c35U, 0x73fa8774U, 0x2efb0b41U, ++ 0x5ab3671dU, 0x5292dbd2U, 0x33e91056U, 0x136dd647U, ++ 0x8c9ad761U, 0x7a37a10cU, 0x8e59f814U, 0x89eb133cU, ++ 0xeecea927U, 0x35b761c9U, 0xede11ce5U, 0x3c7a47b1U, ++ 0x599cd2dfU, 0x3f55f273U, 0x791814ceU, 0xbf73c737U, ++ 0xea53f7cdU, 0x5b5ffdaaU, 0x14df3d6fU, 0x867844dbU, ++ 0x81caaff3U, 0x3eb968c4U, 0x2c382434U, 0x5fc2a340U, ++ 0x72161dc3U, 0x0cbce225U, 0x8b283c49U, 0x41ff0d95U, ++ 0x7139a801U, 0xde080cb3U, 0x9cd8b4e4U, 0x906456c1U, ++ 0x617bcb84U, 0x70d532b6U, 0x74486c5cU, 0x42d0b857U, ++}; ++ ++static const u32 Td2[256] = { ++ 0xa75051f4U, 0x65537e41U, 0xa4c31a17U, 0x5e963a27U, ++ 0x6bcb3babU, 0x45f11f9dU, 0x58abacfaU, 0x03934be3U, ++ 0xfa552030U, 0x6df6ad76U, 0x769188ccU, 0x4c25f502U, ++ 0xd7fc4fe5U, 0xcbd7c52aU, 0x44802635U, 0xa38fb562U, ++ 0x5a49deb1U, 0x1b6725baU, 0x0e9845eaU, 0xc0e15dfeU, ++ 0x7502c32fU, 0xf012814cU, 0x97a38d46U, 0xf9c66bd3U, ++ 0x5fe7038fU, 0x9c951592U, 0x7aebbf6dU, 0x59da9552U, ++ 0x832dd4beU, 0x21d35874U, 0x692949e0U, 0xc8448ec9U, ++ 0x896a75c2U, 0x7978f48eU, 0x3e6b9958U, 0x71dd27b9U, ++ 0x4fb6bee1U, 0xad17f088U, 0xac66c920U, 0x3ab47dceU, ++ 0x4a1863dfU, 0x3182e51aU, 0x33609751U, 0x7f456253U, ++ 0x77e0b164U, 0xae84bb6bU, 0xa01cfe81U, 0x2b94f908U, ++ 0x68587048U, 0xfd198f45U, 0x6c8794deU, 0xf8b7527bU, ++ 0xd323ab73U, 
0x02e2724bU, 0x8f57e31fU, 0xab2a6655U, ++ 0x2807b2ebU, 0xc2032fb5U, 0x7b9a86c5U, 0x08a5d337U, ++ 0x87f23028U, 0xa5b223bfU, 0x6aba0203U, 0x825ced16U, ++ 0x1c2b8acfU, 0xb492a779U, 0xf2f0f307U, 0xe2a14e69U, ++ 0xf4cd65daU, 0xbed50605U, 0x621fd134U, 0xfe8ac4a6U, ++ 0x539d342eU, 0x55a0a2f3U, 0xe132058aU, 0xeb75a4f6U, ++ 0xec390b83U, 0xefaa4060U, 0x9f065e71U, 0x1051bd6eU, ++ 0x8af93e21U, 0x063d96ddU, 0x05aedd3eU, 0xbd464de6U, ++ 0x8db59154U, 0x5d0571c4U, 0xd46f0406U, 0x15ff6050U, ++ 0xfb241998U, 0xe997d6bdU, 0x43cc8940U, 0x9e7767d9U, ++ 0x42bdb0e8U, 0x8b880789U, 0x5b38e719U, 0xeedb79c8U, ++ 0x0a47a17cU, 0x0fe97c42U, 0x1ec9f884U, 0x00000000U, ++ 0x86830980U, 0xed48322bU, 0x70ac1e11U, 0x724e6c5aU, ++ 0xfffbfd0eU, 0x38560f85U, 0xd51e3daeU, 0x3927362dU, ++ 0xd9640a0fU, 0xa621685cU, 0x54d19b5bU, 0x2e3a2436U, ++ 0x67b10c0aU, 0xe70f9357U, 0x96d2b4eeU, 0x919e1b9bU, ++ 0xc54f80c0U, 0x20a261dcU, 0x4b695a77U, 0x1a161c12U, ++ 0xba0ae293U, 0x2ae5c0a0U, 0xe0433c22U, 0x171d121bU, ++ 0x0d0b0e09U, 0xc7adf28bU, 0xa8b92db6U, 0xa9c8141eU, ++ 0x198557f1U, 0x074caf75U, 0xddbbee99U, 0x60fda37fU, ++ 0x269ff701U, 0xf5bc5c72U, 0x3bc54466U, 0x7e345bfbU, ++ 0x29768b43U, 0xc6dccb23U, 0xfc68b6edU, 0xf163b8e4U, ++ 0xdccad731U, 0x85104263U, 0x22401397U, 0x112084c6U, ++ 0x247d854aU, 0x3df8d2bbU, 0x3211aef9U, 0xa16dc729U, ++ 0x2f4b1d9eU, 0x30f3dcb2U, 0x52ec0d86U, 0xe3d077c1U, ++ 0x166c2bb3U, 0xb999a970U, 0x48fa1194U, 0x642247e9U, ++ 0x8cc4a8fcU, 0x3f1aa0f0U, 0x2cd8567dU, 0x90ef2233U, ++ 0x4ec78749U, 0xd1c1d938U, 0xa2fe8ccaU, 0x0b3698d4U, ++ 0x81cfa6f5U, 0xde28a57aU, 0x8e26dab7U, 0xbfa43fadU, ++ 0x9de42c3aU, 0x920d5078U, 0xcc9b6a5fU, 0x4662547eU, ++ 0x13c2f68dU, 0xb8e890d8U, 0xf75e2e39U, 0xaff582c3U, ++ 0x80be9f5dU, 0x937c69d0U, 0x2da96fd5U, 0x12b3cf25U, ++ 0x993bc8acU, 0x7da71018U, 0x636ee89cU, 0xbb7bdb3bU, ++ 0x7809cd26U, 0x18f46e59U, 0xb701ec9aU, 0x9aa8834fU, ++ 0x6e65e695U, 0xe67eaaffU, 0xcf0821bcU, 0xe8e6ef15U, ++ 0x9bd9bae7U, 0x36ce4a6fU, 0x09d4ea9fU, 0x7cd629b0U, ++ 0xb2af31a4U, 0x23312a3fU, 
0x9430c6a5U, 0x66c035a2U, ++ 0xbc37744eU, 0xcaa6fc82U, 0xd0b0e090U, 0xd81533a7U, ++ 0x984af104U, 0xdaf741ecU, 0x500e7fcdU, 0xf62f1791U, ++ 0xd68d764dU, 0xb04d43efU, 0x4d54ccaaU, 0x04dfe496U, ++ 0xb5e39ed1U, 0x881b4c6aU, 0x1fb8c12cU, 0x517f4665U, ++ 0xea049d5eU, 0x355d018cU, 0x7473fa87U, 0x412efb0bU, ++ 0x1d5ab367U, 0xd25292dbU, 0x5633e910U, 0x47136dd6U, ++ 0x618c9ad7U, 0x0c7a37a1U, 0x148e59f8U, 0x3c89eb13U, ++ 0x27eecea9U, 0xc935b761U, 0xe5ede11cU, 0xb13c7a47U, ++ 0xdf599cd2U, 0x733f55f2U, 0xce791814U, 0x37bf73c7U, ++ 0xcdea53f7U, 0xaa5b5ffdU, 0x6f14df3dU, 0xdb867844U, ++ 0xf381caafU, 0xc43eb968U, 0x342c3824U, 0x405fc2a3U, ++ 0xc372161dU, 0x250cbce2U, 0x498b283cU, 0x9541ff0dU, ++ 0x017139a8U, 0xb3de080cU, 0xe49cd8b4U, 0xc1906456U, ++ 0x84617bcbU, 0xb670d532U, 0x5c74486cU, 0x5742d0b8U, ++}; ++ ++static const u32 Td3[256] = { ++ 0xf4a75051U, 0x4165537eU, 0x17a4c31aU, 0x275e963aU, ++ 0xab6bcb3bU, 0x9d45f11fU, 0xfa58abacU, 0xe303934bU, ++ 0x30fa5520U, 0x766df6adU, 0xcc769188U, 0x024c25f5U, ++ 0xe5d7fc4fU, 0x2acbd7c5U, 0x35448026U, 0x62a38fb5U, ++ 0xb15a49deU, 0xba1b6725U, 0xea0e9845U, 0xfec0e15dU, ++ 0x2f7502c3U, 0x4cf01281U, 0x4697a38dU, 0xd3f9c66bU, ++ 0x8f5fe703U, 0x929c9515U, 0x6d7aebbfU, 0x5259da95U, ++ 0xbe832dd4U, 0x7421d358U, 0xe0692949U, 0xc9c8448eU, ++ 0xc2896a75U, 0x8e7978f4U, 0x583e6b99U, 0xb971dd27U, ++ 0xe14fb6beU, 0x88ad17f0U, 0x20ac66c9U, 0xce3ab47dU, ++ 0xdf4a1863U, 0x1a3182e5U, 0x51336097U, 0x537f4562U, ++ 0x6477e0b1U, 0x6bae84bbU, 0x81a01cfeU, 0x082b94f9U, ++ 0x48685870U, 0x45fd198fU, 0xde6c8794U, 0x7bf8b752U, ++ 0x73d323abU, 0x4b02e272U, 0x1f8f57e3U, 0x55ab2a66U, ++ 0xeb2807b2U, 0xb5c2032fU, 0xc57b9a86U, 0x3708a5d3U, ++ 0x2887f230U, 0xbfa5b223U, 0x036aba02U, 0x16825cedU, ++ 0xcf1c2b8aU, 0x79b492a7U, 0x07f2f0f3U, 0x69e2a14eU, ++ 0xdaf4cd65U, 0x05bed506U, 0x34621fd1U, 0xa6fe8ac4U, ++ 0x2e539d34U, 0xf355a0a2U, 0x8ae13205U, 0xf6eb75a4U, ++ 0x83ec390bU, 0x60efaa40U, 0x719f065eU, 0x6e1051bdU, ++ 0x218af93eU, 0xdd063d96U, 0x3e05aeddU, 0xe6bd464dU, ++ 
0x548db591U, 0xc45d0571U, 0x06d46f04U, 0x5015ff60U, ++ 0x98fb2419U, 0xbde997d6U, 0x4043cc89U, 0xd99e7767U, ++ 0xe842bdb0U, 0x898b8807U, 0x195b38e7U, 0xc8eedb79U, ++ 0x7c0a47a1U, 0x420fe97cU, 0x841ec9f8U, 0x00000000U, ++ 0x80868309U, 0x2bed4832U, 0x1170ac1eU, 0x5a724e6cU, ++ 0x0efffbfdU, 0x8538560fU, 0xaed51e3dU, 0x2d392736U, ++ 0x0fd9640aU, 0x5ca62168U, 0x5b54d19bU, 0x362e3a24U, ++ 0x0a67b10cU, 0x57e70f93U, 0xee96d2b4U, 0x9b919e1bU, ++ 0xc0c54f80U, 0xdc20a261U, 0x774b695aU, 0x121a161cU, ++ 0x93ba0ae2U, 0xa02ae5c0U, 0x22e0433cU, 0x1b171d12U, ++ 0x090d0b0eU, 0x8bc7adf2U, 0xb6a8b92dU, 0x1ea9c814U, ++ 0xf1198557U, 0x75074cafU, 0x99ddbbeeU, 0x7f60fda3U, ++ 0x01269ff7U, 0x72f5bc5cU, 0x663bc544U, 0xfb7e345bU, ++ 0x4329768bU, 0x23c6dccbU, 0xedfc68b6U, 0xe4f163b8U, ++ 0x31dccad7U, 0x63851042U, 0x97224013U, 0xc6112084U, ++ 0x4a247d85U, 0xbb3df8d2U, 0xf93211aeU, 0x29a16dc7U, ++ 0x9e2f4b1dU, 0xb230f3dcU, 0x8652ec0dU, 0xc1e3d077U, ++ 0xb3166c2bU, 0x70b999a9U, 0x9448fa11U, 0xe9642247U, ++ 0xfc8cc4a8U, 0xf03f1aa0U, 0x7d2cd856U, 0x3390ef22U, ++ 0x494ec787U, 0x38d1c1d9U, 0xcaa2fe8cU, 0xd40b3698U, ++ 0xf581cfa6U, 0x7ade28a5U, 0xb78e26daU, 0xadbfa43fU, ++ 0x3a9de42cU, 0x78920d50U, 0x5fcc9b6aU, 0x7e466254U, ++ 0x8d13c2f6U, 0xd8b8e890U, 0x39f75e2eU, 0xc3aff582U, ++ 0x5d80be9fU, 0xd0937c69U, 0xd52da96fU, 0x2512b3cfU, ++ 0xac993bc8U, 0x187da710U, 0x9c636ee8U, 0x3bbb7bdbU, ++ 0x267809cdU, 0x5918f46eU, 0x9ab701ecU, 0x4f9aa883U, ++ 0x956e65e6U, 0xffe67eaaU, 0xbccf0821U, 0x15e8e6efU, ++ 0xe79bd9baU, 0x6f36ce4aU, 0x9f09d4eaU, 0xb07cd629U, ++ 0xa4b2af31U, 0x3f23312aU, 0xa59430c6U, 0xa266c035U, ++ 0x4ebc3774U, 0x82caa6fcU, 0x90d0b0e0U, 0xa7d81533U, ++ 0x04984af1U, 0xecdaf741U, 0xcd500e7fU, 0x91f62f17U, ++ 0x4dd68d76U, 0xefb04d43U, 0xaa4d54ccU, 0x9604dfe4U, ++ 0xd1b5e39eU, 0x6a881b4cU, 0x2c1fb8c1U, 0x65517f46U, ++ 0x5eea049dU, 0x8c355d01U, 0x877473faU, 0x0b412efbU, ++ 0x671d5ab3U, 0xdbd25292U, 0x105633e9U, 0xd647136dU, ++ 0xd7618c9aU, 0xa10c7a37U, 0xf8148e59U, 0x133c89ebU, ++ 0xa927eeceU, 
0x61c935b7U, 0x1ce5ede1U, 0x47b13c7aU, ++ 0xd2df599cU, 0xf2733f55U, 0x14ce7918U, 0xc737bf73U, ++ 0xf7cdea53U, 0xfdaa5b5fU, 0x3d6f14dfU, 0x44db8678U, ++ 0xaff381caU, 0x68c43eb9U, 0x24342c38U, 0xa3405fc2U, ++ 0x1dc37216U, 0xe2250cbcU, 0x3c498b28U, 0x0d9541ffU, ++ 0xa8017139U, 0x0cb3de08U, 0xb4e49cd8U, 0x56c19064U, ++ 0xcb84617bU, 0x32b670d5U, 0x6c5c7448U, 0xb85742d0U, ++}; ++ ++static const u8 Td4[256] = { ++ 0x52U, 0x09U, 0x6aU, 0xd5U, 0x30U, 0x36U, 0xa5U, 0x38U, ++ 0xbfU, 0x40U, 0xa3U, 0x9eU, 0x81U, 0xf3U, 0xd7U, 0xfbU, ++ 0x7cU, 0xe3U, 0x39U, 0x82U, 0x9bU, 0x2fU, 0xffU, 0x87U, ++ 0x34U, 0x8eU, 0x43U, 0x44U, 0xc4U, 0xdeU, 0xe9U, 0xcbU, ++ 0x54U, 0x7bU, 0x94U, 0x32U, 0xa6U, 0xc2U, 0x23U, 0x3dU, ++ 0xeeU, 0x4cU, 0x95U, 0x0bU, 0x42U, 0xfaU, 0xc3U, 0x4eU, ++ 0x08U, 0x2eU, 0xa1U, 0x66U, 0x28U, 0xd9U, 0x24U, 0xb2U, ++ 0x76U, 0x5bU, 0xa2U, 0x49U, 0x6dU, 0x8bU, 0xd1U, 0x25U, ++ 0x72U, 0xf8U, 0xf6U, 0x64U, 0x86U, 0x68U, 0x98U, 0x16U, ++ 0xd4U, 0xa4U, 0x5cU, 0xccU, 0x5dU, 0x65U, 0xb6U, 0x92U, ++ 0x6cU, 0x70U, 0x48U, 0x50U, 0xfdU, 0xedU, 0xb9U, 0xdaU, ++ 0x5eU, 0x15U, 0x46U, 0x57U, 0xa7U, 0x8dU, 0x9dU, 0x84U, ++ 0x90U, 0xd8U, 0xabU, 0x00U, 0x8cU, 0xbcU, 0xd3U, 0x0aU, ++ 0xf7U, 0xe4U, 0x58U, 0x05U, 0xb8U, 0xb3U, 0x45U, 0x06U, ++ 0xd0U, 0x2cU, 0x1eU, 0x8fU, 0xcaU, 0x3fU, 0x0fU, 0x02U, ++ 0xc1U, 0xafU, 0xbdU, 0x03U, 0x01U, 0x13U, 0x8aU, 0x6bU, ++ 0x3aU, 0x91U, 0x11U, 0x41U, 0x4fU, 0x67U, 0xdcU, 0xeaU, ++ 0x97U, 0xf2U, 0xcfU, 0xceU, 0xf0U, 0xb4U, 0xe6U, 0x73U, ++ 0x96U, 0xacU, 0x74U, 0x22U, 0xe7U, 0xadU, 0x35U, 0x85U, ++ 0xe2U, 0xf9U, 0x37U, 0xe8U, 0x1cU, 0x75U, 0xdfU, 0x6eU, ++ 0x47U, 0xf1U, 0x1aU, 0x71U, 0x1dU, 0x29U, 0xc5U, 0x89U, ++ 0x6fU, 0xb7U, 0x62U, 0x0eU, 0xaaU, 0x18U, 0xbeU, 0x1bU, ++ 0xfcU, 0x56U, 0x3eU, 0x4bU, 0xc6U, 0xd2U, 0x79U, 0x20U, ++ 0x9aU, 0xdbU, 0xc0U, 0xfeU, 0x78U, 0xcdU, 0x5aU, 0xf4U, ++ 0x1fU, 0xddU, 0xa8U, 0x33U, 0x88U, 0x07U, 0xc7U, 0x31U, ++ 0xb1U, 0x12U, 0x10U, 0x59U, 0x27U, 0x80U, 0xecU, 0x5fU, ++ 0x60U, 0x51U, 0x7fU, 0xa9U, 0x19U, 0xb5U, 0x4aU, 
0x0dU, ++ 0x2dU, 0xe5U, 0x7aU, 0x9fU, 0x93U, 0xc9U, 0x9cU, 0xefU, ++ 0xa0U, 0xe0U, 0x3bU, 0x4dU, 0xaeU, 0x2aU, 0xf5U, 0xb0U, ++ 0xc8U, 0xebU, 0xbbU, 0x3cU, 0x83U, 0x53U, 0x99U, 0x61U, ++ 0x17U, 0x2bU, 0x04U, 0x7eU, 0xbaU, 0x77U, 0xd6U, 0x26U, ++ 0xe1U, 0x69U, 0x14U, 0x63U, 0x55U, 0x21U, 0x0cU, 0x7dU, ++}; ++ ++#define GETU32(pt) \ ++ ( \ ++ ((u32)(pt)[0] << 24) \ ++ ^ ((u32)(pt)[1] << 16) \ ++ ^ ((u32)(pt)[2] << 8) \ ++ ^ ((u32)(pt)[3]) \ ++ ) ++ ++#define PUTU32(ct, st) \ ++ { \ ++ (ct)[0] = (u8)((st) >> 24); \ ++ (ct)[1] = (u8)((st) >> 16); \ ++ (ct)[2] = (u8)((st) >> 8); \ ++ (ct)[3] = (u8)(st); \ ++ } ++ ++void ++aes_decrypt (const unsigned char *in, unsigned char *out, ++ const u32 *rk, int nr) ++{ ++ u32 s0, s1, s2, s3, t0, t1, t2, t3; ++ ++ int r = nr >> 1; ++ ++ s0 = GETU32 (in ) ^ rk[0]; ++ s1 = GETU32 (in + 4) ^ rk[1]; ++ s2 = GETU32 (in + 8) ^ rk[2]; ++ s3 = GETU32 (in + 12) ^ rk[3]; ++ ++ for (;;) { ++ t0 = ++ Td0[(s0 >> 24) ] ^ ++ Td1[(s3 >> 16) & 0xff] ^ ++ Td2[(s2 >> 8) & 0xff] ^ ++ Td3[(s1 ) & 0xff] ^ ++ rk[4]; ++ t1 = ++ Td0[(s1 >> 24) ] ^ ++ Td1[(s0 >> 16) & 0xff] ^ ++ Td2[(s3 >> 8) & 0xff] ^ ++ Td3[(s2 ) & 0xff] ^ ++ rk[5]; ++ t2 = ++ Td0[(s2 >> 24) ] ^ ++ Td1[(s1 >> 16) & 0xff] ^ ++ Td2[(s0 >> 8) & 0xff] ^ ++ Td3[(s3 ) & 0xff] ^ ++ rk[6]; ++ t3 = ++ Td0[(s3 >> 24) ] ^ ++ Td1[(s2 >> 16) & 0xff] ^ ++ Td2[(s1 >> 8) & 0xff] ^ ++ Td3[(s0 ) & 0xff] ^ ++ rk[7]; ++ ++ rk += 8; ++ if (--r == 0) { ++ break; ++ } ++ ++ s0 = ++ Td0[(t0 >> 24) ] ^ ++ Td1[(t3 >> 16) & 0xff] ^ ++ Td2[(t2 >> 8) & 0xff] ^ ++ Td3[(t1 ) & 0xff] ^ ++ rk[0]; ++ s1 = ++ Td0[(t1 >> 24) ] ^ ++ Td1[(t0 >> 16) & 0xff] ^ ++ Td2[(t3 >> 8) & 0xff] ^ ++ Td3[(t2 ) & 0xff] ^ ++ rk[1]; ++ s2 = ++ Td0[(t2 >> 24) ] ^ ++ Td1[(t1 >> 16) & 0xff] ^ ++ Td2[(t0 >> 8) & 0xff] ^ ++ Td3[(t3 ) & 0xff] ^ ++ rk[2]; ++ s3 = ++ Td0[(t3 >> 24) ] ^ ++ Td1[(t2 >> 16) & 0xff] ^ ++ Td2[(t1 >> 8) & 0xff] ^ ++ Td3[(t0 ) & 0xff] ^ ++ rk[3]; ++ } ++ ++ s0 = ++ ((u32)Td4[(t0 >> 24) ] << 24) ^ ++ ((u32)Td4[(t3 >> 16) & 
0xff] << 16) ^ ++ ((u32)Td4[(t2 >> 8) & 0xff] << 8) ^ ++ ((u32)Td4[(t1 ) & 0xff]) ^ ++ rk[0]; ++ PUTU32 (out , s0); ++ ++ s1 = ++ ((u32)Td4[(t1 >> 24) ] << 24) ^ ++ ((u32)Td4[(t0 >> 16) & 0xff] << 16) ^ ++ ((u32)Td4[(t3 >> 8) & 0xff] << 8) ^ ++ ((u32)Td4[(t2 ) & 0xff]) ^ ++ rk[1]; ++ PUTU32 (out + 4, s1); ++ ++ s2 = ++ ((u32)Td4[(t2 >> 24) ] << 24) ^ ++ ((u32)Td4[(t1 >> 16) & 0xff] << 16) ^ ++ ((u32)Td4[(t0 >> 8) & 0xff] << 8) ^ ++ ((u32)Td4[(t3 ) & 0xff]) ^ ++ rk[2]; ++ PUTU32 (out + 8, s2); ++ ++ s3 = ++ ((u32)Td4[(t3 >> 24) ] << 24) ^ ++ ((u32)Td4[(t2 >> 16) & 0xff] << 16) ^ ++ ((u32)Td4[(t1 >> 8) & 0xff] << 8) ^ ++ ((u32)Td4[(t0 ) & 0xff]) ^ ++ rk[3]; ++ PUTU32 (out + 12, s3); ++} ++ ++int main () ++{ ++ const u8 input[16] = { 0x39, 0x25, 0x84, 0x1d, 0x02, 0xdc, 0x09, 0xfb, ++ 0xdc, 0x11, 0x85, 0x97, 0x19, 0x6a, 0x0b, 0x32 }; ++ ++ const u8 expected[16] = { 0x32, 0x43, 0xf6, 0xa8, 0x88, 0x5a, 0x30, 0x8d, ++ 0x31, 0x31, 0x98, 0xa2, 0xe0, 0x37, 0x07, 0x34 }; ++ ++ const u8 key[] = { 0xa8, 0xf9, 0x14, 0xd0, 0x89, 0x25, 0xee, 0xc9, ++ 0xc8, 0x0c, 0x3f, 0xe1, 0xa6, 0x0c, 0x63, 0xb6, ++ 0x63, 0x5a, 0x7b, 0x0c, 0xfe, 0xea, 0x19, 0x13, ++ 0x90, 0x88, 0x39, 0xb0, 0xb4, 0xfb, 0x4c, 0x66, ++ 0x5a, 0x92, 0x7d, 0xdf, 0x9d, 0xb0, 0x62, 0x1f, ++ 0x6e, 0x62, 0x20, 0xa3, 0x24, 0x73, 0x75, 0xd6, ++ 0x47, 0x76, 0xc0, 0x12, 0xc7, 0x22, 0x1f, 0xc0, ++ 0xf3, 0xd2, 0x42, 0xbc, 0x4a, 0x11, 0x55, 0x75, ++ 0x76, 0xd8, 0xfc, 0x6e, 0x80, 0x54, 0xdf, 0xd2, ++ 0x34, 0xf0, 0x5d, 0x7c, 0xb9, 0xc3, 0x17, 0xc9, ++ 0xfc, 0x0a, 0xa3, 0x6e, 0xf6, 0x8c, 0x23, 0xbc, ++ 0xb4, 0xa4, 0x82, 0xae, 0x8d, 0x33, 0x4a, 0xb5, ++ 0x13, 0x44, 0x88, 0x90, 0x0a, 0x86, 0x80, 0xd2, ++ 0x42, 0x28, 0xa1, 0x12, 0x39, 0x97, 0xc8, 0x1b, ++ 0xf7, 0x13, 0x1f, 0x7c, 0x19, 0xc2, 0x08, 0x42, ++ 0x48, 0xae, 0x21, 0xc0, 0x7b, 0xbf, 0x69, 0x09, ++ 0xeb, 0x05, 0x75, 0xcc, 0xee, 0xd1, 0x17, 0x3e, ++ 0x51, 0x6c, 0x29, 0x82, 0x33, 0x11, 0x48, 0xc9, ++ 0xa7, 0x08, 0x37, 0x2b, 0x05, 0xd4, 0x62, 0xf2, ++ 0xbf, 0xbd, 0x3e, 0xbc, 0x62, 
0x7d, 0x61, 0x4b, ++ 0x16, 0x15, 0x7e, 0x2b, 0xa6, 0xd2, 0xae, 0x28, ++ 0x88, 0x15, 0xf7, 0xab, 0x3c, 0x4f, 0xcf, 0x09 }; ++ ++ u8 output[16] = { 0 }; ++ ++ aes_decrypt (input, output, (u32*) key, 10); ++ ++ if (memcmp (output, expected, 16) != 0) ++ abort (); ++ ++ return 0; ++} ++ ++/* { dg-final { scan-assembler "rev32" } } */ ++/* { dg-final { scan-assembler "aesimc" } } */ ++/* { dg-final { scan-assembler "aesd" } } */ +diff --git a/gcc/testsuite/gcc.target/aarch64/aes-encrypt.c b/gcc/testsuite/gcc.target/aarch64/aes-encrypt.c +new file mode 100644 +index 000000000..e3f3c446f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/aarch64/aes-encrypt.c +@@ -0,0 +1,443 @@ ++/* { dg-do run } */ ++/* { dg-options "-O3 -fno-inline --save-temps -fcrypto-accel-aes -march=armv8.2-a+lse+crypto" } */ ++ ++#include ++#include ++#include ++#include ++#include ++ ++typedef uint8_t u8; ++typedef uint32_t u32; ++ ++static const u32 Te0[256] = { ++ 0xc66363a5U, 0xf87c7c84U, 0xee777799U, 0xf67b7b8dU, ++ 0xfff2f20dU, 0xd66b6bbdU, 0xde6f6fb1U, 0x91c5c554U, ++ 0x60303050U, 0x02010103U, 0xce6767a9U, 0x562b2b7dU, ++ 0xe7fefe19U, 0xb5d7d762U, 0x4dababe6U, 0xec76769aU, ++ 0x8fcaca45U, 0x1f82829dU, 0x89c9c940U, 0xfa7d7d87U, ++ 0xeffafa15U, 0xb25959ebU, 0x8e4747c9U, 0xfbf0f00bU, ++ 0x41adadecU, 0xb3d4d467U, 0x5fa2a2fdU, 0x45afafeaU, ++ 0x239c9cbfU, 0x53a4a4f7U, 0xe4727296U, 0x9bc0c05bU, ++ 0x75b7b7c2U, 0xe1fdfd1cU, 0x3d9393aeU, 0x4c26266aU, ++ 0x6c36365aU, 0x7e3f3f41U, 0xf5f7f702U, 0x83cccc4fU, ++ 0x6834345cU, 0x51a5a5f4U, 0xd1e5e534U, 0xf9f1f108U, ++ 0xe2717193U, 0xabd8d873U, 0x62313153U, 0x2a15153fU, ++ 0x0804040cU, 0x95c7c752U, 0x46232365U, 0x9dc3c35eU, ++ 0x30181828U, 0x379696a1U, 0x0a05050fU, 0x2f9a9ab5U, ++ 0x0e070709U, 0x24121236U, 0x1b80809bU, 0xdfe2e23dU, ++ 0xcdebeb26U, 0x4e272769U, 0x7fb2b2cdU, 0xea75759fU, ++ 0x1209091bU, 0x1d83839eU, 0x582c2c74U, 0x341a1a2eU, ++ 0x361b1b2dU, 0xdc6e6eb2U, 0xb45a5aeeU, 0x5ba0a0fbU, ++ 0xa45252f6U, 0x763b3b4dU, 0xb7d6d661U, 0x7db3b3ceU, ++ 
0x5229297bU, 0xdde3e33eU, 0x5e2f2f71U, 0x13848497U, ++ 0xa65353f5U, 0xb9d1d168U, 0x00000000U, 0xc1eded2cU, ++ 0x40202060U, 0xe3fcfc1fU, 0x79b1b1c8U, 0xb65b5bedU, ++ 0xd46a6abeU, 0x8dcbcb46U, 0x67bebed9U, 0x7239394bU, ++ 0x944a4adeU, 0x984c4cd4U, 0xb05858e8U, 0x85cfcf4aU, ++ 0xbbd0d06bU, 0xc5efef2aU, 0x4faaaae5U, 0xedfbfb16U, ++ 0x864343c5U, 0x9a4d4dd7U, 0x66333355U, 0x11858594U, ++ 0x8a4545cfU, 0xe9f9f910U, 0x04020206U, 0xfe7f7f81U, ++ 0xa05050f0U, 0x783c3c44U, 0x259f9fbaU, 0x4ba8a8e3U, ++ 0xa25151f3U, 0x5da3a3feU, 0x804040c0U, 0x058f8f8aU, ++ 0x3f9292adU, 0x219d9dbcU, 0x70383848U, 0xf1f5f504U, ++ 0x63bcbcdfU, 0x77b6b6c1U, 0xafdada75U, 0x42212163U, ++ 0x20101030U, 0xe5ffff1aU, 0xfdf3f30eU, 0xbfd2d26dU, ++ 0x81cdcd4cU, 0x180c0c14U, 0x26131335U, 0xc3ecec2fU, ++ 0xbe5f5fe1U, 0x359797a2U, 0x884444ccU, 0x2e171739U, ++ 0x93c4c457U, 0x55a7a7f2U, 0xfc7e7e82U, 0x7a3d3d47U, ++ 0xc86464acU, 0xba5d5de7U, 0x3219192bU, 0xe6737395U, ++ 0xc06060a0U, 0x19818198U, 0x9e4f4fd1U, 0xa3dcdc7fU, ++ 0x44222266U, 0x542a2a7eU, 0x3b9090abU, 0x0b888883U, ++ 0x8c4646caU, 0xc7eeee29U, 0x6bb8b8d3U, 0x2814143cU, ++ 0xa7dede79U, 0xbc5e5ee2U, 0x160b0b1dU, 0xaddbdb76U, ++ 0xdbe0e03bU, 0x64323256U, 0x743a3a4eU, 0x140a0a1eU, ++ 0x924949dbU, 0x0c06060aU, 0x4824246cU, 0xb85c5ce4U, ++ 0x9fc2c25dU, 0xbdd3d36eU, 0x43acacefU, 0xc46262a6U, ++ 0x399191a8U, 0x319595a4U, 0xd3e4e437U, 0xf279798bU, ++ 0xd5e7e732U, 0x8bc8c843U, 0x6e373759U, 0xda6d6db7U, ++ 0x018d8d8cU, 0xb1d5d564U, 0x9c4e4ed2U, 0x49a9a9e0U, ++ 0xd86c6cb4U, 0xac5656faU, 0xf3f4f407U, 0xcfeaea25U, ++ 0xca6565afU, 0xf47a7a8eU, 0x47aeaee9U, 0x10080818U, ++ 0x6fbabad5U, 0xf0787888U, 0x4a25256fU, 0x5c2e2e72U, ++ 0x381c1c24U, 0x57a6a6f1U, 0x73b4b4c7U, 0x97c6c651U, ++ 0xcbe8e823U, 0xa1dddd7cU, 0xe874749cU, 0x3e1f1f21U, ++ 0x964b4bddU, 0x61bdbddcU, 0x0d8b8b86U, 0x0f8a8a85U, ++ 0xe0707090U, 0x7c3e3e42U, 0x71b5b5c4U, 0xcc6666aaU, ++ 0x904848d8U, 0x06030305U, 0xf7f6f601U, 0x1c0e0e12U, ++ 0xc26161a3U, 0x6a35355fU, 0xae5757f9U, 0x69b9b9d0U, ++ 0x17868691U, 
0x99c1c158U, 0x3a1d1d27U, 0x279e9eb9U, ++ 0xd9e1e138U, 0xebf8f813U, 0x2b9898b3U, 0x22111133U, ++ 0xd26969bbU, 0xa9d9d970U, 0x078e8e89U, 0x339494a7U, ++ 0x2d9b9bb6U, 0x3c1e1e22U, 0x15878792U, 0xc9e9e920U, ++ 0x87cece49U, 0xaa5555ffU, 0x50282878U, 0xa5dfdf7aU, ++ 0x038c8c8fU, 0x59a1a1f8U, 0x09898980U, 0x1a0d0d17U, ++ 0x65bfbfdaU, 0xd7e6e631U, 0x844242c6U, 0xd06868b8U, ++ 0x824141c3U, 0x299999b0U, 0x5a2d2d77U, 0x1e0f0f11U, ++ 0x7bb0b0cbU, 0xa85454fcU, 0x6dbbbbd6U, 0x2c16163aU, ++}; ++ ++static const u32 Te1[256] = { ++ 0xa5c66363U, 0x84f87c7cU, 0x99ee7777U, 0x8df67b7bU, ++ 0x0dfff2f2U, 0xbdd66b6bU, 0xb1de6f6fU, 0x5491c5c5U, ++ 0x50603030U, 0x03020101U, 0xa9ce6767U, 0x7d562b2bU, ++ 0x19e7fefeU, 0x62b5d7d7U, 0xe64dababU, 0x9aec7676U, ++ 0x458fcacaU, 0x9d1f8282U, 0x4089c9c9U, 0x87fa7d7dU, ++ 0x15effafaU, 0xebb25959U, 0xc98e4747U, 0x0bfbf0f0U, ++ 0xec41adadU, 0x67b3d4d4U, 0xfd5fa2a2U, 0xea45afafU, ++ 0xbf239c9cU, 0xf753a4a4U, 0x96e47272U, 0x5b9bc0c0U, ++ 0xc275b7b7U, 0x1ce1fdfdU, 0xae3d9393U, 0x6a4c2626U, ++ 0x5a6c3636U, 0x417e3f3fU, 0x02f5f7f7U, 0x4f83ccccU, ++ 0x5c683434U, 0xf451a5a5U, 0x34d1e5e5U, 0x08f9f1f1U, ++ 0x93e27171U, 0x73abd8d8U, 0x53623131U, 0x3f2a1515U, ++ 0x0c080404U, 0x5295c7c7U, 0x65462323U, 0x5e9dc3c3U, ++ 0x28301818U, 0xa1379696U, 0x0f0a0505U, 0xb52f9a9aU, ++ 0x090e0707U, 0x36241212U, 0x9b1b8080U, 0x3ddfe2e2U, ++ 0x26cdebebU, 0x694e2727U, 0xcd7fb2b2U, 0x9fea7575U, ++ 0x1b120909U, 0x9e1d8383U, 0x74582c2cU, 0x2e341a1aU, ++ 0x2d361b1bU, 0xb2dc6e6eU, 0xeeb45a5aU, 0xfb5ba0a0U, ++ 0xf6a45252U, 0x4d763b3bU, 0x61b7d6d6U, 0xce7db3b3U, ++ 0x7b522929U, 0x3edde3e3U, 0x715e2f2fU, 0x97138484U, ++ 0xf5a65353U, 0x68b9d1d1U, 0x00000000U, 0x2cc1ededU, ++ 0x60402020U, 0x1fe3fcfcU, 0xc879b1b1U, 0xedb65b5bU, ++ 0xbed46a6aU, 0x468dcbcbU, 0xd967bebeU, 0x4b723939U, ++ 0xde944a4aU, 0xd4984c4cU, 0xe8b05858U, 0x4a85cfcfU, ++ 0x6bbbd0d0U, 0x2ac5efefU, 0xe54faaaaU, 0x16edfbfbU, ++ 0xc5864343U, 0xd79a4d4dU, 0x55663333U, 0x94118585U, ++ 0xcf8a4545U, 0x10e9f9f9U, 0x06040202U, 
0x81fe7f7fU, ++ 0xf0a05050U, 0x44783c3cU, 0xba259f9fU, 0xe34ba8a8U, ++ 0xf3a25151U, 0xfe5da3a3U, 0xc0804040U, 0x8a058f8fU, ++ 0xad3f9292U, 0xbc219d9dU, 0x48703838U, 0x04f1f5f5U, ++ 0xdf63bcbcU, 0xc177b6b6U, 0x75afdadaU, 0x63422121U, ++ 0x30201010U, 0x1ae5ffffU, 0x0efdf3f3U, 0x6dbfd2d2U, ++ 0x4c81cdcdU, 0x14180c0cU, 0x35261313U, 0x2fc3ececU, ++ 0xe1be5f5fU, 0xa2359797U, 0xcc884444U, 0x392e1717U, ++ 0x5793c4c4U, 0xf255a7a7U, 0x82fc7e7eU, 0x477a3d3dU, ++ 0xacc86464U, 0xe7ba5d5dU, 0x2b321919U, 0x95e67373U, ++ 0xa0c06060U, 0x98198181U, 0xd19e4f4fU, 0x7fa3dcdcU, ++ 0x66442222U, 0x7e542a2aU, 0xab3b9090U, 0x830b8888U, ++ 0xca8c4646U, 0x29c7eeeeU, 0xd36bb8b8U, 0x3c281414U, ++ 0x79a7dedeU, 0xe2bc5e5eU, 0x1d160b0bU, 0x76addbdbU, ++ 0x3bdbe0e0U, 0x56643232U, 0x4e743a3aU, 0x1e140a0aU, ++ 0xdb924949U, 0x0a0c0606U, 0x6c482424U, 0xe4b85c5cU, ++ 0x5d9fc2c2U, 0x6ebdd3d3U, 0xef43acacU, 0xa6c46262U, ++ 0xa8399191U, 0xa4319595U, 0x37d3e4e4U, 0x8bf27979U, ++ 0x32d5e7e7U, 0x438bc8c8U, 0x596e3737U, 0xb7da6d6dU, ++ 0x8c018d8dU, 0x64b1d5d5U, 0xd29c4e4eU, 0xe049a9a9U, ++ 0xb4d86c6cU, 0xfaac5656U, 0x07f3f4f4U, 0x25cfeaeaU, ++ 0xafca6565U, 0x8ef47a7aU, 0xe947aeaeU, 0x18100808U, ++ 0xd56fbabaU, 0x88f07878U, 0x6f4a2525U, 0x725c2e2eU, ++ 0x24381c1cU, 0xf157a6a6U, 0xc773b4b4U, 0x5197c6c6U, ++ 0x23cbe8e8U, 0x7ca1ddddU, 0x9ce87474U, 0x213e1f1fU, ++ 0xdd964b4bU, 0xdc61bdbdU, 0x860d8b8bU, 0x850f8a8aU, ++ 0x90e07070U, 0x427c3e3eU, 0xc471b5b5U, 0xaacc6666U, ++ 0xd8904848U, 0x05060303U, 0x01f7f6f6U, 0x121c0e0eU, ++ 0xa3c26161U, 0x5f6a3535U, 0xf9ae5757U, 0xd069b9b9U, ++ 0x91178686U, 0x5899c1c1U, 0x273a1d1dU, 0xb9279e9eU, ++ 0x38d9e1e1U, 0x13ebf8f8U, 0xb32b9898U, 0x33221111U, ++ 0xbbd26969U, 0x70a9d9d9U, 0x89078e8eU, 0xa7339494U, ++ 0xb62d9b9bU, 0x223c1e1eU, 0x92158787U, 0x20c9e9e9U, ++ 0x4987ceceU, 0xffaa5555U, 0x78502828U, 0x7aa5dfdfU, ++ 0x8f038c8cU, 0xf859a1a1U, 0x80098989U, 0x171a0d0dU, ++ 0xda65bfbfU, 0x31d7e6e6U, 0xc6844242U, 0xb8d06868U, ++ 0xc3824141U, 0xb0299999U, 0x775a2d2dU, 0x111e0f0fU, ++ 
0xcb7bb0b0U, 0xfca85454U, 0xd66dbbbbU, 0x3a2c1616U, ++}; ++ ++static const u32 Te2[256] = { ++ 0x63a5c663U, 0x7c84f87cU, 0x7799ee77U, 0x7b8df67bU, ++ 0xf20dfff2U, 0x6bbdd66bU, 0x6fb1de6fU, 0xc55491c5U, ++ 0x30506030U, 0x01030201U, 0x67a9ce67U, 0x2b7d562bU, ++ 0xfe19e7feU, 0xd762b5d7U, 0xabe64dabU, 0x769aec76U, ++ 0xca458fcaU, 0x829d1f82U, 0xc94089c9U, 0x7d87fa7dU, ++ 0xfa15effaU, 0x59ebb259U, 0x47c98e47U, 0xf00bfbf0U, ++ 0xadec41adU, 0xd467b3d4U, 0xa2fd5fa2U, 0xafea45afU, ++ 0x9cbf239cU, 0xa4f753a4U, 0x7296e472U, 0xc05b9bc0U, ++ 0xb7c275b7U, 0xfd1ce1fdU, 0x93ae3d93U, 0x266a4c26U, ++ 0x365a6c36U, 0x3f417e3fU, 0xf702f5f7U, 0xcc4f83ccU, ++ 0x345c6834U, 0xa5f451a5U, 0xe534d1e5U, 0xf108f9f1U, ++ 0x7193e271U, 0xd873abd8U, 0x31536231U, 0x153f2a15U, ++ 0x040c0804U, 0xc75295c7U, 0x23654623U, 0xc35e9dc3U, ++ 0x18283018U, 0x96a13796U, 0x050f0a05U, 0x9ab52f9aU, ++ 0x07090e07U, 0x12362412U, 0x809b1b80U, 0xe23ddfe2U, ++ 0xeb26cdebU, 0x27694e27U, 0xb2cd7fb2U, 0x759fea75U, ++ 0x091b1209U, 0x839e1d83U, 0x2c74582cU, 0x1a2e341aU, ++ 0x1b2d361bU, 0x6eb2dc6eU, 0x5aeeb45aU, 0xa0fb5ba0U, ++ 0x52f6a452U, 0x3b4d763bU, 0xd661b7d6U, 0xb3ce7db3U, ++ 0x297b5229U, 0xe33edde3U, 0x2f715e2fU, 0x84971384U, ++ 0x53f5a653U, 0xd168b9d1U, 0x00000000U, 0xed2cc1edU, ++ 0x20604020U, 0xfc1fe3fcU, 0xb1c879b1U, 0x5bedb65bU, ++ 0x6abed46aU, 0xcb468dcbU, 0xbed967beU, 0x394b7239U, ++ 0x4ade944aU, 0x4cd4984cU, 0x58e8b058U, 0xcf4a85cfU, ++ 0xd06bbbd0U, 0xef2ac5efU, 0xaae54faaU, 0xfb16edfbU, ++ 0x43c58643U, 0x4dd79a4dU, 0x33556633U, 0x85941185U, ++ 0x45cf8a45U, 0xf910e9f9U, 0x02060402U, 0x7f81fe7fU, ++ 0x50f0a050U, 0x3c44783cU, 0x9fba259fU, 0xa8e34ba8U, ++ 0x51f3a251U, 0xa3fe5da3U, 0x40c08040U, 0x8f8a058fU, ++ 0x92ad3f92U, 0x9dbc219dU, 0x38487038U, 0xf504f1f5U, ++ 0xbcdf63bcU, 0xb6c177b6U, 0xda75afdaU, 0x21634221U, ++ 0x10302010U, 0xff1ae5ffU, 0xf30efdf3U, 0xd26dbfd2U, ++ 0xcd4c81cdU, 0x0c14180cU, 0x13352613U, 0xec2fc3ecU, ++ 0x5fe1be5fU, 0x97a23597U, 0x44cc8844U, 0x17392e17U, ++ 0xc45793c4U, 0xa7f255a7U, 
0x7e82fc7eU, 0x3d477a3dU, ++ 0x64acc864U, 0x5de7ba5dU, 0x192b3219U, 0x7395e673U, ++ 0x60a0c060U, 0x81981981U, 0x4fd19e4fU, 0xdc7fa3dcU, ++ 0x22664422U, 0x2a7e542aU, 0x90ab3b90U, 0x88830b88U, ++ 0x46ca8c46U, 0xee29c7eeU, 0xb8d36bb8U, 0x143c2814U, ++ 0xde79a7deU, 0x5ee2bc5eU, 0x0b1d160bU, 0xdb76addbU, ++ 0xe03bdbe0U, 0x32566432U, 0x3a4e743aU, 0x0a1e140aU, ++ 0x49db9249U, 0x060a0c06U, 0x246c4824U, 0x5ce4b85cU, ++ 0xc25d9fc2U, 0xd36ebdd3U, 0xacef43acU, 0x62a6c462U, ++ 0x91a83991U, 0x95a43195U, 0xe437d3e4U, 0x798bf279U, ++ 0xe732d5e7U, 0xc8438bc8U, 0x37596e37U, 0x6db7da6dU, ++ 0x8d8c018dU, 0xd564b1d5U, 0x4ed29c4eU, 0xa9e049a9U, ++ 0x6cb4d86cU, 0x56faac56U, 0xf407f3f4U, 0xea25cfeaU, ++ 0x65afca65U, 0x7a8ef47aU, 0xaee947aeU, 0x08181008U, ++ 0xbad56fbaU, 0x7888f078U, 0x256f4a25U, 0x2e725c2eU, ++ 0x1c24381cU, 0xa6f157a6U, 0xb4c773b4U, 0xc65197c6U, ++ 0xe823cbe8U, 0xdd7ca1ddU, 0x749ce874U, 0x1f213e1fU, ++ 0x4bdd964bU, 0xbddc61bdU, 0x8b860d8bU, 0x8a850f8aU, ++ 0x7090e070U, 0x3e427c3eU, 0xb5c471b5U, 0x66aacc66U, ++ 0x48d89048U, 0x03050603U, 0xf601f7f6U, 0x0e121c0eU, ++ 0x61a3c261U, 0x355f6a35U, 0x57f9ae57U, 0xb9d069b9U, ++ 0x86911786U, 0xc15899c1U, 0x1d273a1dU, 0x9eb9279eU, ++ 0xe138d9e1U, 0xf813ebf8U, 0x98b32b98U, 0x11332211U, ++ 0x69bbd269U, 0xd970a9d9U, 0x8e89078eU, 0x94a73394U, ++ 0x9bb62d9bU, 0x1e223c1eU, 0x87921587U, 0xe920c9e9U, ++ 0xce4987ceU, 0x55ffaa55U, 0x28785028U, 0xdf7aa5dfU, ++ 0x8c8f038cU, 0xa1f859a1U, 0x89800989U, 0x0d171a0dU, ++ 0xbfda65bfU, 0xe631d7e6U, 0x42c68442U, 0x68b8d068U, ++ 0x41c38241U, 0x99b02999U, 0x2d775a2dU, 0x0f111e0fU, ++ 0xb0cb7bb0U, 0x54fca854U, 0xbbd66dbbU, 0x163a2c16U, ++}; ++ ++static const u32 Te3[256] = { ++ 0x6363a5c6U, 0x7c7c84f8U, 0x777799eeU, 0x7b7b8df6U, ++ 0xf2f20dffU, 0x6b6bbdd6U, 0x6f6fb1deU, 0xc5c55491U, ++ 0x30305060U, 0x01010302U, 0x6767a9ceU, 0x2b2b7d56U, ++ 0xfefe19e7U, 0xd7d762b5U, 0xababe64dU, 0x76769aecU, ++ 0xcaca458fU, 0x82829d1fU, 0xc9c94089U, 0x7d7d87faU, ++ 0xfafa15efU, 0x5959ebb2U, 0x4747c98eU, 0xf0f00bfbU, ++ 
0xadadec41U, 0xd4d467b3U, 0xa2a2fd5fU, 0xafafea45U, ++ 0x9c9cbf23U, 0xa4a4f753U, 0x727296e4U, 0xc0c05b9bU, ++ 0xb7b7c275U, 0xfdfd1ce1U, 0x9393ae3dU, 0x26266a4cU, ++ 0x36365a6cU, 0x3f3f417eU, 0xf7f702f5U, 0xcccc4f83U, ++ 0x34345c68U, 0xa5a5f451U, 0xe5e534d1U, 0xf1f108f9U, ++ 0x717193e2U, 0xd8d873abU, 0x31315362U, 0x15153f2aU, ++ 0x04040c08U, 0xc7c75295U, 0x23236546U, 0xc3c35e9dU, ++ 0x18182830U, 0x9696a137U, 0x05050f0aU, 0x9a9ab52fU, ++ 0x0707090eU, 0x12123624U, 0x80809b1bU, 0xe2e23ddfU, ++ 0xebeb26cdU, 0x2727694eU, 0xb2b2cd7fU, 0x75759feaU, ++ 0x09091b12U, 0x83839e1dU, 0x2c2c7458U, 0x1a1a2e34U, ++ 0x1b1b2d36U, 0x6e6eb2dcU, 0x5a5aeeb4U, 0xa0a0fb5bU, ++ 0x5252f6a4U, 0x3b3b4d76U, 0xd6d661b7U, 0xb3b3ce7dU, ++ 0x29297b52U, 0xe3e33eddU, 0x2f2f715eU, 0x84849713U, ++ 0x5353f5a6U, 0xd1d168b9U, 0x00000000U, 0xeded2cc1U, ++ 0x20206040U, 0xfcfc1fe3U, 0xb1b1c879U, 0x5b5bedb6U, ++ 0x6a6abed4U, 0xcbcb468dU, 0xbebed967U, 0x39394b72U, ++ 0x4a4ade94U, 0x4c4cd498U, 0x5858e8b0U, 0xcfcf4a85U, ++ 0xd0d06bbbU, 0xefef2ac5U, 0xaaaae54fU, 0xfbfb16edU, ++ 0x4343c586U, 0x4d4dd79aU, 0x33335566U, 0x85859411U, ++ 0x4545cf8aU, 0xf9f910e9U, 0x02020604U, 0x7f7f81feU, ++ 0x5050f0a0U, 0x3c3c4478U, 0x9f9fba25U, 0xa8a8e34bU, ++ 0x5151f3a2U, 0xa3a3fe5dU, 0x4040c080U, 0x8f8f8a05U, ++ 0x9292ad3fU, 0x9d9dbc21U, 0x38384870U, 0xf5f504f1U, ++ 0xbcbcdf63U, 0xb6b6c177U, 0xdada75afU, 0x21216342U, ++ 0x10103020U, 0xffff1ae5U, 0xf3f30efdU, 0xd2d26dbfU, ++ 0xcdcd4c81U, 0x0c0c1418U, 0x13133526U, 0xecec2fc3U, ++ 0x5f5fe1beU, 0x9797a235U, 0x4444cc88U, 0x1717392eU, ++ 0xc4c45793U, 0xa7a7f255U, 0x7e7e82fcU, 0x3d3d477aU, ++ 0x6464acc8U, 0x5d5de7baU, 0x19192b32U, 0x737395e6U, ++ 0x6060a0c0U, 0x81819819U, 0x4f4fd19eU, 0xdcdc7fa3U, ++ 0x22226644U, 0x2a2a7e54U, 0x9090ab3bU, 0x8888830bU, ++ 0x4646ca8cU, 0xeeee29c7U, 0xb8b8d36bU, 0x14143c28U, ++ 0xdede79a7U, 0x5e5ee2bcU, 0x0b0b1d16U, 0xdbdb76adU, ++ 0xe0e03bdbU, 0x32325664U, 0x3a3a4e74U, 0x0a0a1e14U, ++ 0x4949db92U, 0x06060a0cU, 0x24246c48U, 0x5c5ce4b8U, ++ 0xc2c25d9fU, 
0xd3d36ebdU, 0xacacef43U, 0x6262a6c4U, ++ 0x9191a839U, 0x9595a431U, 0xe4e437d3U, 0x79798bf2U, ++ 0xe7e732d5U, 0xc8c8438bU, 0x3737596eU, 0x6d6db7daU, ++ 0x8d8d8c01U, 0xd5d564b1U, 0x4e4ed29cU, 0xa9a9e049U, ++ 0x6c6cb4d8U, 0x5656faacU, 0xf4f407f3U, 0xeaea25cfU, ++ 0x6565afcaU, 0x7a7a8ef4U, 0xaeaee947U, 0x08081810U, ++ 0xbabad56fU, 0x787888f0U, 0x25256f4aU, 0x2e2e725cU, ++ 0x1c1c2438U, 0xa6a6f157U, 0xb4b4c773U, 0xc6c65197U, ++ 0xe8e823cbU, 0xdddd7ca1U, 0x74749ce8U, 0x1f1f213eU, ++ 0x4b4bdd96U, 0xbdbddc61U, 0x8b8b860dU, 0x8a8a850fU, ++ 0x707090e0U, 0x3e3e427cU, 0xb5b5c471U, 0x6666aaccU, ++ 0x4848d890U, 0x03030506U, 0xf6f601f7U, 0x0e0e121cU, ++ 0x6161a3c2U, 0x35355f6aU, 0x5757f9aeU, 0xb9b9d069U, ++ 0x86869117U, 0xc1c15899U, 0x1d1d273aU, 0x9e9eb927U, ++ 0xe1e138d9U, 0xf8f813ebU, 0x9898b32bU, 0x11113322U, ++ 0x6969bbd2U, 0xd9d970a9U, 0x8e8e8907U, 0x9494a733U, ++ 0x9b9bb62dU, 0x1e1e223cU, 0x87879215U, 0xe9e920c9U, ++ 0xcece4987U, 0x5555ffaaU, 0x28287850U, 0xdfdf7aa5U, ++ 0x8c8c8f03U, 0xa1a1f859U, 0x89898009U, 0x0d0d171aU, ++ 0xbfbfda65U, 0xe6e631d7U, 0x4242c684U, 0x6868b8d0U, ++ 0x4141c382U, 0x9999b029U, 0x2d2d775aU, 0x0f0f111eU, ++ 0xb0b0cb7bU, 0x5454fca8U, 0xbbbbd66dU, 0x16163a2cU, ++}; ++ ++#define GETU32(pt) \ ++ ( \ ++ ((u32)(pt)[0] << 24) \ ++ ^ ((u32)(pt)[1] << 16) \ ++ ^ ((u32)(pt)[2] << 8) \ ++ ^ ((u32)(pt)[3]) \ ++ ) ++ ++#define PUTU32(ct, st) \ ++ { \ ++ (ct)[0] = (u8)((st) >> 24); \ ++ (ct)[1] = (u8)((st) >> 16); \ ++ (ct)[2] = (u8)((st) >> 8); \ ++ (ct)[3] = (u8)(st); \ ++ } ++ ++void ++aes_encrypt (const unsigned char *in, unsigned char *out, ++ const u32 *rk, int nr) ++{ ++ u32 s0, s1, s2, s3, t0, t1, t2, t3; ++ ++ int r = nr >> 1; ++ ++ s0 = GETU32 (in ) ^ rk[0]; ++ s1 = GETU32 (in + 4) ^ rk[1]; ++ s2 = GETU32 (in + 8) ^ rk[2]; ++ s3 = GETU32 (in + 12) ^ rk[3]; ++ ++ for (;;) { ++ t0 = ++ Te0[(s0 >> 24) ] ^ ++ Te1[(s1 >> 16) & 0xff] ^ ++ Te2[(s2 >> 8) & 0xff] ^ ++ Te3[(s3 ) & 0xff] ^ ++ rk[4]; ++ t1 = ++ Te0[(s1 >> 24) ] ^ ++ Te1[(s2 >> 16) & 0xff] ^ ++ 
Te2[(s3 >> 8) & 0xff] ^ ++ Te3[(s0 ) & 0xff] ^ ++ rk[5]; ++ t2 = ++ Te0[(s2 >> 24) ] ^ ++ Te1[(s3 >> 16) & 0xff] ^ ++ Te2[(s0 >> 8) & 0xff] ^ ++ Te3[(s1 ) & 0xff] ^ ++ rk[6]; ++ t3 = ++ Te0[(s3 >> 24) ] ^ ++ Te1[(s0 >> 16) & 0xff] ^ ++ Te2[(s1 >> 8) & 0xff] ^ ++ Te3[(s2 ) & 0xff] ^ ++ rk[7]; ++ ++ rk += 8; ++ if (--r == 0) ++ break; ++ ++ s0 = ++ Te0[(t0 >> 24) ] ^ ++ Te1[(t1 >> 16) & 0xff] ^ ++ Te2[(t2 >> 8) & 0xff] ^ ++ Te3[(t3 ) & 0xff] ^ ++ rk[0]; ++ s1 = ++ Te0[(t1 >> 24) ] ^ ++ Te1[(t2 >> 16) & 0xff] ^ ++ Te2[(t3 >> 8) & 0xff] ^ ++ Te3[(t0 ) & 0xff] ^ ++ rk[1]; ++ s2 = ++ Te0[(t2 >> 24) ] ^ ++ Te1[(t3 >> 16) & 0xff] ^ ++ Te2[(t0 >> 8) & 0xff] ^ ++ Te3[(t1 ) & 0xff] ^ ++ rk[2]; ++ s3 = ++ Te0[(t3 >> 24) ] ^ ++ Te1[(t0 >> 16) & 0xff] ^ ++ Te2[(t1 >> 8) & 0xff] ^ ++ Te3[(t2 ) & 0xff] ^ ++ rk[3]; ++ } ++ ++ s0 = ++ (Te2[(t0 >> 24) ] & 0xff000000) ^ ++ (Te3[(t1 >> 16) & 0xff] & 0x00ff0000) ^ ++ (Te0[(t2 >> 8) & 0xff] & 0x0000ff00) ^ ++ (Te1[(t3 ) & 0xff] & 0x000000ff) ^ ++ rk[0]; ++ PUTU32 (out , s0); ++ ++ s1 = ++ (Te2[(t1 >> 24) ] & 0xff000000) ^ ++ (Te3[(t2 >> 16) & 0xff] & 0x00ff0000) ^ ++ (Te0[(t3 >> 8) & 0xff] & 0x0000ff00) ^ ++ (Te1[(t0 ) & 0xff] & 0x000000ff) ^ ++ rk[1]; ++ PUTU32 (out + 4, s1); ++ ++ s2 = ++ (Te2[(t2 >> 24) ] & 0xff000000) ^ ++ (Te3[(t3 >> 16) & 0xff] & 0x00ff0000) ^ ++ (Te0[(t0 >> 8) & 0xff] & 0x0000ff00) ^ ++ (Te1[(t1 ) & 0xff] & 0x000000ff) ^ ++ rk[2]; ++ PUTU32 (out + 8, s2); ++ ++ s3 = ++ (Te2[(t3 >> 24) ] & 0xff000000) ^ ++ (Te3[(t0 >> 16) & 0xff] & 0x00ff0000) ^ ++ (Te0[(t1 >> 8) & 0xff] & 0x0000ff00) ^ ++ (Te1[(t2 ) & 0xff] & 0x000000ff) ^ ++ rk[3]; ++ PUTU32 (out + 12, s3); ++} ++ ++ ++int main () ++{ ++ const u8 input[16] = { 0x32, 0x43, 0xf6, 0xa8, 0x88, 0x5a, 0x30, 0x8d, ++ 0x31, 0x31, 0x98, 0xa2, 0xe0, 0x37, 0x07, 0x34 }; ++ ++ const u8 expected[16] = { 0x39, 0x25, 0x84, 0x1d, 0x02, 0xdc, 0x09, 0xfb, ++ 0xdc, 0x11, 0x85, 0x97, 0x19, 0x6a, 0x0b, 0x32 }; ++ ++ const u8 key[] = { 0x16, 0x15, 0x7e, 0x2b, 0xa6, 0xd2, 0xae, 0x28, 
++ 0x88, 0x15, 0xf7, 0xab, 0x3c, 0x4f, 0xcf, 0x09, ++ 0x17, 0xfe, 0xfa, 0xa0, 0xb1, 0x2c, 0x54, 0x88, ++ 0x39, 0x39, 0xa3, 0x23, 0x05, 0x76, 0x6c, 0x2a, ++ 0xf2, 0x95, 0xc2, 0xf2, 0x43, 0xb9, 0x96, 0x7a, ++ 0x7a, 0x80, 0x35, 0x59, 0x7f, 0xf6, 0x59, 0x73, ++ 0x7d, 0x47, 0x80, 0x3d, 0x3e, 0xfe, 0x16, 0x47, ++ 0x44, 0x7e, 0x23, 0x1e, 0x3b, 0x88, 0x7a, 0x6d, ++ 0x41, 0xa5, 0x44, 0xef, 0x7f, 0x5b, 0x52, 0xa8, ++ 0x3b, 0x25, 0x71, 0xb6, 0x00, 0xad, 0x0b, 0xdb, ++ 0xf8, 0xc6, 0xd1, 0xd4, 0x87, 0x9d, 0x83, 0x7c, ++ 0xbc, 0xb8, 0xf2, 0xca, 0xbc, 0x15, 0xf9, 0x11, ++ 0x7a, 0xa3, 0x88, 0x6d, 0xfd, 0x3e, 0x0b, 0x11, ++ 0x41, 0x86, 0xf9, 0xdb, 0xfd, 0x93, 0x00, 0xca, ++ 0x0e, 0xf7, 0x54, 0x4e, 0xf3, 0xc9, 0x5f, 0x5f, ++ 0xb2, 0x4f, 0xa6, 0x84, 0x4f, 0xdc, 0xa6, 0x4e, ++ 0x21, 0x73, 0xd2, 0xea, 0xd2, 0xba, 0x8d, 0xb5, ++ 0x60, 0xf5, 0x2b, 0x31, 0x2f, 0x29, 0x8d, 0x7f, ++ 0xf3, 0x66, 0x77, 0xac, 0x21, 0xdc, 0xfa, 0x19, ++ 0x41, 0x29, 0xd1, 0x28, 0x6e, 0x00, 0x5c, 0x57, ++ 0xa8, 0xf9, 0x14, 0xd0, 0x89, 0x25, 0xee, 0xc9, ++ 0xc8, 0x0c, 0x3f, 0xe1, 0xa6, 0x0c, 0x63, 0xb6 }; ++ ++ u8 output[16] = { 0 }; ++ ++ aes_encrypt (input, output, (u32*) key, 10); ++ ++ if (memcmp (output, expected, 16) != 0) ++ abort (); ++ ++ return 0; ++} ++ ++/* { dg-final { scan-assembler "rev32" } } */ ++/* { dg-final { scan-assembler "aesmc" } } */ ++/* { dg-final { scan-assembler "aese" } } */ +diff --git a/gcc/timevar.def b/gcc/timevar.def +index 2ccecffb5..18a9f62cc 100644 +--- a/gcc/timevar.def ++++ b/gcc/timevar.def +@@ -261,6 +261,7 @@ DEFTIMEVAR (TV_AUTO_INC_DEC , "auto inc dec") + DEFTIMEVAR (TV_CSE2 , "CSE 2") + DEFTIMEVAR (TV_BRANCH_PROB , "branch prediction") + DEFTIMEVAR (TV_COMBINE , "combiner") ++DEFTIMEVAR (TV_CRYPTO_ACCEL , "crypto accel") + DEFTIMEVAR (TV_IFCVT , "if-conversion") + DEFTIMEVAR (TV_MODE_SWITCH , "mode switching") + DEFTIMEVAR (TV_SMS , "sms modulo scheduling") +diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h +index 6daac7fc1..1733931c3 100644 +--- a/gcc/tree-pass.h ++++ 
b/gcc/tree-pass.h +@@ -583,6 +583,7 @@ extern rtl_opt_pass *make_pass_cse2 (gcc::context *ctxt); + extern rtl_opt_pass *make_pass_df_initialize_opt (gcc::context *ctxt); + extern rtl_opt_pass *make_pass_df_initialize_no_opt (gcc::context *ctxt); + extern rtl_opt_pass *make_pass_reginfo_init (gcc::context *ctxt); ++extern rtl_opt_pass *make_pass_crypto_accel (gcc::context *ctxt); + extern rtl_opt_pass *make_pass_inc_dec (gcc::context *ctxt); + extern rtl_opt_pass *make_pass_stack_ptr_mod (gcc::context *ctxt); + extern rtl_opt_pass *make_pass_initialize_regs (gcc::context *ctxt); +-- +2.33.0 + diff --git a/0048-crypto-accel-add-optimization-level-requirement-to-t.patch b/0048-crypto-accel-add-optimization-level-requirement-to-t.patch new file mode 100644 index 0000000000000000000000000000000000000000..49dfc1d3b8009ddce6d5752959f9825a0ee025fa --- /dev/null +++ b/0048-crypto-accel-add-optimization-level-requirement-to-t.patch @@ -0,0 +1,27 @@ +From 915d549b03c10ab403538888149facd417a02ebc Mon Sep 17 00:00:00 2001 +From: vchernon +Date: Wed, 27 Dec 2023 23:31:26 +0800 +Subject: [PATCH 16/18] [crypto-accel] add optimization level requirement to + the gate + +fix issue (src-openEuler/gcc: I8RRDW) +--- + gcc/crypto-accel.cc | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/gcc/crypto-accel.cc b/gcc/crypto-accel.cc +index f4e810a6b..e7766a585 100644 +--- a/gcc/crypto-accel.cc ++++ b/gcc/crypto-accel.cc +@@ -2391,7 +2391,7 @@ public: + /* opt_pass methods: */ + virtual bool gate (function *) + { +- if (flag_crypto_accel_aes <= 0) ++ if (flag_crypto_accel_aes <= 0 || optimize < 1) + return false; + return targetm.get_v16qi_mode + && targetm.gen_rev32v16qi +-- +2.33.0 + diff --git a/0049-Add-more-flexible-check-for-pointer-aliasing-during-.patch b/0049-Add-more-flexible-check-for-pointer-aliasing-during-.patch new file mode 100644 index 0000000000000000000000000000000000000000..df88789c75c5829bff8dee4c5a4e7d817af2227d --- /dev/null +++ 
b/0049-Add-more-flexible-check-for-pointer-aliasing-during-.patch @@ -0,0 +1,239 @@ +From b5865aef36ebaac87ae30d51f08bfe081795ed67 Mon Sep 17 00:00:00 2001 +From: Chernonog Viacheslav +Date: Tue, 12 Mar 2024 23:30:56 +0800 +Subject: [PATCH 17/18] Add more flexible check for pointer aliasing during + vectorization It takes minimum between number of iteration and segment length + it helps to speed up loops with small number of iterations when only tail can + be vectorized + +--- + gcc/params.opt | 5 ++ + .../sve/var_stride_flexible_segment_len_1.c | 23 +++++++ + gcc/tree-data-ref.cc | 67 +++++++++++++------ + gcc/tree-data-ref.h | 11 ++- + gcc/tree-vect-data-refs.cc | 14 +++- + 5 files changed, 95 insertions(+), 25 deletions(-) + create mode 100644 gcc/testsuite/gcc.target/aarch64/sve/var_stride_flexible_segment_len_1.c + +diff --git a/gcc/params.opt b/gcc/params.opt +index 6176d4790..7e5c119cf 100644 +--- a/gcc/params.opt ++++ b/gcc/params.opt +@@ -1180,6 +1180,11 @@ Maximum number of loop peels to enhance alignment of data references in a loop. + Common Joined UInteger Var(param_vect_max_version_for_alias_checks) Init(10) Param Optimization + Bound on number of runtime checks inserted by the vectorizer's loop versioning for alias check. + ++-param=vect-alias-flexible-segment-len= ++Common Joined UInteger Var(param_flexible_seg_len) Init(0) IntegerRange(0, 1) Param Optimization ++Use a minimum length of different segments. Currenlty the minimum between ++iteration number and vectorization length is chosen by this param. ++ + -param=vect-max-version-for-alignment-checks= + Common Joined UInteger Var(param_vect_max_version_for_alignment_checks) Init(6) Param Optimization + Bound on number of runtime checks inserted by the vectorizer's loop versioning for alignment check. 
+diff --git a/gcc/testsuite/gcc.target/aarch64/sve/var_stride_flexible_segment_len_1.c b/gcc/testsuite/gcc.target/aarch64/sve/var_stride_flexible_segment_len_1.c +new file mode 100644 +index 000000000..894f075f3 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/aarch64/sve/var_stride_flexible_segment_len_1.c +@@ -0,0 +1,23 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -ftree-vectorize --param=vect-alias-flexible-segment-len=1" } */ ++ ++#define TYPE int ++#define SIZE 257 ++ ++void __attribute__ ((weak)) ++f (TYPE *x, TYPE *y, unsigned short n, long m __attribute__((unused))) ++{ ++ for (int i = 0; i < SIZE; ++i) ++ x[i * n] += y[i * n]; ++} ++ ++/* { dg-final { scan-assembler {\tld1w\tz[0-9]+} } } */ ++/* { dg-final { scan-assembler {\tst1w\tz[0-9]+} } } */ ++/* { dg-final { scan-assembler {\tldr\tw[0-9]+} } } */ ++/* { dg-final { scan-assembler {\tstr\tw[0-9]+} } } */ ++/* Should use a WAR check that multiplies by (VF-2)*4 rather than ++ an overlap check that multiplies by (257-1)*4. */ ++/* { dg-final { scan-assembler {\tcntb\t(x[0-9]+)\n.*\tsub\tx[0-9]+, \1, #8\n.*\tmul\tx[0-9]+,[^\n]*\1} } } */ ++/* One range check and a check for n being zero. */ ++/* { dg-final { scan-assembler-times {\t(?:cmp|tst)\t} 2 } } */ ++/* { dg-final { scan-assembler-times {\tccmp\t} 1 } } */ +diff --git a/gcc/tree-data-ref.cc b/gcc/tree-data-ref.cc +index 397792c35..e6ae9e847 100644 +--- a/gcc/tree-data-ref.cc ++++ b/gcc/tree-data-ref.cc +@@ -2329,31 +2329,15 @@ create_intersect_range_checks_index (class loop *loop, tree *cond_expr, + same arguments. Try to optimize cases in which the second access + is a write and in which some overlap is valid. 
*/ + +-static bool +-create_waw_or_war_checks (tree *cond_expr, ++static void ++create_waw_or_war_checks2 (tree *cond_expr, tree seg_len_a, + const dr_with_seg_len_pair_t &alias_pair) + { + const dr_with_seg_len& dr_a = alias_pair.first; + const dr_with_seg_len& dr_b = alias_pair.second; + +- /* Check for cases in which: +- +- (a) DR_B is always a write; +- (b) the accesses are well-ordered in both the original and new code +- (see the comment above the DR_ALIAS_* flags for details); and +- (c) the DR_STEPs describe all access pairs covered by ALIAS_PAIR. */ +- if (alias_pair.flags & ~(DR_ALIAS_WAR | DR_ALIAS_WAW)) +- return false; +- +- /* Check for equal (but possibly variable) steps. */ + tree step = DR_STEP (dr_a.dr); +- if (!operand_equal_p (step, DR_STEP (dr_b.dr))) +- return false; +- +- /* Make sure that we can operate on sizetype without loss of precision. */ + tree addr_type = TREE_TYPE (DR_BASE_ADDRESS (dr_a.dr)); +- if (TYPE_PRECISION (addr_type) != TYPE_PRECISION (sizetype)) +- return false; + + /* All addresses involved are known to have a common alignment ALIGN. + We can therefore subtract ALIGN from an exclusive endpoint to get +@@ -2370,9 +2354,6 @@ create_waw_or_war_checks (tree *cond_expr, + fold_convert (ssizetype, indicator), + ssize_int (0)); + +- /* Get lengths in sizetype. */ +- tree seg_len_a +- = fold_convert (sizetype, rewrite_to_non_trapping_overflow (dr_a.seg_len)); + step = fold_convert (sizetype, rewrite_to_non_trapping_overflow (step)); + + /* Each access has the following pattern: +@@ -2479,6 +2460,50 @@ create_waw_or_war_checks (tree *cond_expr, + *cond_expr = fold_build2 (GT_EXPR, boolean_type_node, subject, limit); + if (dump_enabled_p ()) + dump_printf (MSG_NOTE, "using an address-based WAR/WAW test\n"); ++} ++ ++/* This is a wrapper function for create_waw_or_war_checks2. 
*/ ++static bool ++create_waw_or_war_checks (tree *cond_expr, ++ const dr_with_seg_len_pair_t &alias_pair) ++{ ++ const dr_with_seg_len& dr_a = alias_pair.first; ++ const dr_with_seg_len& dr_b = alias_pair.second; ++ ++ /* Check for cases in which: ++ ++ (a) DR_B is always a write; ++ (b) the accesses are well-ordered in both the original and new code ++ (see the comment above the DR_ALIAS_* flags for details); and ++ (c) the DR_STEPs describe all access pairs covered by ALIAS_PAIR. */ ++ if (alias_pair.flags & ~(DR_ALIAS_WAR | DR_ALIAS_WAW)) ++ return false; ++ ++ /* Check for equal (but possibly variable) steps. */ ++ tree step = DR_STEP (dr_a.dr); ++ if (!operand_equal_p (step, DR_STEP (dr_b.dr))) ++ return false; ++ ++ /* Make sure that we can operate on sizetype without loss of precision. */ ++ tree addr_type = TREE_TYPE (DR_BASE_ADDRESS (dr_a.dr)); ++ if (TYPE_PRECISION (addr_type) != TYPE_PRECISION (sizetype)) ++ return false; ++ ++ /* Get lengths in sizetype. */ ++ tree seg_len_a ++ = fold_convert (sizetype, ++ rewrite_to_non_trapping_overflow (dr_a.seg_len)); ++ create_waw_or_war_checks2 (cond_expr, seg_len_a, alias_pair); ++ if (param_flexible_seg_len && dr_a.seg_len != dr_a.seg_len2) ++ { ++ tree seg_len2_a ++ = fold_convert (sizetype, ++ rewrite_to_non_trapping_overflow (dr_a.seg_len2)); ++ tree cond_expr2; ++ create_waw_or_war_checks2 (&cond_expr2, seg_len2_a, alias_pair); ++ *cond_expr = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, ++ *cond_expr, cond_expr2); ++ } + return true; + } + +diff --git a/gcc/tree-data-ref.h b/gcc/tree-data-ref.h +index f643a95b2..9bc5f16ee 100644 +--- a/gcc/tree-data-ref.h ++++ b/gcc/tree-data-ref.h +@@ -213,12 +213,19 @@ class dr_with_seg_len + public: + dr_with_seg_len (data_reference_p d, tree len, unsigned HOST_WIDE_INT size, + unsigned int a) +- : dr (d), seg_len (len), access_size (size), align (a) {} +- ++ : dr (d), seg_len (len), seg_len2 (len), access_size (size), align (a) ++ {} ++ dr_with_seg_len 
(data_reference_p d, tree len, tree len2, ++ unsigned HOST_WIDE_INT size, unsigned int a) ++ : dr (d), seg_len (len), seg_len2 (len2), access_size (size), align (a) ++ {} + data_reference_p dr; + /* The offset of the last access that needs to be checked minus + the offset of the first. */ + tree seg_len; ++ /* The second version of segment length. Currently this is used to ++ soften checks for a small number of iterations. */ ++ tree seg_len2; + /* A value that, when added to abs (SEG_LEN), gives the total number of + bytes in the segment. */ + poly_uint64 access_size; +diff --git a/gcc/tree-vect-data-refs.cc b/gcc/tree-vect-data-refs.cc +index 4e615b80b..04e68f621 100644 +--- a/gcc/tree-vect-data-refs.cc ++++ b/gcc/tree-vect-data-refs.cc +@@ -3646,6 +3646,7 @@ vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo) + { + poly_uint64 lower_bound; + tree segment_length_a, segment_length_b; ++ tree segment_length2_a, segment_length2_b; + unsigned HOST_WIDE_INT access_size_a, access_size_b; + unsigned int align_a, align_b; + +@@ -3751,6 +3752,8 @@ vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo) + { + segment_length_a = size_zero_node; + segment_length_b = size_zero_node; ++ segment_length2_a = size_zero_node; ++ segment_length2_b = size_zero_node; + } + else + { +@@ -3759,8 +3762,15 @@ vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo) + length_factor = scalar_loop_iters; + else + length_factor = size_int (vect_factor); ++ /* In any case we should rememeber scalar_loop_iters ++ this helps to create flexible aliasing check ++ for small number of iterations. 
*/ + segment_length_a = vect_vfa_segment_size (dr_info_a, length_factor); + segment_length_b = vect_vfa_segment_size (dr_info_b, length_factor); ++ segment_length2_a ++ = vect_vfa_segment_size (dr_info_a, scalar_loop_iters); ++ segment_length2_b ++ = vect_vfa_segment_size (dr_info_b, scalar_loop_iters); + } + access_size_a = vect_vfa_access_size (loop_vinfo, dr_info_a); + access_size_b = vect_vfa_access_size (loop_vinfo, dr_info_b); +@@ -3805,9 +3815,9 @@ vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo) + } + + dr_with_seg_len dr_a (dr_info_a->dr, segment_length_a, +- access_size_a, align_a); ++ segment_length2_a, access_size_a, align_a); + dr_with_seg_len dr_b (dr_info_b->dr, segment_length_b, +- access_size_b, align_b); ++ segment_length2_b, access_size_b, align_b); + /* Canonicalize the order to be the one that's needed for accurate + RAW, WAR and WAW flags, in cases where the data references are + well-ordered. The order doesn't really matter otherwise, +-- +2.33.0 + diff --git a/0050-Port-IPA-prefetch-to-GCC-12.patch b/0050-Port-IPA-prefetch-to-GCC-12.patch new file mode 100644 index 0000000000000000000000000000000000000000..225a0c4a5ac98254cd279d08500212671feb90be --- /dev/null +++ b/0050-Port-IPA-prefetch-to-GCC-12.patch @@ -0,0 +1,2071 @@ +From 7ee50ce44c652e21ca8ad33dc4e175f02b51b072 Mon Sep 17 00:00:00 2001 +From: Diachkov Ilia +Date: Fri, 8 Mar 2024 06:50:39 +0800 +Subject: [PATCH 18/18] Port IPA prefetch to GCC 12 + +--- + gcc/Makefile.in | 1 + + gcc/cgraph.cc | 1 + + gcc/cgraph.h | 2 + + gcc/common.opt | 8 + + gcc/ipa-devirt.cc | 54 +- + gcc/ipa-prefetch.cc | 1819 +++++++++++++++++++++++++++++++++++++++++++ + gcc/ipa-sra.cc | 8 + + gcc/params.opt | 8 + + gcc/passes.def | 1 + + gcc/timevar.def | 1 + + gcc/tree-pass.h | 1 + + 11 files changed, 1902 insertions(+), 2 deletions(-) + create mode 100644 gcc/ipa-prefetch.cc + +diff --git a/gcc/Makefile.in b/gcc/Makefile.in +index 876000bda..10544e4a9 100644 +--- a/gcc/Makefile.in ++++ 
b/gcc/Makefile.in +@@ -1468,6 +1468,7 @@ OBJS = \ + ipa-modref.o \ + ipa-modref-tree.o \ + ipa-predicate.o \ ++ ipa-prefetch.o \ + ipa-profile.o \ + ipa-prop.o \ + ipa-param-manipulation.o \ +diff --git a/gcc/cgraph.cc b/gcc/cgraph.cc +index 3734c85db..7d738b891 100644 +--- a/gcc/cgraph.cc ++++ b/gcc/cgraph.cc +@@ -998,6 +998,7 @@ cgraph_node::create_indirect_edge (gcall *call_stmt, int ecf_flags, + edge->indirect_info = cgraph_allocate_init_indirect_info (); + edge->indirect_info->ecf_flags = ecf_flags; + edge->indirect_info->vptr_changed = true; ++ edge->indirect_info->targets = NULL; + + /* Record polymorphic call info. */ + if (!cloning_p +diff --git a/gcc/cgraph.h b/gcc/cgraph.h +index d96690326..b84ff2f98 100644 +--- a/gcc/cgraph.h ++++ b/gcc/cgraph.h +@@ -1659,6 +1659,8 @@ public: + int param_index; + /* ECF flags determined from the caller. */ + int ecf_flags; ++ /* Vector of potential call targets determined by analysis. */ ++ vec *targets; + + /* Number of speculative call targets, it's less than GCOV_TOPN_VALUES. */ + unsigned num_speculative_call_targets : 16; +diff --git a/gcc/common.opt b/gcc/common.opt +index 1eb62ada5..e65a06af9 100644 +--- a/gcc/common.opt ++++ b/gcc/common.opt +@@ -1328,6 +1328,10 @@ fdevirtualize + Common Var(flag_devirtualize) Optimization + Try to convert virtual calls to direct ones. + ++fipa-ic ++Common Var(flag_ipa_ic) Optimization Init(0) ++Perform interprocedural analysis of indirect calls. ++ + ficp + Common Var(flag_icp) Optimization Init(0) + Try to promote indirect calls to direct ones. +@@ -2367,6 +2371,10 @@ fprefetch-loop-arrays + Common Var(flag_prefetch_loop_arrays) Init(-1) Optimization + Generate prefetch instructions, if available, for arrays in loops. + ++fipa-prefetch ++Common Var(flag_ipa_prefetch) Init(0) Optimization ++Generate prefetch instructions, if available, using IPA info. ++ + fprofile + Common Var(profile_flag) + Enable basic program profiling code. 
+diff --git a/gcc/ipa-devirt.cc b/gcc/ipa-devirt.cc +index 318535d06..dd3562d56 100644 +--- a/gcc/ipa-devirt.cc ++++ b/gcc/ipa-devirt.cc +@@ -5758,6 +5758,54 @@ merge_fs_map_for_ftype_aliases () + } + } + ++/* Save results of indirect call analysis for the next passes. */ ++ ++static void ++save_analysis_results () ++{ ++ if (dump_file) ++ fprintf (dump_file, "\n\nSave results of indirect call analysis.\n"); ++ ++ struct cgraph_node *n; ++ FOR_EACH_FUNCTION (n) ++ { ++ cgraph_edge *e, *next; ++ for (e = n->indirect_calls; e; e = next) ++ { ++ next = e->next_callee; ++ if (e->indirect_info->polymorphic) ++ continue; ++ gcall *stmt = e->call_stmt; ++ gcc_assert (stmt != NULL); ++ tree call_fn = gimple_call_fn (stmt); ++ tree call_fn_ty = TREE_TYPE (call_fn); ++ if (!POINTER_TYPE_P (call_fn_ty)) ++ continue; ++ ++ tree ctype = TYPE_CANONICAL (TREE_TYPE (call_fn_ty)); ++ unsigned ctype_uid = ctype ? TYPE_UID (ctype) : 0; ++ if (!ctype_uid || unsafe_types->count (ctype_uid) ++ || !fs_map->count (ctype_uid)) ++ continue; ++ /* TODO: cleanup noninterposable aliases. */ ++ decl_set *decls = (*fs_map)[ctype_uid]; ++ if (dump_file) ++ { ++ fprintf (dump_file, "For call "); ++ print_gimple_stmt (dump_file, stmt, 0); ++ } ++ vec_alloc (e->indirect_info->targets, decls->size ()); ++ for (decl_set::const_iterator it = decls->begin (); ++ it != decls->end (); it++) ++ { ++ struct cgraph_node *target = cgraph_node::get (*it); ++ /* TODO: maybe discard some targets. */ ++ e->indirect_info->targets->quick_push (target); ++ } ++ } ++ } ++} ++ + /* Dump function types with set of functions corresponding to it. */ + + static void +@@ -5822,6 +5870,8 @@ collect_function_signatures () + } + } + merge_fs_map_for_ftype_aliases (); ++ if (flag_ipa_ic) ++ save_analysis_results (); + if (dump_file) + dump_function_signature_sets (); + } +@@ -6217,7 +6267,7 @@ ipa_icp (void) + optimize indirect calls. 
*/ + collect_function_type_aliases (); + collect_function_signatures (); +- bool optimized = optimize_indirect_calls (); ++ bool optimized = flag_icp ? optimize_indirect_calls () : false; + + remove_type_alias_map (ta_map); + remove_type_alias_map (fta_map); +@@ -6264,7 +6314,7 @@ public: + /* opt_pass methods: */ + virtual bool gate (function *) + { +- return (optimize && flag_icp && !seen_error () ++ return (optimize && (flag_icp || flag_ipa_ic) && !seen_error () + && (in_lto_p || flag_whole_program)); + } + +diff --git a/gcc/ipa-prefetch.cc b/gcc/ipa-prefetch.cc +new file mode 100644 +index 000000000..aeea51105 +--- /dev/null ++++ b/gcc/ipa-prefetch.cc +@@ -0,0 +1,1819 @@ ++/* IPA prefetch optimizations. ++ Copyright (C) 2023 Free Software Foundation, Inc. ++ Contributed by Ilia Diachkov. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify it under ++the terms of the GNU General Public License as published by the Free ++Software Foundation; either version 3, or (at your option) any later ++version. ++ ++GCC is distributed in the hope that it will be useful, but WITHOUT ANY ++WARRANTY; without even the implied warranty of MERCHANTABILITY or ++FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. */ ++ ++/* IPA prefetch is an interprocedural pass that detects cases of indirect ++ memory access potentially in loops and inserts prefetch instructions ++ to optimize cache usage during these indirect memory accesses. 
*/ ++ ++#include "config.h" ++#define INCLUDE_SET ++#define INCLUDE_MAP ++#include "system.h" ++#include "coretypes.h" ++#include "target.h" ++#include "tm.h" ++#include "tree.h" ++#include "tree-pass.h" ++#include "cgraph.h" ++#include "diagnostic-core.h" ++#include "function.h" ++#include "basic-block.h" ++#include "gimple.h" ++#include "vec.h" ++#include "tree-pretty-print.h" ++#include "gimple-pretty-print.h" ++#include "gimple-iterator.h" ++#include "gimple-walk.h" ++#include "cfg.h" ++#include "cfghooks.h" ++#include "ssa.h" ++#include "tree-dfa.h" ++#include "fold-const.h" ++#include "tree-inline.h" ++#include "stor-layout.h" ++#include "tree-into-ssa.h" ++#include "tree-cfg.h" ++#include "alloc-pool.h" ++#include "symbol-summary.h" ++#include "ipa-prop.h" ++#include "tree-eh.h" ++#include "bitmap.h" ++#include "cfgloop.h" ++#include "langhooks.h" ++#include "ipa-param-manipulation.h" ++#include "ipa-fnsummary.h" ++#include "tree-ssa-loop.h" ++#include "tree-ssa-loop-ivopts.h" ++#include "gimple-fold.h" ++#include "gimplify.h" ++ ++namespace { ++ ++/* Call graph analysis. */ ++ ++typedef std::set edge_set; ++typedef std::set node_set; ++typedef std::map node_to_iedge_map; ++typedef std::map node_to_node_map; ++typedef std::map edge_in_loop; ++typedef std::map node_in_loop; ++ ++static edge_in_loop *el_map = NULL; ++static node_in_loop *nl_map = NULL; ++static node_to_iedge_map *icn_map = NULL; ++/* Contains nodes which reachable from a given node. */ ++static node_to_node_map *nn_map = NULL; ++ ++static bool ++can_be_optimized (cgraph_node *n) ++{ ++ /* TODO: maybe check also inlined_to. */ ++ return opt_for_fn (n->decl, flag_ipa_prefetch) && n->has_gimple_body_p (); ++} ++ ++static void ++analyze_cgraph_edge (cgraph_edge *e) ++{ ++ gcall *stmt = e->call_stmt; ++ gcc_checking_assert (e && stmt); ++ basic_block bb = gimple_bb (stmt); ++ gcc_checking_assert (bb); ++ /* TODO: add the same check for indirect calls. 
*/ ++ if (e->callee && !can_be_optimized (e->callee)) ++ return; ++ ++ if (dump_file) ++ { ++ if (e->callee) ++ fprintf (dump_file, "\t%*s%s %s%*s ", 1, "", ++ e->callee->dump_name (), !e->inline_failed ? "inlined" : ++ cgraph_inline_failed_string (e->inline_failed), 1, ""); ++ else ++ fprintf (dump_file, "\t%*s%s %s%*s ", 1, "", "(indirect)", ++ "n/a", 1, ""); ++ fprintf (dump_file, "freq:%4.2f", e->sreal_frequency ().to_double ()); ++ ++ if (e->callee && cross_module_call_p (e)) ++ fprintf (dump_file, " cross module"); ++ ++ class ipa_call_summary *es = ipa_call_summaries->get (e); ++ if (es) ++ fprintf (dump_file, " loop depth:%2i size:%2i time: %2i", ++ es->loop_depth, es->call_stmt_size, es->call_stmt_time); ++ ++ fprintf (dump_file, "\n"); ++ } ++ if (e->indirect_info && dump_file) ++ { ++ fprintf (dump_file, "II: %p\n", (void *) e->indirect_info->targets); ++ unsigned i = 0; ++ cgraph_node *n; ++ if (e->indirect_info->targets) ++ for (i = 0; e->indirect_info->targets->iterate (i, &n); ++i) ++ fprintf (dump_file, "\t%s\n", n->dump_name ()); ++ } ++ ++ if (bb_loop_depth (bb) == 0) ++ return; ++ ++ if (dump_file) ++ { ++ if (e->callee) ++ fprintf (dump_file, "\tCall in loop (%d): ", bb_loop_depth (bb)); ++ else ++ fprintf (dump_file, "\tICall in loop (%d): ", bb_loop_depth (bb)); ++ print_gimple_stmt (dump_file, stmt, 0); ++ } ++ (*el_map)[e] = e->sreal_frequency ().to_double (); ++} ++ ++/* Walk optimizible cgraph nodes and collect info for edges. */ ++ ++static void ++analyse_cgraph () ++{ ++ cgraph_node *n; ++ cgraph_edge *e; ++ FOR_EACH_DEFINED_FUNCTION (n) ++ { ++ if (dump_file) ++ { ++ fprintf (dump_file, "\n\nProcesing function %s\n", n->dump_name ()); ++ print_generic_expr (dump_file, n->decl); ++ fprintf (dump_file, "\n"); ++ } ++ if (!can_be_optimized (n)) ++ { ++ if (dump_file) ++ fprintf (dump_file, "Skip the function\n"); ++ continue; ++ } ++ ++ /* TODO: maybe remove loop info here. 
*/ ++ push_cfun (DECL_STRUCT_FUNCTION (n->decl)); ++ calculate_dominance_info (CDI_DOMINATORS); ++ loop_optimizer_init (LOOPS_NORMAL); ++ ++ for (e = n->callees; e; e = e->next_callee) ++ analyze_cgraph_edge (e); ++ for (e = n->indirect_calls; e; e = e->next_callee) ++ analyze_cgraph_edge (e); ++ ++ free_dominance_info (CDI_DOMINATORS); ++ loop_optimizer_finalize (); ++ ++ pop_cfun (); ++ } ++} ++ ++/* Save indirect call info to node:icall_target map. */ ++ ++static void ++prepare_indirect_call_info () ++{ ++ cgraph_node *n, *n2; ++ cgraph_edge *e; ++ FOR_EACH_DEFINED_FUNCTION (n) ++ for (e = n->indirect_calls; e; e = e->next_callee) ++ { ++ if (!e->indirect_info->targets) ++ continue; ++ for (unsigned i = 0; e->indirect_info->targets->iterate (i, &n2); ++i) ++ { ++ if (icn_map->count (n2) == 0) ++ (*icn_map)[n2] = new edge_set; ++ (*icn_map)[n2]->insert (e); ++ } ++ } ++} ++ ++static void ++collect_nn_info (struct cgraph_edge *e, struct cgraph_node *n) ++{ ++ struct cgraph_node *n2 = e->caller; ++ if (nn_map->count (n2) == 0) ++ (*nn_map)[n2] = new node_set; ++ (*nn_map)[n2]->insert (n); ++ if (nn_map->count (n) != 0) ++ { ++ node_set *set = (*nn_map)[n]; ++ for (node_set::const_iterator it = set->begin (); ++ it != set->end (); it++) ++ (*nn_map)[n2]->insert (*it); ++ } ++} ++ ++static bool ++check_loop_info_for_cgraph_edge (struct cgraph_edge *e, struct cgraph_node *n, ++ bool &all_in_loop, double &rate) ++{ ++ collect_nn_info (e, n); ++ if (el_map->count (e) == 0) ++ { ++ if (dump_file) ++ fprintf (dump_file, "not all: %s->%s\n", ++ e->caller->dump_name (), n->dump_name ()); ++ all_in_loop = false; ++ return false; ++ } ++ rate += (*el_map)[e]; ++ return true; ++} ++ ++static bool ++update_loop_info_for_cgraph_node (struct cgraph_node *n) ++{ ++ bool changed = false, all_in_loop = true; ++ double rate = 0.0; ++ struct cgraph_edge *e; ++ ++ /* Iterate all direct callers. 
*/ ++ if (n->callers) ++ for (e = n->callers; e; e = e->next_caller) ++ if (!check_loop_info_for_cgraph_edge (e, n, all_in_loop, rate)) ++ break; ++ ++ /* Iterate all possible indirect callers. */ ++ edge_set *set = (*icn_map)[n]; ++ if (set) ++ for (edge_set::const_iterator it = set->begin (); it != set->end (); it++) ++ if (!check_loop_info_for_cgraph_edge (*it, n, all_in_loop, rate)) ++ break; ++ ++ /* The node had 0 loop count but the rate is > 0, ++ so something is changed. */ ++ if (dump_file) ++ fprintf (dump_file, "%s: all=%d, nl->c=%lu, r=%4.2f\n", n->dump_name (), ++ all_in_loop, nl_map->count (n), rate); ++ ++ if (all_in_loop && nl_map->count (n) == 0 && rate > 0.0) ++ { ++ if (dump_file) ++ fprintf (dump_file, "%s: new rate %4.2f\n", n->dump_name (), rate); ++ changed = true; ++ } ++ if (all_in_loop) ++ { ++ (*nl_map)[n] = nl_map->count (n) ? (*nl_map)[n] + rate : rate; ++ for (e = n->callees; e; e = e->next_callee) ++ (*el_map)[e] = el_map->count (e) ? (*el_map)[e] + rate : rate; ++ for (e = n->indirect_calls; e; e = e->next_callee) ++ { ++ (*el_map)[e] = el_map->count (e) ? (*el_map)[e] + rate : rate; ++ if (dump_file) ++ fprintf (dump_file, "%s: reset indirect e=%p to %4.2f\n", ++ n->dump_name (), (void *) e, (*el_map)[e]); ++ } ++ } ++ return changed; ++} ++ ++/* Propagate in_loop info over the call graph. 
*/ ++ ++static void ++propagate_loop_info_in_cgraph () ++{ ++ struct cgraph_node *n; ++ bool changed; ++ unsigned iteration = 0; ++ do ++ { ++ changed = false; ++ if (dump_file) ++ fprintf (dump_file, "\nIteration %u\n", iteration++); ++ FOR_EACH_DEFINED_FUNCTION (n) ++ { ++ if (!n->callers && !(*icn_map)[n]) ++ continue; ++ if (update_loop_info_for_cgraph_node (n)) ++ changed = true; ++ } ++ } while (changed); ++ ++ if (dump_file) ++ { ++ fprintf (dump_file, "\nList of nodes in loops:\n"); ++ FOR_EACH_DEFINED_FUNCTION (n) ++ if (nl_map->count (n) != 0) ++ fprintf (dump_file, "%s: %4.2f\n", n->dump_name (), (*nl_map)[n]); ++ fprintf (dump_file, "\nList of callable nodes:\n"); ++ FOR_EACH_DEFINED_FUNCTION (n) ++ if (nn_map->count (n) != 0) ++ { ++ node_set *set = (*nn_map)[n]; ++ fprintf (dump_file, "%s: ", n->dump_name ()); ++ for (node_set::const_iterator it = set->begin (); ++ it != set->end (); it++) ++ fprintf (dump_file, "%s ", (*it)->dump_name ()); ++ fprintf (dump_file, "\n"); ++ } ++ } ++} ++ ++/* Analysis of memory references. 
*/ ++ ++typedef enum ++{ ++ MR_NONE, ++ MR_SIMPLE, ++ MR_POLYNOMIAL, ++ MR_INDIRECT, ++ MR_UNSUPPORTED ++} mr_type; ++const char *mr_type_str[] = ++ {"none", "simple", "poly", "indirect", "unsuppoted"}; ++ ++struct memref_type; ++typedef std::set memref_set; ++ ++static unsigned max_mr_id = 0; ++typedef struct memref_type ++{ ++ unsigned mr_id = 0; ++ mr_type type = MR_NONE; ++ tree mem = NULL_TREE; ++ tree base = NULL_TREE; ++ tree offset = NULL_TREE; ++ vec stmts = vNULL; ++ memref_set used_mrs; ++ bool is_store = false; ++ bool is_incr = false; ++ tree step = NULL_TREE; ++} memref_t; ++ ++typedef std::map tree_memref_map; ++typedef std::map > function_mrs_map; ++typedef std::map funct_mrs_map; ++typedef std::map memref_map; ++typedef std::map memref_tree_map; ++ ++typedef std::set stmt_set; ++typedef std::map tree_map; ++ ++tree_memref_map *tm_map; ++funct_mrs_map *fmrs_map; ++funct_mrs_map *optimize_mrs_map; ++memref_map *mr_candidate_map; ++tree_map *decl_map; ++ ++static void analyse_mem_ref (gimple *stmt, tree mem, memref_t* mr); ++ ++static memref_t* ++get_memref (gimple *stmt, tree mem, bool is_store) ++{ ++ if (tm_map->count (mem)) ++ { ++ if (dump_file) ++ fprintf (dump_file, "Found mr %d for %p.\n", ++ (*tm_map)[mem]->mr_id, (void *) mem); ++ return (*tm_map)[mem]; ++ } ++ ++ memref_t *mr = new memref_t; ++ mr->mr_id = ++max_mr_id; ++ mr->is_store = is_store; ++ mr->mem = mem; ++ (*tm_map)[mem] = mr; ++ if (dump_file) ++ fprintf (dump_file, "Create mr %d for %p.\n", ++ mr->mr_id, (void *) mem); ++ analyse_mem_ref (stmt, mem, mr); ++ return mr; ++} ++ ++static void ++print_mrs_ids (memref_set &mrs, const char *start) ++{ ++ if (start) ++ fprintf (dump_file, "%s", start); ++ for (memref_set::const_iterator it = mrs.begin (); it != mrs.end (); it++) ++ fprintf (dump_file, "%d ", (*it)->mr_id); ++ fprintf (dump_file, "\n"); ++} ++ ++static void ++print_memref (memref_t *mr) ++{ ++ fprintf (dump_file, "MR (%d) type: %s (%s) mem: ", mr->mr_id, ++ 
mr_type_str[mr->type], mr->is_store ? "st" : "ld"); ++ print_generic_expr (dump_file, mr->mem); ++ fprintf (dump_file, "\nbase: "); ++ if (mr->base) ++ print_generic_expr (dump_file, mr->base); ++ else ++ fprintf (dump_file, "null"); ++ fprintf (dump_file, "\noffset: "); ++ if (mr->offset) ++ print_generic_expr (dump_file, mr->offset); ++ else ++ fprintf (dump_file, "null"); ++ fprintf (dump_file, "\nstmts:\n"); ++ for (unsigned int i = 0; i < mr->stmts.length (); i++) ++ print_gimple_stmt (dump_file, mr->stmts[i], 0); ++ print_mrs_ids (mr->used_mrs, "\tused memrefs: "); ++ if (mr->is_incr) ++ { ++ fprintf (dump_file, "\tis incremental with step: "); ++ print_generic_expr (dump_file, mr->step); ++ } ++ fprintf (dump_file, "\n"); ++} ++ ++/* If there is a simple load or store to a memory reference in STMT, returns ++ the location of the memory reference, and sets IS_STORE according to whether ++ it is a store or load. Otherwise, returns NULL. ++ TODO: from gcc/tree-ssa-loop-im.c, maybe make it global. */ ++ ++static tree * ++simple_mem_ref_in_stmt (gimple *stmt, bool *is_store) ++{ ++ tree *lhs, *rhs; ++ ++ /* Recognize SSA_NAME = MEM and MEM = (SSA_NAME | invariant) patterns. */ ++ if (!gimple_assign_single_p (stmt)) ++ return NULL; ++ ++ lhs = gimple_assign_lhs_ptr (stmt); ++ rhs = gimple_assign_rhs1_ptr (stmt); ++ ++ if (TREE_CODE (*lhs) == SSA_NAME && gimple_vuse (stmt)) ++ { ++ *is_store = false; ++ return rhs; ++ } ++ else if (gimple_vdef (stmt) ++ && (TREE_CODE (*rhs) == SSA_NAME || is_gimple_min_invariant (*rhs))) ++ { ++ *is_store = true; ++ return lhs; ++ } ++ else ++ return NULL; ++} ++ ++static void ++analyse_incremental (gimple *stmt, memref_t* mr) ++{ ++ if (!gimple_assign_single_p (stmt)) ++ return; ++ tree rhs1, rhs2; ++ /* TODO: maybe support other types of stmts. 
*/ ++ while (stmt && is_gimple_assign (stmt)) ++ { ++ enum tree_code def_code = gimple_assign_rhs_code (stmt); ++ gimple_rhs_class rhs_class = gimple_assign_rhs_class (stmt); ++ if (dump_file) ++ { ++ fprintf (dump_file, "Incr: in assign (%s)\n", ++ get_tree_code_name (def_code)); ++ print_gimple_stmt (dump_file, stmt, 3, TDF_DETAILS); ++ } ++ gcc_assert (def_code != ERROR_MARK); ++ switch (rhs_class) ++ { ++ case GIMPLE_TERNARY_RHS: ++ if (dump_file) ++ fprintf (dump_file, "Incr: unsupported trinary rhs\n"); ++ stmt = NULL; ++ break; ++ case GIMPLE_UNARY_RHS: ++ case GIMPLE_SINGLE_RHS: ++ rhs1 = gimple_assign_rhs1 (stmt); ++ if (dump_file) ++ { ++ fprintf (dump_file, "Incr: (%s)", ++ get_tree_code_name (TREE_CODE (rhs1))); ++ print_generic_expr (dump_file, rhs1); ++ fprintf (dump_file, "\n"); ++ } ++ if (def_code == SSA_NAME) ++ stmt = SSA_NAME_DEF_STMT (rhs1); ++ else if (def_code == MEM_REF || def_code == COMPONENT_REF ++ || def_code == ARRAY_REF) ++ { ++ /* If we have dereference in address evaluation, ++ it's indirect memory access. 
*/ ++ if (dump_file) ++ { ++ if (operand_equal_p (mr->mem, rhs1)) ++ fprintf (dump_file, "Incr: the same MEM\n"); ++ else ++ fprintf (dump_file, "Incr: diff MEM\n"); ++ print_generic_expr (dump_file, rhs1); ++ fprintf (dump_file, " "); ++ print_generic_expr (dump_file, mr->mem); ++ fprintf (dump_file, "\n"); ++ } ++ if (operand_equal_p (mr->mem, rhs1) && mr->step) ++ mr->is_incr = true; ++ stmt = NULL; ++ } ++ else ++ { ++ if (dump_file) ++ fprintf (dump_file, "Incr: unsupported unary/single\n"); ++ stmt = NULL; ++ } ++ break; ++ case GIMPLE_BINARY_RHS: ++ rhs1 = gimple_assign_rhs1 (stmt); ++ rhs2 = gimple_assign_rhs2 (stmt); ++ if (dump_file) ++ { ++ fprintf (dump_file, "(%s) (%s)", ++ get_tree_code_name (TREE_CODE (rhs1)), ++ get_tree_code_name (TREE_CODE (rhs2))); ++ print_generic_expr (dump_file, rhs1); ++ fprintf (dump_file, " "); ++ print_generic_expr (dump_file, rhs2); ++ fprintf (dump_file, "\n"); ++ } ++ /* TODO: extend for other types of incrementation. */ ++ if (TREE_CODE (rhs1) == SSA_NAME && TREE_CODE (rhs2) == INTEGER_CST) ++ { ++ stmt = SSA_NAME_DEF_STMT (rhs1); ++ mr->step = rhs2; ++ if (dump_file) ++ { ++ fprintf (dump_file, "Incr: const increment stmt: "); ++ print_gimple_stmt (dump_file, stmt, 3, TDF_DETAILS); ++ } ++ } ++ else ++ stmt = NULL; ++ break; ++ default: ++ gcc_unreachable (); ++ } ++ } ++ if ((mr->step && !mr->is_incr) || (!mr->step && mr->is_incr)) ++ { ++ mr->step = NULL_TREE; ++ mr->is_incr = false; ++ } ++} ++ ++static mr_type ++get_memref_type (memref_t *base, memref_t *used, enum tree_code code) ++{ ++ /* TODO: improve memref type detection. */ ++ enum tree_code base_code = TREE_CODE (base->mem); ++ if (dump_file) ++ fprintf (dump_file, "get_memref_type: base=%d,%d used=%d,%d code=%s " ++ "base_code=%s\n", base->mr_id, base->type, ++ used ? used->mr_id : -1, used ? 
used->type : -1, ++ get_tree_code_name (code), get_tree_code_name (base_code)); ++ if (used) ++ { ++ if (base->type > used->type) ++ return base->type; ++ if (used->type == MR_SIMPLE) ++ return MR_POLYNOMIAL; ++ if (used->type == MR_POLYNOMIAL) ++ return base_code == ARRAY_REF ? MR_POLYNOMIAL : MR_INDIRECT; ++ if (used->type == MR_INDIRECT) ++ return MR_INDIRECT; ++ return MR_UNSUPPORTED; ++ } ++ if (code == MEM_REF || code == ARRAY_REF || code == COMPONENT_REF) ++ return base->type; ++ if (code == POINTER_PLUS_EXPR || code == PLUS_EXPR ++ || code == MINUS_EXPR || code == MULT_EXPR) ++ return base->type <= MR_POLYNOMIAL ? MR_POLYNOMIAL : base->type; ++ return base->type >= MR_INDIRECT ? base->type : MR_INDIRECT; ++} ++ ++/* Recursively walk defs of src expression and record used stmts and other mrs. ++ Return a base address candidate if it's found. */ ++ ++static tree ++analyse_addr_eval (tree src, memref_t* mr) ++{ ++ if (TREE_CODE (src) != SSA_NAME) ++ return NULL_TREE; ++ gimple *stmt = SSA_NAME_DEF_STMT (src); ++ if (dump_file) ++ { ++ fprintf (dump_file, "Src_stmt: "); ++ print_gimple_stmt (dump_file, stmt, 0); ++ } ++ if (!is_gimple_assign (stmt)) ++ { ++ if (dump_file) ++ { ++ fprintf (dump_file, "Is not assign, stop analysis: "); ++ print_gimple_stmt (dump_file, stmt, 3, TDF_DETAILS); ++ } ++ mr->type = MR_UNSUPPORTED; ++ mr->stmts.safe_push (stmt); ++ return NULL_TREE; ++ } ++ enum tree_code def_code = gimple_assign_rhs_code (stmt); ++ if (def_code != MEM_REF && def_code != COMPONENT_REF ++ && def_code != ARRAY_REF) ++ mr->stmts.safe_push (stmt); ++ gimple_rhs_class rhs_class = gimple_assign_rhs_class (stmt); ++ tree rhs1, rhs2, base; ++ if (dump_file) ++ fprintf (dump_file, "In assign (%s): ", get_tree_code_name (def_code)); ++ ++ switch (rhs_class) ++ { ++ case GIMPLE_TERNARY_RHS: ++ if (dump_file) ++ fprintf (dump_file, "Unsupported trinary rhs\n"); ++ mr->type = MR_UNSUPPORTED; ++ return NULL_TREE; ++ case GIMPLE_UNARY_RHS: ++ case GIMPLE_SINGLE_RHS: 
++ rhs1 = gimple_assign_rhs1 (stmt); ++ if (dump_file) ++ { ++ fprintf (dump_file, "(%s)", ++ get_tree_code_name (TREE_CODE (rhs1))); ++ print_generic_expr (dump_file, rhs1); ++ fprintf (dump_file, "\n"); ++ } ++ if (def_code == NOP_EXPR) ++ return analyse_addr_eval (rhs1, mr); ++ else if (def_code == MEM_REF || def_code == COMPONENT_REF ++ || def_code == ARRAY_REF) ++ { ++ memref_t *mr2 = get_memref (stmt, rhs1, false); ++ mr->type = get_memref_type (mr, mr2, def_code); ++ for (memref_set::const_iterator it = mr2->used_mrs.begin (); ++ it != mr2->used_mrs.end (); it++) ++ mr->used_mrs.insert (*it); ++ mr->used_mrs.insert (mr2); ++ return mr2->base; ++ } ++ else ++ { ++ if (dump_file) ++ fprintf (dump_file, "Unsupported unary/single\n"); ++ mr->type = MR_UNSUPPORTED; ++ } ++ return NULL_TREE; ++ case GIMPLE_BINARY_RHS: ++ rhs1 = gimple_assign_rhs1 (stmt); ++ rhs2 = gimple_assign_rhs2 (stmt); ++ if (dump_file) ++ { ++ fprintf (dump_file, "(%s) (%s)", ++ get_tree_code_name (TREE_CODE (rhs1)), ++ get_tree_code_name (TREE_CODE (rhs2))); ++ print_generic_expr (dump_file, rhs1); ++ fprintf (dump_file, " "); ++ print_generic_expr (dump_file, rhs2); ++ fprintf (dump_file, "\n"); ++ } ++ base = analyse_addr_eval (rhs1, mr); ++ analyse_addr_eval (rhs2, mr); ++ mr->type = get_memref_type (mr, NULL, def_code); ++ return base; ++ default: ++ gcc_unreachable (); ++ } ++ return NULL_TREE; ++} ++ ++static tree ++get_mem_ref_address_ssa_name (tree mem, tree base) ++{ ++ gcc_assert (TREE_CODE (mem) == MEM_REF); ++ if (base == NULL_TREE) ++ base = get_base_address (mem); ++ tree base_addr = NULL_TREE; ++ if (TREE_CODE (base) == MEM_REF) ++ base_addr = TREE_OPERAND (base, 0); ++ if (base_addr != NULL_TREE && TREE_CODE (base_addr) == SSA_NAME) ++ return base_addr; ++ return NULL_TREE; ++} ++ ++static void ++analyse_mem_ref (gimple *stmt, tree mem, memref_t* mr) ++{ ++ tree base = get_base_address (mem); ++ if (dump_file) ++ fprintf (dump_file, "Codes: base = %s, mem = %s\n", ++ base ? 
get_tree_code_name (TREE_CODE (base)) : "null", ++ mem ? get_tree_code_name (TREE_CODE (mem)) : "null"); ++ ++ mr->stmts.safe_push (stmt); ++ mr->base = base; ++ switch (TREE_CODE (mem)) ++ { ++ case COMPONENT_REF: ++ if (mr->is_store) ++ analyse_incremental (stmt, mr); ++ mr->type = MR_SIMPLE; ++ mr->offset = TREE_OPERAND (mem, 1); ++ return; ++ case ARRAY_REF: ++ analyse_addr_eval (TREE_OPERAND (mem, 1), mr); ++ return; ++ case MEM_REF: ++ { ++ tree base_addr = get_mem_ref_address_ssa_name (mem, base); ++ if (dump_file) ++ { ++ fprintf (dump_file, "Base addr (%s): ", ++ base_addr ? get_tree_code_name (TREE_CODE (base_addr)) ++ : "null"); ++ if (base_addr) ++ print_generic_expr (dump_file, base_addr); ++ fprintf (dump_file, "\n"); ++ } ++ if (base_addr) ++ { ++ mr->base = analyse_addr_eval (base_addr, mr); ++ return; ++ } ++ break; ++ } ++ default: ++ break; ++ } ++ mr->type = MR_UNSUPPORTED; ++ mr->base = NULL_TREE; ++} ++ ++static void ++analyse_stmt (gimple *stmt) ++{ ++ bool is_store; ++ tree *mem = simple_mem_ref_in_stmt (stmt, &is_store); ++ if (!mem) ++ return; ++ if (dump_file) ++ { ++ fprintf (dump_file, "\n%s: mr is found in stmt (%s): ", ++ function_name (cfun), is_store ? "store" : "load"); ++ print_gimple_stmt (dump_file, stmt, 3, TDF_DETAILS); ++ } ++ memref_t *mr = get_memref (stmt, *mem, is_store); ++ (*fmrs_map)[cfun]->insert (mr); ++ if (dump_file) ++ print_memref (mr); ++} ++ ++/* Scan stmts for indirect stores/loads with bases passed as function args. 
*/ ++ ++static void ++collect_memrefs_for_cgraph_node (struct cgraph_node *n) ++{ ++ if (dump_file) ++ fprintf (dump_file, "\nCollect indirect ptr info in %s\n", n->dump_name ()); ++ n->get_body (); ++ function *fn = DECL_STRUCT_FUNCTION (n->decl); ++ gcc_assert (fn && n->has_gimple_body_p ()); ++ ++ push_cfun (fn); ++ basic_block bb; ++ gimple_stmt_iterator si; ++ (*fmrs_map)[fn] = new memref_set; ++ FOR_EACH_BB_FN (bb, fn) ++ for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) ++ { ++ gimple *stmt = gsi_stmt (si); ++ analyse_stmt (stmt); ++ } ++ pop_cfun (); ++} ++ ++/* Walk cgraph nodes and collect memory references info. */ ++ ++static void ++collect_memory_references () ++{ ++ struct cgraph_node *n; ++ /* TODO: collect info only for loops and functions in loops. */ ++ FOR_EACH_DEFINED_FUNCTION (n) ++ if (nl_map->count (n) != 0 && n->has_gimple_body_p ()) ++ collect_memrefs_for_cgraph_node (n); ++ ++ if (dump_file) ++ { ++ fprintf (dump_file, "\n\nDump mem references:\n"); ++ FOR_EACH_DEFINED_FUNCTION (n) ++ if (nl_map->count (n) != 0 && n->has_gimple_body_p ()) ++ { ++ function *fn = DECL_STRUCT_FUNCTION (n->decl); ++ fprintf (dump_file, "\nIn function %s (%s):\n", function_name (fn), ++ nl_map->count (n) != 0 ? "in loop" : ""); ++ for (memref_set::const_iterator it = (*fmrs_map)[fn]->begin (); ++ it != (*fmrs_map)[fn]->end (); it++) ++ print_memref (*it); ++ } ++ } ++} ++ ++/* Analysis of loops. */ ++ ++memref_set *current_incr_mrs; ++memref_set *current_indirect_mrs; ++ ++static void ++collect_memref (memref_t *mr, class loop *loop, bool check_loop) ++{ ++ gimple *stmt = mr->stmts[0]; ++ gcc_assert (stmt); ++ if (check_loop && !flow_bb_inside_loop_p (loop, gimple_bb (stmt))) ++ return; ++ ++ /* TODO: Improve base invariant analysis for memrefs which are not local ++ (located in called functions). 
*/ ++ bool is_base_inv = false; ++ if (mr->base) ++ is_base_inv = expr_invariant_in_loop_p (loop, mr->base); ++ ++ if (dump_file && (mr->type == MR_INDIRECT || mr->is_incr)) ++ { ++ fprintf (dump_file, "%s MR (%d): ", mr->is_incr ? "INCR" : "INDIRECT", ++ mr->mr_id); ++ print_generic_expr (dump_file, mr->mem); ++ fprintf (dump_file, "\twith base: "); ++ if (mr->base) ++ print_generic_expr (dump_file, mr->base); ++ else ++ fprintf (dump_file, "null"); ++ fprintf (dump_file, " (is_inv=%d)\n", is_base_inv); ++ } ++ ++ if (!is_base_inv) ++ return; ++ if (mr->type == MR_INDIRECT) ++ current_indirect_mrs->insert (mr); ++ if (mr->is_incr) ++ current_incr_mrs->insert (mr); ++} ++ ++static void ++analyse_callable_function (struct cgraph_node *n, class loop *loop) ++{ ++ if (dump_file) ++ fprintf (dump_file, "Callable (%s):\n", n->dump_name ()); ++ ++ function *fn = DECL_STRUCT_FUNCTION (n->decl); ++ if (fmrs_map->count (fn)) ++ for (memref_set::const_iterator it = (*fmrs_map)[fn]->begin (); ++ it != (*fmrs_map)[fn]->end (); it++) ++ collect_memref (*it, loop, false); ++} ++ ++static void ++insert_node_with_callable_nodes (node_set &s, struct cgraph_node *n) ++{ ++ s.insert (n); ++ if (nn_map->count (n) == 0) ++ return; ++ node_set *set = (*nn_map)[n]; ++ for (node_set::const_iterator it = set->begin (); it != set->end (); it++) ++ s.insert ((*it)); ++} ++ ++static bool ++compatible_memrefs_p (memref_t *mr1, memref_t *mr2, bool &compatible_offset) ++{ ++ if (!mr1->base || !mr2->base || !mr2->offset) ++ return false; ++ tree base_type1 = TYPE_MAIN_VARIANT (TREE_TYPE (mr1->base)); ++ tree base_type2 = TYPE_MAIN_VARIANT (TREE_TYPE (mr2->base)); ++ if (base_type1 != base_type2) ++ return false; ++ if (mr1->offset && mr1->offset == mr2->offset) ++ compatible_offset = true; ++ else ++ compatible_offset = false; ++ return true; ++} ++ ++static void ++compare_memrefs (memref_t* mr, memref_t* mr2) ++{ ++ /* TODO: improve analysis of memrefs from different functions: take into ++ 
account data flow and context. */ ++ bool compatible_offset = false; ++ if (!compatible_memrefs_p (mr, mr2, compatible_offset)) ++ return; ++ if (!compatible_offset) ++ { ++ for (memref_set::const_iterator it = mr->used_mrs.begin (); ++ it != mr->used_mrs.end (); it++) ++ if ((*it)->offset && (*it)->offset == mr2->offset) ++ { ++ compatible_offset = true; ++ if (dump_file) ++ fprintf (dump_file, "Used MR (%d) and INC MR have " ++ "the same offset\n", (*it)->mr_id); ++ break; ++ } ++ } ++ if (!compatible_offset) ++ return; ++ if (dump_file) ++ { ++ fprintf (dump_file, "MR (%d) is optimization candidate with offset: ", ++ mr->mr_id); ++ print_generic_expr (dump_file, mr2->offset); ++ fprintf (dump_file, "\n"); ++ } ++ ++ if (!mr_candidate_map->count (mr)) ++ { ++ (*mr_candidate_map)[mr] = mr2; ++ return; ++ } ++ /* TODO: support analysis with incrementation of different fields. */ ++ if ((*mr_candidate_map)[mr]->offset != mr2->offset) ++ { ++ if (dump_file) ++ { ++ fprintf (dump_file, "It conflicts with previously found MR (%d) " ++ "with offset ", (*mr_candidate_map)[mr]->mr_id); ++ if ((*mr_candidate_map)[mr] != NULL) ++ print_generic_expr (dump_file, (*mr_candidate_map)[mr]->offset); ++ fprintf (dump_file, ", disable the optimization\n"); ++ } ++ (*mr_candidate_map)[mr] = NULL; ++ } ++} ++ ++/* In the given loop and all functions called from the loop, collect ++ indirect/incremental memrefs with invariant base address and inductive ++ offset. */ ++ ++static void ++collect_memrefs_for_loop (class loop *loop, struct cgraph_node *n, ++ function *fn) ++{ ++ current_incr_mrs = new memref_set; ++ current_indirect_mrs = new memref_set; ++ ++ if (dump_file) ++ fprintf (dump_file, "Loop %d\n", loop->num); ++ if (fmrs_map->count (fn)) ++ for (memref_set::const_iterator it = (*fmrs_map)[fn]->begin (); ++ it != (*fmrs_map)[fn]->end (); it++) ++ collect_memref (*it, loop, true); ++ ++ /* Collect vector of functions called in the loop. 
*/ ++ node_set set; ++ struct cgraph_edge *e; ++ struct cgraph_node *n2; ++ for (e = n->callees; e; e = e->next_callee) ++ { ++ gcall *stmt = e->call_stmt; ++ if (!flow_bb_inside_loop_p (loop, gimple_bb (stmt))) ++ continue; ++ insert_node_with_callable_nodes (set, e->callee); ++ } ++ for (e = n->indirect_calls; e; e = e->next_callee) ++ { ++ gcall *stmt = e->call_stmt; ++ if (!flow_bb_inside_loop_p (loop, gimple_bb (stmt)) ++ || !e->indirect_info->targets) ++ continue; ++ for (unsigned i = 0; e->indirect_info->targets->iterate (i, &n2); ++i) ++ insert_node_with_callable_nodes (set, n2); ++ } ++ if (set.empty ()) ++ return; ++ if (dump_file) ++ fprintf (dump_file, "Go inside all callables of %s\n", n->dump_name ()); ++ ++ for (node_set::const_iterator it = set.begin (); it != set.end (); it++) ++ analyse_callable_function (*it, loop); ++ ++ if (!current_incr_mrs->empty () && !current_indirect_mrs->empty ()) ++ { ++ if (dump_file) ++ { ++ fprintf (dump_file, "Loop has both incr and indirect memrefs\n" ++ "Incr: "); ++ for (memref_set::const_iterator it = current_incr_mrs->begin (); ++ it != current_incr_mrs->end (); it++) ++ fprintf (dump_file, "%d ", (*it)->mr_id); ++ fprintf (dump_file, "\nIndirect: "); ++ for (memref_set::const_iterator it = current_indirect_mrs->begin (); ++ it != current_indirect_mrs->end (); it++) ++ fprintf (dump_file, "%d ", (*it)->mr_id); ++ fprintf (dump_file, "\n"); ++ } ++ /* Check if indirect memref has a base address similar to one of ++ incremental memref. 
*/ ++ for (memref_set::const_iterator it = current_indirect_mrs->begin (); ++ it != current_indirect_mrs->end (); it++) ++ for (memref_set::const_iterator it2 = current_incr_mrs->begin (); ++ it2 != current_incr_mrs->end (); it2++) ++ compare_memrefs (*it, *it2); ++ } ++ ++ delete current_incr_mrs; ++ delete current_indirect_mrs; ++} ++ ++static void ++analyse_loops_in_cgraph_node (struct cgraph_node *n) ++{ ++ if (dump_file) ++ fprintf (dump_file, "\nAnalyse loops in %s\n", n->dump_name ()); ++ ++ n->get_body (); ++ function *fn = DECL_STRUCT_FUNCTION (n->decl); ++ gcc_assert (fn && n->has_gimple_body_p ()); ++ ++ push_cfun (fn); ++ calculate_dominance_info (CDI_DOMINATORS); ++ loop_optimizer_init (LOOPS_NORMAL); ++ ++ for (auto loop : loops_list (cfun, 0)) ++ { ++ class loop *outer = loop_outer (loop); ++ /* Walk only outermost loops. */ ++ if (outer->num != 0) ++ continue; ++ collect_memrefs_for_loop (loop, n, fn); ++ } ++ ++ free_dominance_info (CDI_DOMINATORS); ++ loop_optimizer_finalize (); ++ pop_cfun (); ++} ++ ++static void ++analyse_loops () ++{ ++ if (dump_file) ++ fprintf (dump_file, "\n\nLoops: procesing functions\n"); ++ cgraph_node *n; ++ FOR_EACH_DEFINED_FUNCTION (n) ++ { ++ if (!can_be_optimized (n)) ++ { ++ if (dump_file) ++ fprintf (dump_file, "Skip the function\n"); ++ continue; ++ } ++ analyse_loops_in_cgraph_node (n); ++ } ++ ++ if (dump_file) ++ fprintf (dump_file, "\n\nList of optimization candidates:\n"); ++ ++ FOR_EACH_DEFINED_FUNCTION (n) ++ { ++ function *fn = DECL_STRUCT_FUNCTION (n->decl); ++ if (!can_be_optimized (n) || !fmrs_map->count (fn)) ++ continue; ++ for (memref_map::iterator it = mr_candidate_map->begin (); ++ it != mr_candidate_map->end (); ++it) ++ { ++ memref_t *mr = it->first, *mr2 = it->second; ++ if (mr2 == NULL || !(*fmrs_map)[fn]->count (mr)) ++ continue; ++ if (!optimize_mrs_map->count (fn)) ++ (*optimize_mrs_map)[fn] = new memref_set; ++ (*optimize_mrs_map)[fn]->insert (mr); ++ } ++ if (dump_file && 
optimize_mrs_map->count (fn)) ++ { ++ fprintf (dump_file, "Function %s\n", n->dump_name ()); ++ for (memref_set::const_iterator it ++ = (*optimize_mrs_map)[fn]->begin (); ++ it != (*optimize_mrs_map)[fn]->end (); it++) ++ { ++ memref_t *mr = *it, *mr2 = (*mr_candidate_map)[mr]; ++ fprintf (dump_file, "MRs %d,%d with incremental offset ", ++ mr->mr_id, mr2->mr_id); ++ print_generic_expr (dump_file, mr2->offset); ++ fprintf (dump_file, "\n"); ++ } ++ } ++ } ++} ++ ++/* Reduce the set filtering out memrefs with the same memory references, ++ return the result vector of memrefs. */ ++ ++static void ++reduce_memref_set (memref_set *set, vec &vec) ++{ ++ for (memref_set::const_iterator it = set->begin (); ++ it != set->end (); it++) ++ { ++ memref_t *mr1 = *it; ++ if (!vec.length ()) ++ vec.safe_push (mr1); ++ else ++ { ++ bool inserted = false; ++ for (unsigned int i = 0; i < vec.length (); i++) ++ { ++ /* mr2 is less than current mr1. */ ++ memref_t *mr2 = vec[i]; ++ if (operand_equal_p (mr1->mem, mr2->mem)) ++ { ++ if (dump_file) ++ fprintf (dump_file, "The same mems in MRs %d and %d\n", ++ mr1->mr_id, mr2->mr_id); ++ /* TODO: maybe build new memref which include stmts of both ++ mr1 and mr2. 
*/ ++ if ((mr1->is_store && !mr2->is_store) ++ || mr1->stmts.length () > mr2->stmts.length ()) ++ { ++ inserted = true; ++ vec[i] = mr1; ++ } ++ } ++ } ++ if (!inserted) ++ vec.safe_push (mr1); ++ } ++ } ++ if (dump_file) ++ { ++ fprintf (dump_file, "MRs (%d) after filtering: ", vec.length ()); ++ for (unsigned int i = 0; i < vec.length (); i++) ++ fprintf (dump_file, "%d ", vec[i]->mr_id); ++ fprintf (dump_file, "\n"); ++ } ++} ++ ++static void ++find_nearest_common_dominator (memref_t *mr, basic_block &dom) ++{ ++ for (unsigned int i = 0; i < mr->stmts.length (); i++) ++ { ++ basic_block bb = gimple_bb (mr->stmts[i]); ++ gcc_assert (bb); ++ if (dom == bb) ++ continue; ++ if (dom) ++ dom = nearest_common_dominator (CDI_DOMINATORS, dom, bb); ++ else ++ dom = bb; ++ } ++} ++ ++/* Return true if DECL is a parameter or a SSA_NAME for a parameter. ++ TODO: from gcc/tree-inline.c, maybe make it global. */ ++ ++static bool ++is_parm (tree decl) ++{ ++ if (TREE_CODE (decl) == SSA_NAME) ++ { ++ decl = SSA_NAME_VAR (decl); ++ if (!decl) ++ return false; ++ } ++ ++ return (TREE_CODE (decl) == PARM_DECL); ++} ++ ++/* TODO: the following functions are inspired by remap in gcc/tree-inline.c, ++ maybe we can share some functionality. */ ++ ++static tree ++remap_name (tree name, gimple *stmt, bool is_lhs) ++{ ++ tree new_tree = NULL_TREE; ++ if (decl_map->count (name)) ++ { ++ if (dump_file) ++ { ++ fprintf (dump_file, "Find map: "); ++ print_generic_expr (dump_file, name); ++ fprintf (dump_file, " "); ++ print_generic_expr (dump_file, (*decl_map)[name]); ++ fprintf (dump_file, "\n"); ++ } ++ return unshare_expr ((*decl_map)[name]); ++ } ++ if (!is_lhs) ++ return name; ++ if (TREE_CODE (name) == SSA_NAME) ++ { ++ /* Remap anonymous SSA names or SSA names of anonymous decls. 
*/ ++ tree var = SSA_NAME_VAR (name); ++ if (!var ++ || (!SSA_NAME_IS_DEFAULT_DEF (name) ++ && VAR_P (var) && !VAR_DECL_IS_VIRTUAL_OPERAND (var) ++ && DECL_ARTIFICIAL (var) && DECL_IGNORED_P (var) ++ && !DECL_NAME (var))) ++ { ++ new_tree = make_ssa_name (TREE_TYPE (name), stmt); ++ if (!var && SSA_NAME_IDENTIFIER (name)) ++ SET_SSA_NAME_VAR_OR_IDENTIFIER (new_tree, ++ SSA_NAME_IDENTIFIER (name)); ++ SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_tree) ++ = SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name); ++ /* So can range-info. */ ++ if (!POINTER_TYPE_P (TREE_TYPE (name)) ++ && SSA_NAME_RANGE_INFO (name)) ++ duplicate_ssa_name_range_info (new_tree, ++ SSA_NAME_RANGE_TYPE (name), ++ SSA_NAME_RANGE_INFO (name)); ++ /* TODO: maybe correct the insertion. */ ++ (*decl_map)[name] = new_tree; ++ if (dump_file) ++ { ++ fprintf (dump_file, "New map (no var): "); ++ print_generic_expr (dump_file, name); ++ fprintf (dump_file, " "); ++ print_generic_expr (dump_file, new_tree); ++ fprintf (dump_file, "\n"); ++ } ++ return new_tree; ++ } ++ /* TODO: maybe remap_name or do the same as before for SSA_NAME_VAR. */ ++ new_tree = make_ssa_name (TREE_TYPE (name), stmt); ++ (*decl_map)[name] = new_tree; ++ if (dump_file) ++ { ++ fprintf (dump_file, "New map: "); ++ print_generic_expr (dump_file, name); ++ fprintf (dump_file, " "); ++ print_generic_expr (dump_file, new_tree); ++ fprintf (dump_file, "\n"); ++ } ++ } ++ else if (VAR_P (name) || TREE_CODE (name) == PARM_DECL) ++ { ++ if (dump_file) ++ { ++ fprintf (dump_file, "VAR/PARM: "); ++ print_generic_expr (dump_file, name); ++ fprintf (dump_file, "\n"); ++ } ++ return name; ++ } ++ else ++ { ++ if (dump_file) ++ { ++ fprintf (dump_file, "Unsupported: "); ++ print_generic_expr (dump_file, name); ++ fprintf (dump_file, "\n"); ++ } ++ //gcc_unreachable (); ++ return name; ++ } ++ return new_tree; ++} ++ ++/* Passed to walk_tree. Copies the node pointed to, if appropriate. 
*/ ++ ++static tree ++ipa_copy_tree_r (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED) ++{ ++ enum tree_code code = TREE_CODE (*tp); ++ enum tree_code_class cl = TREE_CODE_CLASS (code); ++ ++ /* We make copies of most nodes. */ ++ if (IS_EXPR_CODE_CLASS (cl) ++ || code == TREE_LIST ++ || code == TREE_VEC ++ || code == TYPE_DECL ++ || code == OMP_CLAUSE) ++ { ++ /* Because the chain gets clobbered when we make a copy, we save it ++ here. */ ++ tree chain = NULL_TREE, new_tree; ++ ++ if (CODE_CONTAINS_STRUCT (code, TS_COMMON)) ++ chain = TREE_CHAIN (*tp); ++ ++ /* Copy the node. */ ++ new_tree = copy_node (*tp); ++ ++ *tp = new_tree; ++ ++ /* Now, restore the chain, if appropriate. That will cause ++ walk_tree to walk into the chain as well. */ ++ if (code == PARM_DECL ++ || code == TREE_LIST ++ || code == OMP_CLAUSE) ++ TREE_CHAIN (*tp) = chain; ++ ++ /* For now, we don't update BLOCKs when we make copies. So, we ++ have to nullify all BIND_EXPRs. */ ++ if (TREE_CODE (*tp) == BIND_EXPR) ++ BIND_EXPR_BLOCK (*tp) = NULL_TREE; ++ } ++ else if (code == CONSTRUCTOR || code == STATEMENT_LIST) ++ gcc_unreachable (); ++ else if (TREE_CODE_CLASS (code) == tcc_type ++ || TREE_CODE_CLASS (code) == tcc_declaration ++ || TREE_CODE_CLASS (code) == tcc_constant) ++ *walk_subtrees = 0; ++ return NULL_TREE; ++} ++ ++/* Remap the GIMPLE operand pointed to by *TP. DATA is really a ++ 'struct walk_stmt_info *'. DATA->INFO is a 'gimple *'. ++ WALK_SUBTREES is used to indicate walk_gimple_op whether to keep ++ recursing into the children nodes of *TP. */ ++ ++static tree ++remap_gimple_op_r (tree *tp, int *walk_subtrees, void *data) ++{ ++ struct walk_stmt_info *wi_p = (struct walk_stmt_info *) data; ++ gimple *stmt = (gimple *) wi_p->info; ++ ++ /* For recursive invocations this is no longer the LHS itself. 
*/ ++ bool is_lhs = wi_p->is_lhs; ++ wi_p->is_lhs = false; ++ ++ if (TREE_CODE (*tp) == SSA_NAME) ++ { ++ *tp = remap_name (*tp, stmt, is_lhs); ++ *walk_subtrees = 0; ++ if (is_lhs) ++ SSA_NAME_DEF_STMT (*tp) = wi_p->stmt; ++ return NULL; ++ } ++ else if (auto_var_in_fn_p (*tp, cfun->decl)) ++ { ++ /* Local variables and labels need to be replaced by equivalent ++ variables. We don't want to copy static variables; there's ++ only one of those, no matter how many times we inline the ++ containing function. Similarly for globals from an outer ++ function. */ ++ tree new_decl; ++ ++ /* Remap the declaration. */ ++ new_decl = remap_name (*tp, stmt, is_lhs); ++ gcc_assert (new_decl); ++ /* Replace this variable with the copy. */ ++ STRIP_TYPE_NOPS (new_decl); ++ /* ??? The C++ frontend uses void * pointer zero to initialize ++ any other type. This confuses the middle-end type verification. ++ As cloned bodies do not go through gimplification again the fixup ++ there doesn't trigger. */ ++ if (TREE_CODE (new_decl) == INTEGER_CST ++ && !useless_type_conversion_p (TREE_TYPE (*tp), TREE_TYPE (new_decl))) ++ new_decl = fold_convert (TREE_TYPE (*tp), new_decl); ++ *tp = new_decl; ++ *walk_subtrees = 0; ++ } ++ else if (TREE_CODE (*tp) == STATEMENT_LIST || TREE_CODE (*tp) == SAVE_EXPR) ++ { ++ if (dump_file) ++ { ++ fprintf (dump_file, "Unexpected tree: "); ++ print_generic_expr (dump_file, *tp); ++ fprintf (dump_file, "\n"); ++ } ++ gcc_unreachable (); ++ } ++ else ++ { ++ /* Otherwise, just copy the node. Note that copy_tree_r already ++ knows not to copy VAR_DECLs, etc., so this is safe. */ ++ ++ if (TREE_CODE (*tp) == MEM_REF) ++ { ++ /* We need to re-canonicalize MEM_REFs from inline substitutions ++ that can happen when a pointer argument is an ADDR_EXPR. ++ Recurse here manually to allow that. 
*/ ++ tree ptr = TREE_OPERAND (*tp, 0); ++ tree type = TREE_TYPE (*tp); ++ tree old = *tp; ++ walk_tree (&ptr, remap_gimple_op_r, data, NULL); ++ *tp = fold_build2 (MEM_REF, type, ptr, TREE_OPERAND (*tp, 1)); ++ TREE_THIS_VOLATILE (*tp) = TREE_THIS_VOLATILE (old); ++ TREE_SIDE_EFFECTS (*tp) = TREE_SIDE_EFFECTS (old); ++ TREE_NO_WARNING (*tp) = TREE_NO_WARNING (old); ++ /* TODO: maybe support this case. */ ++ gcc_assert (MR_DEPENDENCE_CLIQUE (old) == 0); ++ /* We cannot propagate the TREE_THIS_NOTRAP flag if we have ++ remapped a parameter as the property might be valid only ++ for the parameter itself. */ ++ if (TREE_THIS_NOTRAP (old) && (!is_parm (TREE_OPERAND (old, 0)))) ++ TREE_THIS_NOTRAP (*tp) = 1; ++ REF_REVERSE_STORAGE_ORDER (*tp) = REF_REVERSE_STORAGE_ORDER (old); ++ *walk_subtrees = 0; ++ return NULL; ++ } ++ ++ /* Here is the "usual case". Copy this tree node, and then ++ tweak some special cases. */ ++ ipa_copy_tree_r (tp, walk_subtrees, NULL); ++ gcc_assert (!(TREE_CODE (*tp) == TARGET_EXPR && TREE_OPERAND (*tp, 3))); ++ if (TREE_CODE (*tp) == ADDR_EXPR) ++ { ++ /* TODO: If this used to be invariant, but is not any longer, ++ then regimplification is probably needed. */ ++ walk_tree (&TREE_OPERAND (*tp, 0), remap_gimple_op_r, data, NULL); ++ recompute_tree_invariant_for_addr_expr (*tp); ++ *walk_subtrees = 0; ++ } ++ } ++ /* TODO: maybe we need to update TREE_BLOCK (*tp). */ ++ ++ /* Keep iterating. */ ++ return NULL_TREE; ++} ++ ++static void ++create_cgraph_edge (cgraph_node *n, gimple *stmt) ++{ ++ gcall *call_stmt = dyn_cast (stmt); ++ basic_block bb = gimple_bb (stmt); ++ tree decl = gimple_call_fndecl (call_stmt); ++ if (!decl) ++ return; ++ struct cgraph_edge *e = n->create_edge (cgraph_node::get_create (decl), ++ call_stmt, bb->count); ++ /* TODO: maybe we need to store ipa_call_summary result. */ ++ ipa_call_summaries->get_create (e); ++} ++ ++/* Insert prefetch intrinsics in this function, return nonzero on success. 
*/ ++ ++static int ++optimize_function (cgraph_node *n, function *fn) ++{ ++ /* In a given function, optimize only indirect memrefs with ++ the same incremental memref. ++ TODO: implement the optimization for other cases. */ ++ bool different_incrementals = false; ++ memref_t *first_mr = NULL; ++ memref_set used_mrs; ++ for (memref_set::const_iterator it = (*optimize_mrs_map)[fn]->begin (); ++ it != (*optimize_mrs_map)[fn]->end (); it++) ++ { ++ memref_t *mr = *it; ++ if (!first_mr) ++ first_mr = mr; ++ else if ((*mr_candidate_map)[first_mr] != (*mr_candidate_map)[mr]) ++ { ++ different_incrementals = true; ++ break; ++ } ++ for (memref_set::const_iterator it2 = mr->used_mrs.begin (); ++ it2 != mr->used_mrs.end (); it2++) ++ used_mrs.insert (*it2); ++ } ++ if (different_incrementals) ++ { ++ if (dump_file) ++ fprintf (dump_file, "It contains memrefs with different " ++ "incrementals. Skip the case.\n"); ++ return 0; ++ } ++ memref_t *inc_mr = (*mr_candidate_map)[first_mr]; ++ if (!inc_mr->stmts[0] || !gimple_assign_single_p (inc_mr->stmts[0])) ++ { ++ if (dump_file) ++ fprintf (dump_file, "Incremental MR with unexpected stmt. " ++ "Skip the case.\n"); ++ return 0; ++ } ++ if (dump_file && !used_mrs.empty ()) ++ print_mrs_ids (used_mrs, "Common list of used mrs:\n"); ++ ++ /* Find a memref in used mrs which corresponds to the found incremental ++ memref. 
*/ ++ memref_t *comp_mr = NULL; ++ for (memref_set::const_iterator it = used_mrs.begin (); ++ it != used_mrs.end (); it++) ++ { ++ bool c_offset; ++ if ((*it)->type != MR_SIMPLE || inc_mr->type != MR_SIMPLE ++ || !compatible_memrefs_p (*it, inc_mr, c_offset)) ++ continue; ++ if (c_offset) ++ { ++ if (dump_file) ++ fprintf (dump_file, "Found compatible used MR (%d) and " ++ "incr MR (%d)\n", (*it)->mr_id, inc_mr->mr_id); ++ comp_mr = (*it); ++ } ++ } ++ if (!comp_mr || !comp_mr->stmts[0] ++ || !gimple_assign_single_p (comp_mr->stmts[0])) ++ { ++ if (dump_file) ++ fprintf (dump_file, "Compatible MR in this function is not found " ++ " or it has unexpected stmt. Skip the case.\n"); ++ return 0; ++ } ++ ++ /* Filter out memrefs with the same memory references. ++ TODO: maybe do the same with used mrs. */ ++ vec vmrs = vNULL; ++ reduce_memref_set ((*optimize_mrs_map)[fn], vmrs); ++ ++ /* Find insertion place. Create new BB. */ ++ /* TODO: maybe it is useful to process also used_mrs. */ ++ basic_block dom_bb = NULL; ++ for (unsigned int i = 0; i < vmrs.length (); i++) ++ find_nearest_common_dominator (vmrs[i], dom_bb); ++ ++ if (!dom_bb) ++ { ++ if (dump_file) ++ fprintf (dump_file, "Dominator bb for MRs is not found. " ++ "Skip the case.\n"); ++ return 0; ++ } ++ else if (dump_file) ++ fprintf (dump_file, "Dominator bb %d for MRs\n", dom_bb->index); ++ ++ split_block (dom_bb, (gimple *) NULL); ++ gimple_stmt_iterator gsi = gsi_last_bb (dom_bb); ++ ++ /* Create new inc var. Insert new_var = old_var + step * factor. 
*/ ++ decl_map = new tree_map; ++ gcc_assert (comp_mr->stmts[0] && gimple_assign_single_p (comp_mr->stmts[0])); ++ tree inc_var = gimple_assign_lhs (comp_mr->stmts[0]); ++ gimple_seq stmts = NULL; ++ tree var_type = TREE_TYPE (inc_var); ++ enum tree_code inc_code; ++ if (TREE_CODE (var_type) == POINTER_TYPE) ++ inc_code = POINTER_PLUS_EXPR; ++ else ++ inc_code = PLUS_EXPR; ++ tree step = inc_mr->step; ++ unsigned dist_val = tree_to_uhwi (step) * param_ipa_prefetch_distance_factor; ++ tree dist = build_int_cst (TREE_TYPE (step), dist_val); ++ tree new_inc_var = gimple_build (&stmts, inc_code, var_type, inc_var, dist); ++ (*decl_map)[inc_var] = new_inc_var; ++ ++ /* Create other new vars. Insert new stmts. */ ++ struct walk_stmt_info wi; ++ stmt_set processed_stmts; ++ memref_tree_map mr_new_trees; ++ for (memref_set::const_iterator it = used_mrs.begin (); ++ it != used_mrs.end (); it++) ++ { ++ memref_t *mr = *it; ++ gimple *last_stmt = NULL; ++ if (mr == comp_mr) ++ continue; ++ for (int i = mr->stmts.length () - 1; i >= 0 ; i--) ++ { ++ if (processed_stmts.count (mr->stmts[i])) ++ continue; ++ processed_stmts.insert (mr->stmts[i]); ++ if (dump_file) ++ { ++ fprintf (dump_file, "Copy stmt %d from used MR (%d):\n", ++ i, mr->mr_id); ++ print_gimple_stmt (dump_file, mr->stmts[i], 0); ++ } ++ /* Create a new copy of STMT and duplicate STMT's virtual ++ operands. */ ++ gimple *copy = gimple_copy (mr->stmts[i]); ++ gcc_checking_assert (!is_gimple_debug (copy)); ++ ++ /* Remap all the operands in COPY. 
*/ ++ memset (&wi, 0, sizeof (wi)); ++ last_stmt = copy; ++ wi.info = copy; ++ walk_gimple_op (copy, remap_gimple_op_r, &wi); ++ if (dump_file) ++ { ++ fprintf (dump_file, "Stmt %d after remap:\n",i); ++ print_gimple_stmt (dump_file, copy, 0); ++ } ++ gimple_seq_add_stmt (&stmts, copy); ++ } ++ gcc_assert (last_stmt); ++ mr_new_trees[mr] = gimple_assign_lhs (last_stmt); ++ if (dump_file) ++ { ++ fprintf (dump_file, "MR (%d) new mem: ", mr->mr_id); ++ print_generic_expr (dump_file, gimple_assign_lhs (last_stmt)); ++ fprintf (dump_file, "\n"); ++ } ++ } ++ /* On new load check page fault. */ ++ /* Insert prefetch instructions. */ ++ if (dump_file) ++ fprintf (dump_file, "Evaluate addresses and insert prefetch insn.\n"); ++ ++ vec pcalls = vNULL; ++ tree local; ++ switch (param_ipa_prefetch_locality) ++ { ++ case 0: ++ local = integer_zero_node; ++ break; ++ case 1: ++ local = integer_one_node; ++ break; ++ case 2: ++ local = build_int_cst (integer_type_node, 2); ++ break; ++ default: ++ case 3: ++ local = integer_three_node; ++ break; ++ } ++ for (unsigned int j = 0; j < vmrs.length (); j++) ++ { ++ memref_t *mr = vmrs[j]; ++ /* Don't need to copy the last stmt, since we insert prefetch insn ++ instead of it. */ ++ for (int i = mr->stmts.length () - 1; i >= 1 ; i--) ++ { ++ if (processed_stmts.count (mr->stmts[i])) ++ continue; ++ processed_stmts.insert (mr->stmts[i]); ++ ++ gimple *copy = gimple_copy (mr->stmts[i]); ++ gcc_checking_assert (!is_gimple_debug (copy)); ++ ++ /* Remap all the operands in COPY. */ ++ memset (&wi, 0, sizeof (wi)); ++ wi.info = copy; ++ walk_gimple_op (copy, remap_gimple_op_r, &wi); ++ if (dump_file) ++ { ++ fprintf (dump_file, "Stmt %d after remap:\n",i); ++ print_gimple_stmt (dump_file, copy, 0); ++ } ++ gimple_seq_add_stmt (&stmts, copy); ++ } ++ gimple *last_stmt = mr->stmts[0]; ++ gcc_assert (last_stmt); ++ mr_new_trees[mr] = gimple_assign_lhs (last_stmt); ++ tree write_p = mr->is_store ? 
integer_one_node : integer_zero_node; ++ tree addr = get_mem_ref_address_ssa_name (mr->mem, NULL_TREE); ++ if (decl_map->count (addr)) ++ addr = (*decl_map)[addr]; ++ last_stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_PREFETCH), ++ 3, addr, write_p, local); ++ pcalls.safe_push (last_stmt); ++ gimple_seq_add_stmt (&stmts, last_stmt); ++ } ++ ++ gsi_insert_seq_after (&gsi, stmts, GSI_NEW_STMT); ++ delete decl_map; ++ ++ /* Modify cgraph inserting calls to prefetch intrinsics. */ ++ for (unsigned i = 0; i < pcalls.length (); i++) ++ create_cgraph_edge (n, pcalls[i]); ++ ipa_update_overall_fn_summary (n); ++ ++ return 1; ++} ++ ++static int ++insert_prefetch () ++{ ++ int res = 0; ++ cgraph_node *n; ++ FOR_EACH_DEFINED_FUNCTION (n) ++ { ++ function *fn = DECL_STRUCT_FUNCTION (n->decl); ++ if (!optimize_mrs_map->count (fn)) ++ continue; ++ if (dump_file) ++ fprintf (dump_file, "Optimize function %s\n", n->dump_name ()); ++ push_cfun (DECL_STRUCT_FUNCTION (n->decl)); ++ calculate_dominance_info (CDI_DOMINATORS); ++ res |= optimize_function (n, fn); ++ free_dominance_info (CDI_DOMINATORS); ++ pop_cfun (); ++ } ++ return res; ++} ++ ++static unsigned int ++ipa_prefetch (void) ++{ ++ if (!targetm.have_prefetch ()) ++ { ++ if (dump_file) ++ fprintf (dump_file, "Prefetch is not supported by the target.\n"); ++ return 0; ++ } ++ ++ unsigned int ret = 0; ++ el_map = new edge_in_loop; ++ nl_map = new node_in_loop; ++ icn_map = new node_to_iedge_map; ++ nn_map = new node_to_node_map; ++ tm_map = new tree_memref_map; ++ fmrs_map = new funct_mrs_map; ++ mr_candidate_map = new memref_map; ++ optimize_mrs_map = new funct_mrs_map; ++ ++ max_mr_id = 0; ++ /* TODO: check if we really need this init. 
*/ ++ if (!builtin_decl_explicit_p (BUILT_IN_PREFETCH)) ++ { ++ tree type = build_function_type_list (void_type_node, ++ const_ptr_type_node, NULL_TREE); ++ tree decl = add_builtin_function ("__builtin_prefetch", type, ++ BUILT_IN_PREFETCH, BUILT_IN_NORMAL, ++ NULL, NULL_TREE); ++ DECL_IS_NOVOPS (decl) = true; ++ set_builtin_decl (BUILT_IN_PREFETCH, decl, false); ++ } ++ ++ analyse_cgraph (); ++ prepare_indirect_call_info (); ++ propagate_loop_info_in_cgraph (); ++ collect_memory_references (); ++ analyse_loops (); ++ ++ /* TODO: implement some specific heuristics. */ ++ if (!optimize_mrs_map->empty ()) ++ ret = insert_prefetch (); ++ ++ delete el_map; ++ delete nl_map; ++ for (node_to_iedge_map::iterator it = icn_map->begin (); ++ it != icn_map->end (); ++it) ++ delete it->second; ++ delete icn_map; ++ for (node_to_node_map::iterator it = nn_map->begin (); ++ it != nn_map->end (); ++it) ++ delete it->second; ++ delete nn_map; ++ for (tree_memref_map::iterator it = tm_map->begin (); ++ it != tm_map->end (); ++it) ++ delete it->second; ++ delete tm_map; ++ for (funct_mrs_map::iterator it = fmrs_map->begin (); ++ it != fmrs_map->end (); ++it) ++ delete it->second; ++ delete fmrs_map; ++ delete mr_candidate_map; ++ delete optimize_mrs_map; ++ ++ /* TODO: maybe add other todos. 
*/ ++ return ret | TODO_verify_all; ++} ++ ++const pass_data pass_data_ipa_prefetch = ++{ ++ SIMPLE_IPA_PASS, // type ++ "ipa_prefetch", // name ++ OPTGROUP_NONE, // optinfo_flags ++ TV_IPA_PREFETCH, // tv_id ++ 0, // properties_required ++ 0, // properties_provided ++ 0, // properties_destroyed ++ 0, // todo_flags_start ++ 0, // todo_flags_finish ++}; ++ ++class pass_ipa_prefetch : public simple_ipa_opt_pass ++{ ++public: ++ pass_ipa_prefetch (gcc::context *ctxt) ++ : simple_ipa_opt_pass (pass_data_ipa_prefetch, ctxt) ++ {} ++ ++ /* opt_pass methods: */ ++ virtual bool gate (function *); ++ virtual unsigned int execute (function *) ++ { ++ return ipa_prefetch (); ++ } ++}; // class pass_ipa_prefetch ++ ++bool ++pass_ipa_prefetch::gate (function *) ++{ ++ return (optimize >= 3 ++ && flag_ipa_prefetch ++ /* Don't bother doing anything if the program has errors. */ ++ && !seen_error () ++ && flag_lto_partition == LTO_PARTITION_ONE ++ /* Only enable struct optimizations in lto or whole_program. */ ++ && (in_lto_p || flag_whole_program)); ++} ++ ++} // anon namespace ++ ++simple_ipa_opt_pass * ++make_pass_ipa_prefetch (gcc::context *ctxt) ++{ ++ return new pass_ipa_prefetch (ctxt); ++} +diff --git a/gcc/ipa-sra.cc b/gcc/ipa-sra.cc +index 261a72085..5355cf2f4 100644 +--- a/gcc/ipa-sra.cc ++++ b/gcc/ipa-sra.cc +@@ -3033,6 +3033,14 @@ process_edge_to_unknown_caller (cgraph_edge *cs) + gcc_checking_assert (from_ifs); + isra_call_summary *csum = call_sums->get (cs); + ++ /* TODO: implement better support for call edges inserted after summary ++ collection but before sra wpa invocation. 
*/ ++ if (!csum) ++ { ++ csum = call_sums->get_create (cs); ++ csum->m_return_ignored = true; ++ } ++ + if (dump_file && (dump_flags & TDF_DETAILS)) + fprintf (dump_file, "Processing an edge to an unknown caller from %s:\n", + cs->caller->dump_name ()); +diff --git a/gcc/params.opt b/gcc/params.opt +index 7e5c119cf..5c07e3986 100644 +--- a/gcc/params.opt ++++ b/gcc/params.opt +@@ -309,6 +309,14 @@ Maximum pieces that IPA-SRA tracks per formal parameter, as a consequence, also + Common Joined UInteger Var(param_ipa_sra_ptr_growth_factor) Init(2) Param Optimization + Maximum allowed growth of number and total size of new parameters that ipa-sra replaces a pointer to an aggregate with. + ++-param=ipa-prefetch-distance-factor= ++Common Joined UInteger Var(param_ipa_prefetch_distance_factor) Init(4) Param Optimization ++The factor represents the number of inductive variable incrementations to evaluate an indirect memory address for IPA prefetch. ++ ++-param=ipa-prefetch-locality= ++Common Joined UInteger Var(param_ipa_prefetch_locality) Init(3) Param Optimization ++The flag represents temporal locality values in the following way: 0:pstl1strm, 1:pstl3keep, 2:pstl2keep, 3:pstl1keep. ++ + -param=ira-loop-reserved-regs= + Common Joined UInteger Var(param_ira_loop_reserved_regs) Init(2) Param Optimization + The number of registers in each class kept unused by loop invariant motion. +diff --git a/gcc/passes.def b/gcc/passes.def +index b7d4f7b4e..4c1436766 100644 +--- a/gcc/passes.def ++++ b/gcc/passes.def +@@ -158,6 +158,7 @@ along with GCC; see the file COPYING3. 
If not see + NEXT_PASS (pass_ipa_icf); + NEXT_PASS (pass_ipa_devirt); + NEXT_PASS (pass_ipa_icp); ++ NEXT_PASS (pass_ipa_prefetch); + NEXT_PASS (pass_ipa_cp); + NEXT_PASS (pass_ipa_sra); + NEXT_PASS (pass_ipa_cdtor_merge); +diff --git a/gcc/timevar.def b/gcc/timevar.def +index 18a9f62cc..810ae20fd 100644 +--- a/gcc/timevar.def ++++ b/gcc/timevar.def +@@ -81,6 +81,7 @@ DEFTIMEVAR (TV_IPA_CONSTANT_PROP , "ipa cp") + DEFTIMEVAR (TV_IPA_INLINING , "ipa inlining heuristics") + DEFTIMEVAR (TV_IPA_FNSPLIT , "ipa function splitting") + DEFTIMEVAR (TV_IPA_COMDATS , "ipa comdats") ++DEFTIMEVAR (TV_IPA_PREFETCH , "ipa prefetch") + DEFTIMEVAR (TV_IPA_STRUCT_REORG , "ipa struct reorg optimization") + DEFTIMEVAR (TV_IPA_OPT , "ipa various optimizations") + DEFTIMEVAR (TV_IPA_LTO_DECOMPRESS , "lto stream decompression") +diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h +index 1733931c3..63f1192ae 100644 +--- a/gcc/tree-pass.h ++++ b/gcc/tree-pass.h +@@ -529,6 +529,7 @@ extern ipa_opt_pass_d *make_pass_ipa_icp (gcc::context *ctxt); + extern ipa_opt_pass_d *make_pass_ipa_odr (gcc::context *ctxt); + extern ipa_opt_pass_d *make_pass_ipa_reference (gcc::context *ctxt); + extern ipa_opt_pass_d *make_pass_ipa_pure_const (gcc::context *ctxt); ++extern simple_ipa_opt_pass *make_pass_ipa_prefetch (gcc::context *ctxt); + extern simple_ipa_opt_pass *make_pass_ipa_struct_reorg (gcc::context *ctxt); + extern simple_ipa_opt_pass *make_pass_ipa_pta (gcc::context *ctxt); + extern simple_ipa_opt_pass *make_pass_ipa_tm (gcc::context *ctxt); +-- +2.33.0 + diff --git a/0051-Port-fixes-for-IPA-prefetch-to-GCC-12.patch b/0051-Port-fixes-for-IPA-prefetch-to-GCC-12.patch new file mode 100644 index 0000000000000000000000000000000000000000..dae19fa2574f55f6d82b4d3088d46691eff58bfb --- /dev/null +++ b/0051-Port-fixes-for-IPA-prefetch-to-GCC-12.patch @@ -0,0 +1,2216 @@ +From 4c262af8e178ac7c81b32be5b159b4d09a5841c9 Mon Sep 17 00:00:00 2001 +From: Diachkov Ilia +Date: Fri, 8 Mar 2024 07:07:50 +0800 +Subject: 
[PATCH 1/2] Port fixes for IPA prefetch to GCC 12 + +--- + gcc/ipa-devirt.cc | 9 +- + gcc/ipa-prefetch.cc | 174 +- + gcc/ipa-sra.cc | 7 + + gcc/params.opt | 4 +- + gcc/testsuite/gcc.dg/completion-1.c | 1 + + gcc/testsuite/gcc.dg/ipa/ipa-prefetch-xz.c | 1843 ++++++++++++++++++++ + 6 files changed, 1974 insertions(+), 64 deletions(-) + create mode 100644 gcc/testsuite/gcc.dg/ipa/ipa-prefetch-xz.c + +diff --git a/gcc/ipa-devirt.cc b/gcc/ipa-devirt.cc +index dd3562d56..dd000b401 100644 +--- a/gcc/ipa-devirt.cc ++++ b/gcc/ipa-devirt.cc +@@ -5029,9 +5029,12 @@ analyze_assign_stmt (gimple *stmt) + } + else + { +- fprintf (dump_file, "\nUnsupported rhs type %s in assign stmt: ", +- get_tree_code_name (TREE_CODE (rhs))); +- print_gimple_stmt (dump_file, stmt, 0); ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "\nUnsupported rhs type %s in assign stmt: ", ++ get_tree_code_name (TREE_CODE (rhs))); ++ print_gimple_stmt (dump_file, stmt, 0); ++ } + gcc_unreachable (); + } + } +diff --git a/gcc/ipa-prefetch.cc b/gcc/ipa-prefetch.cc +index aeea51105..9537e4835 100644 +--- a/gcc/ipa-prefetch.cc ++++ b/gcc/ipa-prefetch.cc +@@ -167,6 +167,7 @@ analyse_cgraph () + } + + /* TODO: maybe remove loop info here. */ ++ n->get_body (); + push_cfun (DECL_STRUCT_FUNCTION (n->decl)); + calculate_dominance_info (CDI_DOMINATORS); + loop_optimizer_init (LOOPS_NORMAL); +@@ -942,6 +943,9 @@ compare_memrefs (memref_t* mr, memref_t* mr2) + (*mr_candidate_map)[mr] = mr2; + return; + } ++ /* Probably we shouldn't leave nulls in the map. */ ++ if ((*mr_candidate_map)[mr] == NULL) ++ return; + /* TODO: support analysis with incrementation of different fields. */ + if ((*mr_candidate_map)[mr]->offset != mr2->offset) + { +@@ -1090,6 +1094,15 @@ analyse_loops () + memref_t *mr = it->first, *mr2 = it->second; + if (mr2 == NULL || !(*fmrs_map)[fn]->count (mr)) + continue; ++ /* For now optimize only MRs that mem is MEM_REF. ++ TODO: support other MR types. 
*/ ++ if (TREE_CODE (mr->mem) != MEM_REF) ++ { ++ if (dump_file) ++ fprintf (dump_file, "Skip MR %d: unsupported tree code = %s\n", ++ mr->mr_id, get_tree_code_name (TREE_CODE (mr->mem))); ++ continue; ++ } + if (!optimize_mrs_map->count (fn)) + (*optimize_mrs_map)[fn] = new memref_set; + (*optimize_mrs_map)[fn]->insert (mr); +@@ -1102,7 +1115,7 @@ analyse_loops () + it != (*optimize_mrs_map)[fn]->end (); it++) + { + memref_t *mr = *it, *mr2 = (*mr_candidate_map)[mr]; +- fprintf (dump_file, "MRs %d,%d with incremental offset ", ++ fprintf (dump_file, "MRs %d, %d with incremental offset ", + mr->mr_id, mr2->mr_id); + print_generic_expr (dump_file, mr2->offset); + fprintf (dump_file, "\n"); +@@ -1435,6 +1448,52 @@ remap_gimple_op_r (tree *tp, int *walk_subtrees, void *data) + return NULL_TREE; + } + ++/* Copy stmt and remap its operands. */ ++ ++static gimple * ++gimple_copy_and_remap (gimple *stmt) ++{ ++ gimple *copy = gimple_copy (stmt); ++ gcc_checking_assert (!is_gimple_debug (copy)); ++ ++ /* Remap all the operands in COPY. */ ++ struct walk_stmt_info wi; ++ memset (&wi, 0, sizeof (wi)); ++ wi.info = copy; ++ walk_gimple_op (copy, remap_gimple_op_r, &wi); ++ if (dump_file) ++ { ++ fprintf (dump_file, "Stmt copy after remap:\n"); ++ print_gimple_stmt (dump_file, copy, 0); ++ } ++ return copy; ++} ++ ++/* Copy and remap stmts listed in MR in reverse order to last_idx, skipping ++ processed ones. Insert new stmts to the sequence. 
*/ ++ ++static gimple * ++gimple_copy_and_remap_memref_stmts (memref_t *mr, gimple_seq &stmts, ++ int last_idx, stmt_set &processed) ++{ ++ gimple *last_stmt = NULL; ++ for (int i = mr->stmts.length () - 1; i >= last_idx ; i--) ++ { ++ if (processed.count (mr->stmts[i])) ++ continue; ++ processed.insert (mr->stmts[i]); ++ if (dump_file) ++ { ++ fprintf (dump_file, "Copy stmt %d from used MR (%d):\n", ++ i, mr->mr_id); ++ print_gimple_stmt (dump_file, mr->stmts[i], 0); ++ } ++ last_stmt = gimple_copy_and_remap (mr->stmts[i]); ++ gimple_seq_add_stmt (&stmts, last_stmt); ++ } ++ return last_stmt; ++} ++ + static void + create_cgraph_edge (cgraph_node *n, gimple *stmt) + { +@@ -1490,6 +1549,13 @@ optimize_function (cgraph_node *n, function *fn) + "Skip the case.\n"); + return 0; + } ++ if (!tree_fits_shwi_p (inc_mr->step)) ++ { ++ if (dump_file) ++ fprintf (dump_file, "Cannot represent incremental MR's step as " ++ "integer. Skip the case.\n"); ++ return 0; ++ } + if (dump_file && !used_mrs.empty ()) + print_mrs_ids (used_mrs, "Common list of used mrs:\n"); + +@@ -1539,16 +1605,44 @@ optimize_function (cgraph_node *n, function *fn) + return 0; + } + else if (dump_file) +- fprintf (dump_file, "Dominator bb %d for MRs\n", dom_bb->index); ++ { ++ fprintf (dump_file, "Dominator bb %d for MRs:\n", dom_bb->index); ++ gimple_dump_bb (dump_file, dom_bb, 0, dump_flags); ++ fprintf (dump_file, "\n"); ++ } + +- split_block (dom_bb, (gimple *) NULL); ++ /* Try to find comp_mr's stmt in the dominator bb. */ ++ gimple *last_used = NULL; ++ for (gimple_stmt_iterator si = gsi_last_bb (dom_bb); !gsi_end_p (si); ++ gsi_prev (&si)) ++ if (comp_mr->stmts[0] == gsi_stmt (si)) ++ { ++ last_used = gsi_stmt (si); ++ if (dump_file) ++ { ++ fprintf (dump_file, "Last used stmt in dominator bb:\n"); ++ print_gimple_stmt (dump_file, last_used, 0); ++ } ++ break; ++ } ++ ++ split_block (dom_bb, last_used); + gimple_stmt_iterator gsi = gsi_last_bb (dom_bb); + + /* Create new inc var. 
Insert new_var = old_var + step * factor. */ + decl_map = new tree_map; + gcc_assert (comp_mr->stmts[0] && gimple_assign_single_p (comp_mr->stmts[0])); + tree inc_var = gimple_assign_lhs (comp_mr->stmts[0]); ++ /* If old_var definition dominates the current use, just use it, otherwise ++ evaluate it just before new inc var evaluation. */ + gimple_seq stmts = NULL; ++ stmt_set processed_stmts; ++ if (!dominated_by_p (CDI_DOMINATORS, dom_bb, gimple_bb (comp_mr->stmts[0]))) ++ { ++ gimple *tmp = gimple_copy_and_remap_memref_stmts (comp_mr, stmts, 0, ++ processed_stmts); ++ inc_var = gimple_assign_lhs (tmp); ++ } + tree var_type = TREE_TYPE (inc_var); + enum tree_code inc_code; + if (TREE_CODE (var_type) == POINTER_TYPE) +@@ -1556,52 +1650,28 @@ optimize_function (cgraph_node *n, function *fn) + else + inc_code = PLUS_EXPR; + tree step = inc_mr->step; +- unsigned dist_val = tree_to_uhwi (step) * param_ipa_prefetch_distance_factor; ++ HOST_WIDE_INT dist_val = tree_to_shwi (step) ++ * param_ipa_prefetch_distance_factor; + tree dist = build_int_cst (TREE_TYPE (step), dist_val); + tree new_inc_var = gimple_build (&stmts, inc_code, var_type, inc_var, dist); + (*decl_map)[inc_var] = new_inc_var; ++ if (dump_file) ++ { ++ fprintf (dump_file, "New distance value: %ld, new inc var: ", dist_val); ++ print_generic_expr (dump_file, new_inc_var); ++ fprintf (dump_file, "\n"); ++ } + + /* Create other new vars. Insert new stmts. 
*/ +- struct walk_stmt_info wi; +- stmt_set processed_stmts; +- memref_tree_map mr_new_trees; + for (memref_set::const_iterator it = used_mrs.begin (); + it != used_mrs.end (); it++) + { + memref_t *mr = *it; +- gimple *last_stmt = NULL; + if (mr == comp_mr) + continue; +- for (int i = mr->stmts.length () - 1; i >= 0 ; i--) +- { +- if (processed_stmts.count (mr->stmts[i])) +- continue; +- processed_stmts.insert (mr->stmts[i]); +- if (dump_file) +- { +- fprintf (dump_file, "Copy stmt %d from used MR (%d):\n", +- i, mr->mr_id); +- print_gimple_stmt (dump_file, mr->stmts[i], 0); +- } +- /* Create a new copy of STMT and duplicate STMT's virtual +- operands. */ +- gimple *copy = gimple_copy (mr->stmts[i]); +- gcc_checking_assert (!is_gimple_debug (copy)); +- +- /* Remap all the operands in COPY. */ +- memset (&wi, 0, sizeof (wi)); +- last_stmt = copy; +- wi.info = copy; +- walk_gimple_op (copy, remap_gimple_op_r, &wi); +- if (dump_file) +- { +- fprintf (dump_file, "Stmt %d after remap:\n",i); +- print_gimple_stmt (dump_file, copy, 0); +- } +- gimple_seq_add_stmt (&stmts, copy); +- } ++ gimple *last_stmt = gimple_copy_and_remap_memref_stmts (mr, stmts, 0, ++ processed_stmts); + gcc_assert (last_stmt); +- mr_new_trees[mr] = gimple_assign_lhs (last_stmt); + if (dump_file) + { + fprintf (dump_file, "MR (%d) new mem: ", mr->mr_id); +@@ -1637,29 +1707,9 @@ optimize_function (cgraph_node *n, function *fn) + memref_t *mr = vmrs[j]; + /* Don't need to copy the last stmt, since we insert prefetch insn + instead of it. */ +- for (int i = mr->stmts.length () - 1; i >= 1 ; i--) +- { +- if (processed_stmts.count (mr->stmts[i])) +- continue; +- processed_stmts.insert (mr->stmts[i]); +- +- gimple *copy = gimple_copy (mr->stmts[i]); +- gcc_checking_assert (!is_gimple_debug (copy)); +- +- /* Remap all the operands in COPY. 
*/ +- memset (&wi, 0, sizeof (wi)); +- wi.info = copy; +- walk_gimple_op (copy, remap_gimple_op_r, &wi); +- if (dump_file) +- { +- fprintf (dump_file, "Stmt %d after remap:\n",i); +- print_gimple_stmt (dump_file, copy, 0); +- } +- gimple_seq_add_stmt (&stmts, copy); +- } ++ gimple_copy_and_remap_memref_stmts (mr, stmts, 1, processed_stmts); + gimple *last_stmt = mr->stmts[0]; + gcc_assert (last_stmt); +- mr_new_trees[mr] = gimple_assign_lhs (last_stmt); + tree write_p = mr->is_store ? integer_one_node : integer_zero_node; + tree addr = get_mem_ref_address_ssa_name (mr->mem, NULL_TREE); + if (decl_map->count (addr)) +@@ -1668,6 +1718,11 @@ optimize_function (cgraph_node *n, function *fn) + 3, addr, write_p, local); + pcalls.safe_push (last_stmt); + gimple_seq_add_stmt (&stmts, last_stmt); ++ if (dump_file) ++ { ++ fprintf (dump_file, "Insert %d prefetch stmt:\n", j); ++ print_gimple_stmt (dump_file, last_stmt, 0); ++ } + } + + gsi_insert_seq_after (&gsi, stmts, GSI_NEW_STMT); +@@ -1677,6 +1732,7 @@ optimize_function (cgraph_node *n, function *fn) + for (unsigned i = 0; i < pcalls.length (); i++) + create_cgraph_edge (n, pcalls[i]); + ipa_update_overall_fn_summary (n); ++ renumber_gimple_stmt_uids (DECL_STRUCT_FUNCTION (n->decl)); + + return 1; + } +@@ -1806,7 +1862,7 @@ pass_ipa_prefetch::gate (function *) + /* Don't bother doing anything if the program has errors. */ + && !seen_error () + && flag_lto_partition == LTO_PARTITION_ONE +- /* Only enable struct optimizations in lto or whole_program. */ ++ /* Only enable prefetch optimizations in lto or whole_program. 
*/ + && (in_lto_p || flag_whole_program)); + } + +diff --git a/gcc/ipa-sra.cc b/gcc/ipa-sra.cc +index 5355cf2f4..471b3927c 100644 +--- a/gcc/ipa-sra.cc ++++ b/gcc/ipa-sra.cc +@@ -3393,6 +3393,13 @@ param_splitting_across_edge (cgraph_edge *cs) + gcc_checking_assert (from_ifs && from_ifs->m_parameters); + + isra_call_summary *csum = call_sums->get (cs); ++ /* TODO: implement better support for call edges inserted after summary ++ collection but before sra wpa invocation. */ ++ if (!csum) ++ { ++ csum = call_sums->get_create (cs); ++ csum->m_return_ignored = true; ++ } + gcc_checking_assert (csum); + unsigned args_count = csum->m_arg_flow.length (); + isra_func_summary *to_ifs = func_sums->get (callee); +diff --git a/gcc/params.opt b/gcc/params.opt +index 5c07e3986..50385dfd7 100644 +--- a/gcc/params.opt ++++ b/gcc/params.opt +@@ -314,8 +314,8 @@ Common Joined UInteger Var(param_ipa_prefetch_distance_factor) Init(4) Param Opt + The factor represents the number of inductive variable incrementations to evaluate an indirect memory address for IPA prefetch. + + -param=ipa-prefetch-locality= +-Common Joined UInteger Var(param_ipa_prefetch_locality) Init(3) Param Optimization +-The flag represents temporal locality values in the following way: 0:pstl1strm, 1:pstl3keep, 2:pstl2keep, 3:pstl1keep. ++Common Joined UInteger Var(param_ipa_prefetch_locality) Init(3) IntegerRange(0, 3) Param Optimization ++The flag represents temporal locality value between 0 and 3, the higher value means the higher temporal locality in the data. 
+ + -param=ira-loop-reserved-regs= + Common Joined UInteger Var(param_ira_loop_reserved_regs) Init(2) Param Optimization +diff --git a/gcc/testsuite/gcc.dg/completion-1.c b/gcc/testsuite/gcc.dg/completion-1.c +index 64da64f1c..df2319c76 100644 +--- a/gcc/testsuite/gcc.dg/completion-1.c ++++ b/gcc/testsuite/gcc.dg/completion-1.c +@@ -2,6 +2,7 @@ + /* { dg-options "--completion=-fipa-ic" } */ + + /* { dg-begin-multiline-output "" } ++-fipa-ic + -fipa-icf + -fipa-icf-functions + -fipa-icf-variables +diff --git a/gcc/testsuite/gcc.dg/ipa/ipa-prefetch-xz.c b/gcc/testsuite/gcc.dg/ipa/ipa-prefetch-xz.c +new file mode 100644 +index 000000000..bd4fb2bdc +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/ipa/ipa-prefetch-xz.c +@@ -0,0 +1,1843 @@ ++/* { dg-do link } */ ++/* { dg-options "-O3 -fipa-ic -fipa-prefetch -flto -flto-partition=one -fdump-ipa-ipa_prefetch -fdump-ipa-icp" } */ ++/* { dg-require-effective-target lto } */ ++ ++/* Based on opensource xz code. */ ++ ++#include ++#include ++ ++typedef long int ptrdiff_t; ++typedef long unsigned int size_t; ++typedef unsigned int wchar_t; ++ ++typedef unsigned char __u_char; ++typedef unsigned short int __u_short; ++typedef unsigned int __u_int; ++typedef unsigned long int __u_long; ++ ++typedef signed char __int8_t; ++typedef unsigned char __uint8_t; ++typedef signed short int __int16_t; ++typedef unsigned short int __uint16_t; ++typedef signed int __int32_t; ++typedef unsigned int __uint32_t; ++ ++typedef signed long int __int64_t; ++typedef unsigned long int __uint64_t; ++ ++typedef __int8_t __int_least8_t; ++typedef __uint8_t __uint_least8_t; ++typedef __int16_t __int_least16_t; ++typedef __uint16_t __uint_least16_t; ++typedef __int32_t __int_least32_t; ++typedef __uint32_t __uint_least32_t; ++typedef __int64_t __int_least64_t; ++typedef __uint64_t __uint_least64_t; ++ ++typedef __int8_t int8_t; ++typedef __int16_t int16_t; ++typedef __int32_t int32_t; ++typedef __int64_t int64_t; ++ ++typedef __uint8_t uint8_t; ++typedef 
__uint16_t uint16_t; ++typedef __uint32_t uint32_t; ++typedef __uint64_t uint64_t; ++ ++typedef long int intptr_t; ++typedef unsigned long int uintptr_t; ++ ++static inline uint16_t ++read16ne(const uint8_t *buf) ++{ ++ uint16_t num; ++ memcpy(&num, buf, sizeof(num)); ++ return num; ++} ++ ++static inline uint32_t ++read32ne(const uint8_t *buf) ++{ ++ uint32_t num; ++ memcpy(&num, buf, sizeof(num)); ++ return num; ++} ++ ++static inline uint16_t ++aligned_read16ne(const uint8_t *buf) ++{ ++ uint16_t num; ++ memcpy(&num, __builtin_assume_aligned(buf, sizeof(num)), sizeof(num)); ++ return num; ++} ++ ++ ++static inline uint32_t ++aligned_read32ne(const uint8_t *buf) ++{ ++ uint32_t num; ++ memcpy(&num, __builtin_assume_aligned(buf, sizeof(num)), sizeof(num)); ++ return num; ++} ++ ++static inline uint64_t ++aligned_read64ne(const uint8_t *buf) ++{ ++ uint64_t num; ++ memcpy(&num, __builtin_assume_aligned(buf, sizeof(num)), sizeof(num)); ++ return num; ++} ++ ++typedef unsigned char lzma_bool; ++ ++typedef enum { ++ LZMA_RESERVED_ENUM = 0 ++} lzma_reserved_enum; ++ ++typedef enum { ++ LZMA_OK = 0, ++ LZMA_STREAM_END = 1, ++ LZMA_NO_CHECK = 2, ++ LZMA_UNSUPPORTED_CHECK = 3, ++ LZMA_GET_CHECK = 4, ++ LZMA_MEM_ERROR = 5, ++ LZMA_MEMLIMIT_ERROR = 6, ++ LZMA_FORMAT_ERROR = 7, ++ LZMA_OPTIONS_ERROR = 8, ++ LZMA_DATA_ERROR = 9, ++ LZMA_BUF_ERROR = 10, ++ LZMA_PROG_ERROR = 11, ++} lzma_ret; ++ ++typedef enum { ++ LZMA_RUN = 0, ++ LZMA_SYNC_FLUSH = 1, ++ LZMA_FULL_FLUSH = 2, ++ LZMA_FULL_BARRIER = 4, ++ LZMA_FINISH = 3 ++} lzma_action; ++ ++typedef struct { ++ void *( *alloc)(void *opaque, size_t nmemb, size_t size); ++ ++ void ( *free)(void *opaque, void *ptr); ++ ++ void *opaque; ++} lzma_allocator; ++ ++typedef uint64_t lzma_vli; ++ ++typedef enum { ++ LZMA_CHECK_NONE = 0, ++ LZMA_CHECK_CRC32 = 1, ++ LZMA_CHECK_CRC64 = 4, ++ LZMA_CHECK_SHA256 = 10 ++} lzma_check; ++ ++typedef struct { ++ lzma_vli id; ++ void *options; ++} lzma_filter; ++ ++typedef enum { ++ LZMA_MF_HC3 = 
0x03, ++ LZMA_MF_HC4 = 0x04, ++ LZMA_MF_BT2 = 0x12, ++ LZMA_MF_BT3 = 0x13, ++ LZMA_MF_BT4 = 0x14 ++} lzma_match_finder; ++ ++typedef struct lzma_next_coder_s lzma_next_coder; ++ ++typedef struct lzma_filter_info_s lzma_filter_info; ++ ++typedef lzma_ret (*lzma_init_function)( ++ lzma_next_coder *next, const lzma_allocator *allocator, ++ const lzma_filter_info *filters); ++ ++typedef lzma_ret (*lzma_code_function)( ++ void *coder, const lzma_allocator *allocator, ++ const uint8_t *restrict in, size_t *restrict in_pos, ++ size_t in_size, uint8_t *restrict out, ++ size_t *restrict out_pos, size_t out_size, ++ lzma_action action); ++ ++typedef void (*lzma_end_function)( ++ void *coder, const lzma_allocator *allocator); ++ ++struct lzma_filter_info_s { ++ lzma_vli id; ++ lzma_init_function init; ++ void *options; ++}; ++ ++struct lzma_next_coder_s { ++ void *coder; ++ lzma_vli id; ++ uintptr_t init; ++ ++ lzma_code_function code; ++ lzma_end_function end; ++ void (*get_progress)(void *coder, ++ uint64_t *progress_in, uint64_t *progress_out); ++ ++ lzma_check (*get_check)(const void *coder); ++ lzma_ret (*memconfig)(void *coder, uint64_t *memusage, ++ uint64_t *old_memlimit, uint64_t new_memlimit); ++ lzma_ret (*update)(void *coder, const lzma_allocator *allocator, ++ const lzma_filter *filters, const lzma_filter *reversed_filters); ++}; ++ ++typedef struct { ++ uint32_t len; ++ uint32_t dist; ++} lzma_match; ++ ++typedef struct lzma_mf_s lzma_mf; ++struct lzma_mf_s { ++ uint8_t *buffer; ++ uint32_t size; ++ uint32_t keep_size_before; ++ uint32_t keep_size_after; ++ uint32_t offset; ++ uint32_t read_pos; ++ uint32_t read_ahead; ++ uint32_t read_limit; ++ uint32_t write_pos; ++ uint32_t pending; ++ uint32_t (*find)(lzma_mf *mf, lzma_match *matches); ++ void (*skip)(lzma_mf *mf, uint32_t num); ++ uint32_t *hash; ++ uint32_t *son; ++ uint32_t cyclic_pos; ++ uint32_t cyclic_size; ++ uint32_t hash_mask; ++ uint32_t depth; ++ uint32_t nice_len; ++ uint32_t match_len_max; ++ 
lzma_action action; ++ uint32_t hash_count; ++ uint32_t sons_count; ++}; ++ ++typedef struct { ++ size_t before_size; ++ size_t dict_size; ++ size_t after_size; ++ size_t match_len_max; ++ size_t nice_len; ++ lzma_match_finder match_finder; ++ uint32_t depth; ++ const uint8_t *preset_dict; ++ uint32_t preset_dict_size; ++} lzma_lz_options; ++ ++typedef struct { ++ void *coder; ++ lzma_ret (*code)(void *coder, ++ lzma_mf *restrict mf, uint8_t *restrict out, ++ size_t *restrict out_pos, size_t out_size); ++ void (*end)(void *coder, const lzma_allocator *allocator); ++ lzma_ret (*options_update)(void *coder, const lzma_filter *filter); ++} lzma_lz_encoder; ++ ++static inline const uint8_t * ++mf_ptr(const lzma_mf *mf) ++{ ++ return mf->buffer + mf->read_pos; ++} ++ ++static inline uint32_t ++mf_avail(const lzma_mf *mf) ++{ ++ return mf->write_pos - mf->read_pos; ++} ++ ++typedef struct { ++ uint32_t state[8]; ++ uint64_t size; ++} lzma_sha256_state; ++ ++typedef struct { ++ union { ++ uint8_t u8[64]; ++ uint32_t u32[16]; ++ uint64_t u64[8]; ++ } buffer; ++ union { ++ uint32_t crc32; ++ uint64_t crc64; ++ lzma_sha256_state sha256; ++ } state; ++} lzma_check_state; ++ ++// The table is constantly initialized in the original code. ++// Skip it in the test. ++const uint32_t lzma_crc32_table[8][256]; ++ ++static inline uint32_t __attribute__((__always_inline__)) ++lzma_memcmplen(const uint8_t *buf1, const uint8_t *buf2, ++ uint32_t len, uint32_t limit) ++{ ++ while (len < limit) { ++ uint32_t x = read32ne(buf1 + len) - read32ne(buf2 + len); ++ if (x != 0) { ++ if ((x & 0xFFFF) == 0) { ++ len += 2; ++ x >>= 16; ++ } ++ ++ if ((x & 0xFF) == 0) ++ ++len; ++ ++ return ((len) < (limit) ? 
(len) : (limit)); ++ } ++ ++ len += 4; ++ } ++ ++ return limit; ++} ++ ++extern uint32_t ++lzma_mf_find(lzma_mf *mf, uint32_t *count_ptr, lzma_match *matches) ++{ ++ const uint32_t count = mf->find(mf, matches); ++ uint32_t len_best = 0; ++ ++ if (count > 0) { ++ len_best = matches[count - 1].len; ++ if (len_best == mf->nice_len) { ++ uint32_t limit = mf_avail(mf) + 1; ++ if (limit > mf->match_len_max) ++ limit = mf->match_len_max; ++ const uint8_t *p1 = mf_ptr(mf) - 1; ++ const uint8_t *p2 = p1 - matches[count - 1].dist - 1; ++ len_best = lzma_memcmplen(p1, p2, len_best, limit); ++ } ++ } ++ ++ *count_ptr = count; ++ ++mf->read_ahead; ++ ++ return len_best; ++} ++ ++static void ++normalize(lzma_mf *mf) ++{ ++ const uint32_t subvalue = ((4294967295U) - mf->cyclic_size); ++ ++ for (uint32_t i = 0; i < mf->hash_count; ++i) { ++ if (mf->hash[i] <= subvalue) ++ mf->hash[i] = 0; ++ else ++ mf->hash[i] -= subvalue; ++ } ++ ++ for (uint32_t i = 0; i < mf->sons_count; ++i) { ++ if (mf->son[i] <= subvalue) ++ mf->son[i] = 0; ++ else ++ mf->son[i] -= subvalue; ++ } ++ ++ mf->offset -= subvalue; ++ return; ++} ++ ++static void ++move_pos(lzma_mf *mf) ++{ ++ if (++mf->cyclic_pos == mf->cyclic_size) ++ mf->cyclic_pos = 0; ++ ++mf->read_pos; ++ if (__builtin_expect(mf->read_pos + mf->offset == (4294967295U), 0 )) ++ normalize(mf); ++} ++ ++static void ++move_pending(lzma_mf *mf) ++{ ++ ++mf->read_pos; ++ ++mf->pending; ++} ++ ++static lzma_match * ++hc_find_func( ++ const uint32_t len_limit, ++ const uint32_t pos, ++ const uint8_t *const cur, ++ uint32_t cur_match, ++ uint32_t depth, ++ uint32_t *const son, ++ const uint32_t cyclic_pos, ++ const uint32_t cyclic_size, ++ lzma_match *matches, ++ uint32_t len_best) ++{ ++ son[cyclic_pos] = cur_match; ++ ++ while (1) { ++ const uint32_t delta = pos - cur_match; ++ if (depth-- == 0 || delta >= cyclic_size) ++ return matches; ++ ++ const uint8_t *const pb = cur - delta; ++ cur_match = son[cyclic_pos - delta ++ + (delta > cyclic_pos ? 
cyclic_size : 0)]; ++ ++ if (pb[len_best] == cur[len_best] && pb[0] == cur[0]) { ++ uint32_t len = lzma_memcmplen(pb, cur, 1, len_limit); ++ ++ if (len_best < len) { ++ len_best = len; ++ matches->len = len; ++ matches->dist = delta - 1; ++ ++matches; ++ ++ if (len == len_limit) ++ return matches; ++ } ++ } ++ } ++} ++ ++extern uint32_t ++lzma_mf_hc3_find(lzma_mf *mf, lzma_match *matches) ++{ ++ uint32_t len_limit = mf_avail(mf); ++ if (mf->nice_len <= len_limit) { ++ len_limit = mf->nice_len; ++ } else if (len_limit < (3)) { ++ move_pending(mf); ++ return 0; ++ } ++ const uint8_t *cur = mf_ptr(mf); ++ const uint32_t pos = mf->read_pos + mf->offset; ++ uint32_t matches_count = 0; ++ ++ const uint32_t temp = lzma_crc32_table[0][cur[0]] ^ cur[1]; ++ const uint32_t hash_2_value = temp & ((1U << 10) - 1); ++ const uint32_t hash_value = (temp ^ ((uint32_t)(cur[2]) << 8)) & mf->hash_mask; ++ ++ const uint32_t delta2 = pos - mf->hash[hash_2_value]; ++ const uint32_t cur_match = mf->hash[((1U << 10)) + hash_value]; ++ ++ mf->hash[hash_2_value] = pos; ++ mf->hash[((1U << 10)) + hash_value] = pos; ++ ++ uint32_t len_best = 2; ++ ++ if (delta2 < mf->cyclic_size && *(cur - delta2) == *cur) { ++ len_best = lzma_memcmplen(cur - delta2, cur, len_best, len_limit); ++ ++ matches[0].len = len_best; ++ matches[0].dist = delta2 - 1; ++ matches_count = 1; ++ ++ if (len_best == len_limit) { ++ mf->son[mf->cyclic_pos] = cur_match; ++ move_pos(mf); ++ return 1; ++ } ++ } ++ ++ matches_count = hc_find_func(len_limit, pos, cur, cur_match, mf->depth, ++ mf->son, mf->cyclic_pos, mf->cyclic_size, ++ matches + matches_count, len_best) - matches; ++ move_pos(mf); ++ return matches_count; ++} ++ ++extern void ++lzma_mf_hc3_skip(lzma_mf *mf, uint32_t amount) ++{ ++ do { ++ if (mf_avail(mf) < 3) { ++ move_pending(mf); ++ continue; ++ } ++ ++ const uint8_t *cur = mf_ptr(mf); ++ const uint32_t pos = mf->read_pos + mf->offset; ++ ++ const uint32_t temp = lzma_crc32_table[0][cur[0]] ^ cur[1]; ++ const 
uint32_t hash_2_value = temp & ((1U << 10) - 1); ++ const uint32_t hash_value = (temp ^ ((uint32_t)(cur[2]) << 8)) & mf->hash_mask; ++ ++ const uint32_t cur_match ++ = mf->hash[((1U << 10)) + hash_value]; ++ ++ mf->hash[hash_2_value] = pos; ++ mf->hash[((1U << 10)) + hash_value] = pos; ++ ++ do { mf->son[mf->cyclic_pos] = cur_match; move_pos(mf); } while (0); ++ ++ } while (--amount != 0); ++} ++ ++extern uint32_t ++lzma_mf_hc4_find(lzma_mf *mf, lzma_match *matches) ++{ ++ uint32_t len_limit = mf_avail(mf); ++ if (mf->nice_len <= len_limit) { ++ len_limit = mf->nice_len; ++ } else if (len_limit < (4)) { ++ move_pending(mf); ++ return 0; ++ } ++ const uint8_t *cur = mf_ptr(mf); ++ const uint32_t pos = mf->read_pos + mf->offset; ++ uint32_t matches_count = 0; ++ ++ const uint32_t temp = lzma_crc32_table[0][cur[0]] ^ cur[1]; ++ const uint32_t hash_2_value = temp & ((1U << 10) - 1); ++ const uint32_t hash_3_value = (temp ^ ((uint32_t)(cur[2]) << 8)) ++ & ((1U << 16) - 1); ++ const uint32_t hash_value = (temp ^ ((uint32_t)(cur[2]) << 8) ++ ^ (lzma_crc32_table[0][cur[3]] << 5)) ++ & mf->hash_mask; ++ uint32_t delta2 = pos - mf->hash[hash_2_value]; ++ const uint32_t delta3 ++ = pos - mf->hash[((1U << 10)) + hash_3_value]; ++ const uint32_t cur_match = mf->hash[((1U << 10) + (1U << 16)) + hash_value]; ++ ++ mf->hash[hash_2_value ] = pos; ++ mf->hash[((1U << 10)) + hash_3_value] = pos; ++ mf->hash[((1U << 10) + (1U << 16)) + hash_value] = pos; ++ ++ uint32_t len_best = 1; ++ ++ if (delta2 < mf->cyclic_size && *(cur - delta2) == *cur) { ++ len_best = 2; ++ matches[0].len = 2; ++ matches[0].dist = delta2 - 1; ++ matches_count = 1; ++ } ++ ++ if (delta2 != delta3 && delta3 < mf->cyclic_size ++ && *(cur - delta3) == *cur) { ++ len_best = 3; ++ matches[matches_count++].dist = delta3 - 1; ++ delta2 = delta3; ++ } ++ ++ if (matches_count != 0) { ++ len_best = lzma_memcmplen(cur - delta2, cur, ++ len_best, len_limit); ++ ++ matches[matches_count - 1].len = len_best; ++ ++ if 
(len_best == len_limit) { ++ mf->son[mf->cyclic_pos] = cur_match; move_pos(mf); ++ return matches_count; ++ } ++ } ++ ++ if (len_best < 3) ++ len_best = 3; ++ ++ matches_count = hc_find_func(len_limit, pos, cur, cur_match, mf->depth, ++ mf->son, mf->cyclic_pos, mf->cyclic_size, ++ matches + matches_count, len_best) - matches; ++ move_pos(mf); ++ return matches_count; ++} ++ ++extern void ++lzma_mf_hc4_skip(lzma_mf *mf, uint32_t amount) ++{ ++ do { ++ if (mf_avail(mf) < 4) { ++ move_pending(mf); ++ continue; ++ } ++ ++ const uint8_t *cur = mf_ptr(mf); ++ const uint32_t pos = mf->read_pos + mf->offset; ++ ++ const uint32_t temp = lzma_crc32_table[0][cur[0]] ^ cur[1]; ++ const uint32_t hash_2_value = temp & ((1U << 10) - 1); ++ const uint32_t hash_3_value = (temp ^ ((uint32_t)(cur[2]) << 8)) & ((1U << 16) - 1); ++ const uint32_t hash_value = (temp ^ ((uint32_t)(cur[2]) << 8) ++ ^ (lzma_crc32_table[0][cur[3]] << 5)) ++ & mf->hash_mask; ++ ++ const uint32_t cur_match ++ = mf->hash[((1U << 10) + (1U << 16)) + hash_value]; ++ ++ mf->hash[hash_2_value] = pos; ++ mf->hash[((1U << 10)) + hash_3_value] = pos; ++ mf->hash[((1U << 10) + (1U << 16)) + hash_value] = pos; ++ ++ mf->son[mf->cyclic_pos] = cur_match; ++ move_pos(mf); ++ } while (--amount != 0); ++} ++ ++static lzma_match * ++bt_find_func( ++ const uint32_t len_limit, ++ const uint32_t pos, ++ const uint8_t *const cur, ++ uint32_t cur_match, ++ uint32_t depth, ++ uint32_t *const son, ++ const uint32_t cyclic_pos, ++ const uint32_t cyclic_size, ++ lzma_match *matches, ++ uint32_t len_best) ++{ ++ uint32_t *ptr0 = son + (cyclic_pos << 1) + 1; ++ uint32_t *ptr1 = son + (cyclic_pos << 1); ++ ++ uint32_t len0 = 0; ++ uint32_t len1 = 0; ++ ++ while (1) { ++ const uint32_t delta = pos - cur_match; ++ if (depth-- == 0 || delta >= cyclic_size) { ++ *ptr0 = 0; ++ *ptr1 = 0; ++ return matches; ++ } ++ ++ uint32_t *const pair = son + ((cyclic_pos - delta ++ + (delta > cyclic_pos ? 
cyclic_size : 0)) ++ << 1); ++ ++ const uint8_t *const pb = cur - delta; ++ uint32_t len = ((len0) < (len1) ? (len0) : (len1)); ++ ++ if (pb[len] == cur[len]) { ++ len = lzma_memcmplen(pb, cur, len + 1, len_limit); ++ ++ if (len_best < len) { ++ len_best = len; ++ matches->len = len; ++ matches->dist = delta - 1; ++ ++matches; ++ ++ if (len == len_limit) { ++ *ptr1 = pair[0]; ++ *ptr0 = pair[1]; ++ return matches; ++ } ++ } ++ } ++ ++ if (pb[len] < cur[len]) { ++ *ptr1 = cur_match; ++ ptr1 = pair + 1; ++ cur_match = *ptr1; ++ len1 = len; ++ } else { ++ *ptr0 = cur_match; ++ ptr0 = pair; ++ cur_match = *ptr0; ++ len0 = len; ++ } ++ } ++} ++ ++ ++static void ++bt_skip_func( ++ const uint32_t len_limit, ++ const uint32_t pos, ++ const uint8_t *const cur, ++ uint32_t cur_match, ++ uint32_t depth, ++ uint32_t *const son, ++ const uint32_t cyclic_pos, ++ const uint32_t cyclic_size) ++{ ++ uint32_t *ptr0 = son + (cyclic_pos << 1) + 1; ++ uint32_t *ptr1 = son + (cyclic_pos << 1); ++ ++ uint32_t len0 = 0; ++ uint32_t len1 = 0; ++ ++ while (1) { ++ const uint32_t delta = pos - cur_match; ++ if (depth-- == 0 || delta >= cyclic_size) { ++ *ptr0 = 0; ++ *ptr1 = 0; ++ return; ++ } ++ ++ uint32_t *pair = son + ((cyclic_pos - delta ++ + (delta > cyclic_pos ? cyclic_size : 0)) ++ << 1); ++ const uint8_t *pb = cur - delta; ++ uint32_t len = ((len0) < (len1) ? 
(len0) : (len1)); ++ ++ if (pb[len] == cur[len]) { ++ len = lzma_memcmplen(pb, cur, len + 1, len_limit); ++ ++ if (len == len_limit) { ++ *ptr1 = pair[0]; ++ *ptr0 = pair[1]; ++ return; ++ } ++ } ++ ++ if (pb[len] < cur[len]) { ++ *ptr1 = cur_match; ++ ptr1 = pair + 1; ++ cur_match = *ptr1; ++ len1 = len; ++ } else { ++ *ptr0 = cur_match; ++ ptr0 = pair; ++ cur_match = *ptr0; ++ len0 = len; ++ } ++ } ++} ++ ++extern uint32_t ++lzma_mf_bt2_find(lzma_mf *mf, lzma_match *matches) ++{ ++ uint32_t len_limit = mf_avail(mf); ++ if (mf->nice_len <= len_limit) { ++ len_limit = mf->nice_len; ++ } else if (len_limit < (2) || (mf->action == LZMA_SYNC_FLUSH)) { ++ move_pending(mf); ++ return 0; ++ } ++ const uint8_t *cur = mf_ptr(mf); ++ const uint32_t pos = mf->read_pos + mf->offset; ++ uint32_t matches_count = 0; ++ const uint32_t hash_value = read16ne(cur); ++ const uint32_t cur_match = mf->hash[hash_value]; ++ mf->hash[hash_value] = pos; ++ ++ matches_count = bt_find_func(len_limit, pos, cur, cur_match, mf->depth, ++ mf->son, mf->cyclic_pos, mf->cyclic_size, ++ matches + matches_count, 1) - matches; ++ move_pos(mf); ++ return matches_count; ++} ++ ++extern void ++lzma_mf_bt2_skip(lzma_mf *mf, uint32_t amount) ++{ ++ do { ++ uint32_t len_limit = mf_avail(mf); ++ if (mf->nice_len <= len_limit) { ++ len_limit = mf->nice_len; ++ } else if (len_limit < (2) || (mf->action == LZMA_SYNC_FLUSH)) { ++ move_pending(mf); ++ continue; ++ } ++ const uint8_t *cur = mf_ptr(mf); ++ const uint32_t pos = mf->read_pos + mf->offset; ++ ++ const uint32_t hash_value = read16ne(cur); ++ const uint32_t cur_match = mf->hash[hash_value]; ++ mf->hash[hash_value] = pos; ++ ++ bt_skip_func(len_limit, pos, cur, cur_match, mf->depth, mf->son, ++ mf->cyclic_pos, mf->cyclic_size); ++ move_pos(mf); ++ } while (--amount != 0); ++} ++ ++extern uint32_t ++lzma_mf_bt3_find(lzma_mf *mf, lzma_match *matches) ++{ ++ uint32_t len_limit = mf_avail(mf); ++ if (mf->nice_len <= len_limit) { ++ len_limit = mf->nice_len; 
++ } else if (len_limit < (3) || (1 && mf->action == LZMA_SYNC_FLUSH)) { ++ move_pending(mf); ++ return 0; ++ } ++ const uint8_t *cur = mf_ptr(mf); ++ const uint32_t pos = mf->read_pos + mf->offset; ++ uint32_t matches_count = 0; ++ ++ const uint32_t temp = lzma_crc32_table[0][cur[0]] ^ cur[1]; ++ const uint32_t hash_2_value = temp & ((1U << 10) - 1); ++ const uint32_t hash_value = (temp ^ ((uint32_t)(cur[2]) << 8)) & mf->hash_mask; ++ ++ const uint32_t delta2 = pos - mf->hash[hash_2_value]; ++ const uint32_t cur_match = mf->hash[((1U << 10)) + hash_value]; ++ ++ mf->hash[hash_2_value] = pos; ++ mf->hash[((1U << 10)) + hash_value] = pos; ++ ++ uint32_t len_best = 2; ++ ++ if (delta2 < mf->cyclic_size && *(cur - delta2) == *cur) { ++ len_best = lzma_memcmplen( ++ cur, cur - delta2, len_best, len_limit); ++ ++ matches[0].len = len_best; ++ matches[0].dist = delta2 - 1; ++ matches_count = 1; ++ ++ if (len_best == len_limit) { ++ bt_skip_func(len_limit, pos, cur, cur_match, mf->depth, mf->son, ++ mf->cyclic_pos, mf->cyclic_size); ++ move_pos(mf); ++ return 1; ++ } ++ } ++ ++ matches_count = bt_find_func(len_limit, pos, cur, cur_match, mf->depth, ++ mf->son, mf->cyclic_pos, mf->cyclic_size, ++ matches + matches_count, len_best) - matches; ++ move_pos(mf); ++ return matches_count; ++} ++ ++ ++extern void ++lzma_mf_bt3_skip(lzma_mf *mf, uint32_t amount) ++{ ++ do { ++ uint32_t len_limit = mf_avail(mf); ++ if (mf->nice_len <= len_limit) { ++ len_limit = mf->nice_len; } ++ else if (len_limit < (3) || (1 && mf->action == LZMA_SYNC_FLUSH)) { ++ move_pending(mf); ++ continue; ++ } ++ const uint8_t *cur = mf_ptr(mf); ++ const uint32_t pos = mf->read_pos + mf->offset; ++ ++ const uint32_t temp = lzma_crc32_table[0][cur[0]] ^ cur[1]; ++ const uint32_t hash_2_value = temp & ((1U << 10) - 1); ++ const uint32_t hash_value = (temp ^ ((uint32_t)(cur[2]) << 8)) & mf->hash_mask; ++ ++ const uint32_t cur_match = mf->hash[((1U << 10)) + hash_value]; ++ ++ mf->hash[hash_2_value] = pos; ++ 
mf->hash[((1U << 10)) + hash_value] = pos; ++ ++ bt_skip_func(len_limit, pos, cur, cur_match, mf->depth, mf->son, ++ mf->cyclic_pos, mf->cyclic_size); ++ move_pos(mf); ++ } while (--amount != 0); ++} ++ ++extern uint32_t ++lzma_mf_bt4_find(lzma_mf *mf, lzma_match *matches) ++{ ++ uint32_t len_limit = mf->write_pos - mf->read_pos; ++ if (mf->nice_len <= len_limit) { ++ len_limit = mf->nice_len; ++ } else if (len_limit < (4) || (mf->action == LZMA_SYNC_FLUSH)) { ++ ++mf->read_pos; ++ ++mf->pending; ++ return 0; ++ } ++ ++ const uint8_t *cur = mf->buffer + mf->read_pos; ++ const uint32_t pos = mf->read_pos + mf->offset; ++ uint32_t matches_count = 0; ++ ++ const uint32_t temp = lzma_crc32_table[0][cur[0]] ^ cur[1]; ++ const uint32_t hash_2_value = temp & ((1U << 10) - 1); ++ const uint32_t hash_3_value = (temp ^ ((uint32_t)(cur[2]) << 8)) & ((1U << 16) - 1); ++ const uint32_t hash_value = (temp ^ ((uint32_t)(cur[2]) << 8) ++ ^ (lzma_crc32_table[0][cur[3]] << 5)) ++ & mf->hash_mask; ++ ++ uint32_t delta2 = pos - mf->hash[hash_2_value]; ++ const uint32_t delta3 = pos - mf->hash[((1U << 10)) + hash_3_value]; ++ const uint32_t cur_match = mf->hash[((1U << 10) + (1U << 16)) + hash_value]; ++ ++ mf->hash[hash_2_value] = pos; ++ mf->hash[((1U << 10)) + hash_3_value] = pos; ++ mf->hash[((1U << 10) + (1U << 16)) + hash_value] = pos; ++ ++ uint32_t len_best = 1; ++ ++ if (delta2 < mf->cyclic_size && *(cur - delta2) == *cur) { ++ len_best = 2; ++ matches[0].len = 2; ++ matches[0].dist = delta2 - 1; ++ matches_count = 1; ++ } ++ ++ if (delta2 != delta3 && delta3 < mf->cyclic_size && *(cur - delta3) == *cur) { ++ len_best = 3; ++ matches[matches_count++].dist = delta3 - 1; ++ delta2 = delta3; ++ } ++ ++ if (matches_count != 0) { ++ len_best = lzma_memcmplen(cur, cur - delta2, len_best, len_limit); ++ ++ matches[matches_count - 1].len = len_best; ++ ++ if (len_best == len_limit) { ++ bt_skip_func(len_limit, pos, cur, cur_match, mf->depth, mf->son, ++ mf->cyclic_pos, 
mf->cyclic_size); ++ move_pos(mf); ++ return matches_count; ++ } ++ } ++ ++ if (len_best < 3) ++ len_best = 3; ++ ++ matches_count = bt_find_func(len_limit, pos, cur, cur_match, mf->depth, mf->son, ++ mf->cyclic_pos, mf->cyclic_size, ++ matches + matches_count, len_best) - matches; ++ move_pos(mf); ++ return matches_count; ++} ++ ++extern void ++lzma_mf_bt4_skip(lzma_mf *mf, uint32_t amount) ++{ ++ do { ++ uint32_t len_limit = mf_avail(mf); ++ if (mf->nice_len <= len_limit) { ++ len_limit = mf->nice_len; ++ } else if (len_limit < (4) || (mf->action == LZMA_SYNC_FLUSH)) { ++ move_pending(mf); ++ continue; ++ } ++ ++ const uint8_t *cur = mf->buffer + mf->read_pos; ++ const uint32_t pos = mf->read_pos + mf->offset; ++ ++ const uint32_t temp = lzma_crc32_table[0][cur[0]] ^ cur[1]; ++ const uint32_t hash_2_value = temp & ((1U << 10) - 1); ++ const uint32_t hash_3_value = (temp ^ ((uint32_t)(cur[2]) << 8)) ++ & ((1U << 16) - 1); ++ const uint32_t hash_value = (temp ^ ((uint32_t)(cur[2]) << 8) ++ ^ (lzma_crc32_table[0][cur[3]] << 5)) ++ & mf->hash_mask; ++ ++ const uint32_t cur_match = mf->hash[((1U << 10) + (1U << 16)) + hash_value]; ++ ++ mf->hash[hash_2_value] = pos; ++ mf->hash[((1U << 10)) + hash_3_value] = pos; ++ mf->hash[((1U << 10) + (1U << 16)) + hash_value] = pos; ++ ++ bt_skip_func(len_limit, pos, cur, cur_match, mf->depth, mf->son, ++ mf->cyclic_pos, mf->cyclic_size); ++ move_pos(mf); ++ } while (--amount != 0); ++} ++ ++static inline void ++mf_skip(lzma_mf *mf, uint32_t amount) ++{ ++ if (amount != 0) { ++ mf->skip(mf, amount); ++ mf->read_ahead += amount; ++ } ++} ++ ++typedef struct lzma_lzma1_encoder_s lzma_lzma1_encoder; ++typedef uint16_t probability; ++ ++typedef struct { ++ probability choice; ++ probability choice2; ++ probability low[(1 << 4)][(1 << 3)]; ++ probability mid[(1 << 4)][(1 << 3)]; ++ probability high[(1 << 8)]; ++ uint32_t prices[(1 << 4)][((1 << 3) + (1 << 3) + (1 << 8))]; ++ uint32_t table_size; ++ uint32_t counters[(1 << 4)]; ++} 
lzma_length_encoder; ++ ++typedef struct { ++ uint64_t low; ++ uint64_t cache_size; ++ uint32_t range; ++ uint8_t cache; ++ size_t count; ++ size_t pos; ++ ++ enum { ++ RC_BIT_0, ++ RC_BIT_1, ++ RC_DIRECT_0, ++ RC_DIRECT_1, ++ RC_FLUSH, ++ } symbols[58]; ++ ++ probability *probs[58]; ++} lzma_range_encoder; ++ ++ ++typedef enum { ++ STATE_LIT_LIT, ++ STATE_MATCH_LIT_LIT, ++ STATE_REP_LIT_LIT, ++ STATE_SHORTREP_LIT_LIT, ++ STATE_MATCH_LIT, ++ STATE_REP_LIT, ++ STATE_SHORTREP_LIT, ++ STATE_LIT_MATCH, ++ STATE_LIT_LONGREP, ++ STATE_LIT_SHORTREP, ++ STATE_NONLIT_MATCH, ++ STATE_NONLIT_REP, ++} lzma_lzma_state; ++ ++typedef struct { ++ lzma_lzma_state state; ++ _Bool prev_1_is_literal; ++ _Bool prev_2; ++ ++ uint32_t pos_prev_2; ++ uint32_t back_prev_2; ++ ++ uint32_t price; ++ uint32_t pos_prev; ++ uint32_t back_prev; ++ ++ uint32_t backs[4]; ++} lzma_optimal; ++ ++struct lzma_lzma1_encoder_s { ++ lzma_range_encoder rc; ++ lzma_lzma_state state; ++ uint32_t reps[4]; ++ lzma_match matches[(2 + ((1 << 3) + (1 << 3) + (1 << 8)) - 1) + 1]; ++ uint32_t matches_count; ++ uint32_t longest_match_length; ++ _Bool fast_mode; ++ _Bool is_initialized; ++ _Bool is_flushed; ++ uint32_t pos_mask; ++ uint32_t literal_context_bits; ++ uint32_t literal_pos_mask; ++ ++ probability literal[(1 << 4)][0x300]; ++ probability is_match[12][(1 << 4)]; ++ probability is_rep[12]; ++ probability is_rep0[12]; ++ probability is_rep1[12]; ++ probability is_rep2[12]; ++ probability is_rep0_long[12][(1 << 4)]; ++ probability dist_slot[4][(1 << 6)]; ++ probability dist_special[(1 << (14 / 2)) - 14]; ++ probability dist_align[(1 << 4)]; ++ ++ lzma_length_encoder match_len_encoder; ++ lzma_length_encoder rep_len_encoder; ++ ++ uint32_t dist_slot_prices[4][(1 << 6)]; ++ uint32_t dist_prices[4][(1 << (14 / 2))]; ++ uint32_t dist_table_size; ++ uint32_t match_price_count; ++ ++ uint32_t align_prices[(1 << 4)]; ++ uint32_t align_price_count; ++ uint32_t opts_end_index; ++ uint32_t opts_current_index; ++ 
lzma_optimal opts[(1 << 12)]; ++}; ++ ++extern void ++lzma_lzma_optimum_fast(lzma_lzma1_encoder *restrict coder, ++ lzma_mf *restrict mf, ++ uint32_t *restrict back_res, uint32_t *restrict len_res) ++{ ++ const uint32_t nice_len = mf->nice_len; ++ ++ uint32_t len_main; ++ uint32_t matches_count; ++ if (mf->read_ahead == 0) { ++ len_main = lzma_mf_find(mf, &matches_count, coder->matches); ++ } else { ++ len_main = coder->longest_match_length; ++ matches_count = coder->matches_count; ++ } ++ ++ const uint8_t *buf = mf_ptr(mf) - 1; ++ const uint32_t buf_avail ++ = ((mf_avail(mf) + 1) < ((2 + ((1 << 3) + (1 << 3) + (1 << 8)) - 1)) ++ ? (mf_avail(mf) + 1) : ((2 + ((1 << 3) + (1 << 3) + (1 << 8)) - 1))); ++ ++ if (buf_avail < 2) { ++ *back_res = (4294967295U); ++ *len_res = 1; ++ return; ++ } ++ ++ uint32_t rep_len = 0; ++ uint32_t rep_index = 0; ++ ++ for (uint32_t i = 0; i < 4; ++i) { ++ const uint8_t *const buf_back = buf - coder->reps[i] - 1; ++ if ((read16ne(buf) != read16ne(buf_back))) ++ continue; ++ const uint32_t len = lzma_memcmplen(buf, buf_back, 2, buf_avail); ++ if (len >= nice_len) { ++ *back_res = i; ++ *len_res = len; ++ mf_skip(mf, len - 1); ++ return; ++ } ++ if (len > rep_len) { ++ rep_index = i; ++ rep_len = len; ++ } ++ } ++ if (len_main >= nice_len) { ++ *back_res = coder->matches[matches_count - 1].dist + 4; ++ *len_res = len_main; ++ mf_skip(mf, len_main - 1); ++ return; ++ } ++ ++ uint32_t back_main = 0; ++ if (len_main >= 2) { ++ back_main = coder->matches[matches_count - 1].dist; ++ while (matches_count > 1 && len_main == ++ coder->matches[matches_count - 2].len + 1) { ++ if (!(((back_main) >> 7) > (coder->matches[ matches_count - 2].dist))) ++ break; ++ --matches_count; ++ len_main = coder->matches[matches_count - 1].len; ++ back_main = coder->matches[matches_count - 1].dist; ++ } ++ if (len_main == 2 && back_main >= 0x80) ++ len_main = 1; ++ } ++ ++ if (rep_len >= 2) { ++ if (rep_len + 1 >= len_main ++ || (rep_len + 2 >= len_main ++ && 
back_main > (1U << 9)) ++ || (rep_len + 3 >= len_main ++ && back_main > (1U << 15))) { ++ *back_res = rep_index; ++ *len_res = rep_len; ++ mf_skip(mf, rep_len - 1); ++ return; ++ } ++ } ++ ++ if (len_main < 2 || buf_avail <= 2) { ++ *back_res = (4294967295U); ++ *len_res = 1; ++ return; ++ } ++ ++ coder->longest_match_length = lzma_mf_find(mf, ++ &coder->matches_count, coder->matches); ++ ++ if (coder->longest_match_length >= 2) { ++ const uint32_t new_dist = coder->matches[ ++ coder->matches_count - 1].dist; ++ ++ if ((coder->longest_match_length >= len_main ++ && new_dist < back_main) ++ || (coder->longest_match_length == len_main + 1 ++ && !(((new_dist) >> 7) > (back_main))) ++ || (coder->longest_match_length > len_main + 1) ++ || (coder->longest_match_length + 1 >= len_main ++ && len_main >= 3 ++ && (((back_main) >> 7) > (new_dist)))) { ++ *back_res = (4294967295U); ++ *len_res = 1; ++ return; ++ } ++ } ++ ++buf; ++ const uint32_t limit = ((2) > (len_main - 1) ? (2) : (len_main - 1)); ++ for (uint32_t i = 0; i < 4; ++i) { ++ if (memcmp(buf, buf - coder->reps[i] - 1, limit) == 0) { ++ *back_res = (4294967295U); ++ *len_res = 1; ++ return; ++ } ++ } ++ ++ *back_res = back_main + 4; ++ *len_res = len_main; ++ mf_skip(mf, len_main - 2); ++ return; ++} ++ ++static inline void ++rc_bit(lzma_range_encoder *rc, probability *prob, uint32_t bit) ++{ ++ rc->symbols[rc->count] = bit; ++ rc->probs[rc->count] = prob; ++ ++rc->count; ++} ++ ++static inline void ++rc_bittree(lzma_range_encoder *rc, probability *probs, ++ uint32_t bit_count, uint32_t symbol) ++{ ++ uint32_t model_index = 1; ++ ++ do { ++ const uint32_t bit = (symbol >> --bit_count) & 1; ++ rc_bit(rc, &probs[model_index], bit); ++ model_index = (model_index << 1) + bit; ++ } while (bit_count != 0); ++} ++ ++static _Bool ++encode_init(lzma_lzma1_encoder *coder, lzma_mf *mf) ++{ ++ if (mf->read_pos == mf->read_limit) { ++ if (mf->action == LZMA_RUN) ++ return 0; ++ } else { ++ mf_skip(mf, 1); ++ mf->read_ahead = 
0; ++ rc_bit(&coder->rc, &coder->is_match[0][0], 0); ++ rc_bittree(&coder->rc, coder->literal[0], 8, mf->buffer[0]); ++ } ++ ++ coder->is_initialized = 1; ++ ++ return 1; ++} ++ ++static inline uint32_t ++mf_position(const lzma_mf *mf) ++{ ++ return mf->read_pos - mf->read_ahead; ++} ++ ++static inline _Bool ++rc_shift_low(lzma_range_encoder *rc, ++ uint8_t *out, size_t *out_pos, size_t out_size) ++{ ++ if ((uint32_t)(rc->low) < (uint32_t)(0xFF000000) ++ || (uint32_t)(rc->low >> 32) != 0) { ++ do { ++ if (*out_pos == out_size) ++ return 1; ++ ++ out[*out_pos] = rc->cache + (uint8_t)(rc->low >> 32); ++ ++*out_pos; ++ rc->cache = 0xFF; ++ } while (--rc->cache_size != 0); ++ rc->cache = (rc->low >> 24) & 0xFF; ++ } ++ ++ ++rc->cache_size; ++ rc->low = (rc->low & 0x00FFFFFF) << 8; ++ return 0; ++} ++ ++static inline void ++rc_reset(lzma_range_encoder *rc) ++{ ++ rc->low = 0; ++ rc->cache_size = 1; ++ rc->range = (4294967295U); ++ rc->cache = 0; ++ rc->count = 0; ++ rc->pos = 0; ++} ++ ++static inline _Bool ++rc_encode(lzma_range_encoder *rc, ++ uint8_t *out, size_t *out_pos, size_t out_size) ++{ ++ while (rc->pos < rc->count) { ++ if (rc->range < (1U << 24)) { ++ if (rc_shift_low(rc, out, out_pos, out_size)) ++ return 1; ++ rc->range <<= 8; ++ } ++ ++ switch (rc->symbols[rc->pos]) { ++ case RC_BIT_0: { ++ probability prob = *rc->probs[rc->pos]; ++ rc->range = (rc->range >> 11) ++ * prob; ++ prob += ((1U << 11) - prob) >> 5; ++ *rc->probs[rc->pos] = prob; ++ break; ++ } ++ ++ case RC_BIT_1: { ++ probability prob = *rc->probs[rc->pos]; ++ const uint32_t bound = prob * (rc->range ++ >> 11); ++ rc->low += bound; ++ rc->range -= bound; ++ prob -= prob >> 5; ++ *rc->probs[rc->pos] = prob; ++ break; ++ } ++ ++ case RC_DIRECT_0: ++ rc->range >>= 1; ++ break; ++ ++ case RC_DIRECT_1: ++ rc->range >>= 1; ++ rc->low += rc->range; ++ break; ++ ++ case RC_FLUSH: ++ rc->range = (4294967295U); ++ do { ++ if (rc_shift_low(rc, out, out_pos, out_size)) ++ return 1; ++ } while (++rc->pos 
< rc->count); ++ ++ rc_reset(rc); ++ return 0; ++ ++ default: ++ break; ++ } ++ ++rc->pos; ++ } ++ ++ rc->count = 0; ++ rc->pos = 0; ++ return 0; ++} ++ ++static inline uint64_t ++rc_pending(const lzma_range_encoder *rc) ++{ ++ return rc->cache_size + 5 - 1; ++} ++ ++static inline void ++literal_matched(lzma_range_encoder *rc, probability *subcoder, ++ uint32_t match_byte, uint32_t symbol) ++{ ++ uint32_t offset = 0x100; ++ symbol += 1U << 8; ++ ++ do { ++ match_byte <<= 1; ++ const uint32_t match_bit = match_byte & offset; ++ const uint32_t subcoder_index ++ = offset + match_bit + (symbol >> 8); ++ const uint32_t bit = (symbol >> 7) & 1; ++ rc_bit(rc, &subcoder[subcoder_index], bit); ++ ++ symbol <<= 1; ++ offset &= ~(match_byte ^ symbol); ++ ++ } while (symbol < (1U << 16)); ++} ++ ++static inline void ++literal(lzma_lzma1_encoder *coder, lzma_mf *mf, uint32_t position) ++{ ++ const uint8_t cur_byte = mf->buffer[mf->read_pos - mf->read_ahead]; ++ probability *subcoder = ((coder->literal)[ ++ (((position) & (coder->literal_pos_mask)) ++ << (coder->literal_context_bits)) ++ + ((uint32_t)(mf->buffer[mf->read_pos - mf->read_ahead - 1]) ++ >> (8U - (coder->literal_context_bits)))]); ++ ++ if (((coder->state) < 7)) { ++ rc_bittree(&coder->rc, subcoder, 8, cur_byte); ++ } else { ++ const uint8_t match_byte ++ = mf->buffer[mf->read_pos - coder->reps[0] - 1 - mf->read_ahead]; ++ literal_matched(&coder->rc, subcoder, match_byte, cur_byte); ++ } ++ coder->state ++ = ((coder->state) <= STATE_SHORTREP_LIT_LIT ++ ? STATE_LIT_LIT : ((coder->state) <= STATE_LIT_SHORTREP ++ ? 
(coder->state) - 3 : (coder->state) - 6)); ++} ++ ++const uint8_t lzma_rc_prices[] = { ++ 128, 103, 91, 84, 78, 73, 69, 66, ++ 63, 61, 58, 56, 54, 52, 51, 49, ++ 48, 46, 45, 44, 43, 42, 41, 40, ++ 39, 38, 37, 36, 35, 34, 34, 33, ++ 32, 31, 31, 30, 29, 29, 28, 28, ++ 27, 26, 26, 25, 25, 24, 24, 23, ++ 23, 22, 22, 22, 21, 21, 20, 20, ++ 19, 19, 19, 18, 18, 17, 17, 17, ++ 16, 16, 16, 15, 15, 15, 14, 14, ++ 14, 13, 13, 13, 12, 12, 12, 11, ++ 11, 11, 11, 10, 10, 10, 10, 9, ++ 9, 9, 9, 8, 8, 8, 8, 7, ++ 7, 7, 7, 6, 6, 6, 6, 5, ++ 5, 5, 5, 5, 4, 4, 4, 4, ++ 3, 3, 3, 3, 3, 2, 2, 2, ++ 2, 2, 2, 1, 1, 1, 1, 1 ++}; ++ ++static inline uint32_t ++rc_bit_price(const probability prob, const uint32_t bit) ++{ ++ return lzma_rc_prices[(prob ^ ((0U - bit) ++ & ((1U << 11) - 1))) >> 4]; ++} ++ ++static inline uint32_t ++rc_bit_0_price(const probability prob) ++{ ++ return lzma_rc_prices[prob >> 4]; ++} ++ ++static inline uint32_t ++rc_bit_1_price(const probability prob) ++{ ++ return lzma_rc_prices[(prob ^ ((1U << 11) - 1)) ++ >> 4]; ++} ++ ++static inline uint32_t ++rc_bittree_price(const probability *const probs, ++ const uint32_t bit_levels, uint32_t symbol) ++{ ++ uint32_t price = 0; ++ symbol += 1U << bit_levels; ++ ++ do { ++ const uint32_t bit = symbol & 1; ++ symbol >>= 1; ++ price += rc_bit_price(probs[symbol], bit); ++ } while (symbol != 1); ++ ++ return price; ++} ++ ++static void ++length_update_prices(lzma_length_encoder *lc, const uint32_t pos_state) ++{ ++ const uint32_t table_size = lc->table_size; ++ lc->counters[pos_state] = table_size; ++ ++ const uint32_t a0 = rc_bit_0_price(lc->choice); ++ const uint32_t a1 = rc_bit_1_price(lc->choice); ++ const uint32_t b0 = a1 + rc_bit_0_price(lc->choice2); ++ const uint32_t b1 = a1 + rc_bit_1_price(lc->choice2); ++ uint32_t *const prices = lc->prices[pos_state]; ++ ++ uint32_t i; ++ for (i = 0; i < table_size && i < (1 << 3); ++i) ++ prices[i] = a0 + rc_bittree_price(lc->low[pos_state], ++ 3, i); ++ ++ for (; i < table_size && 
i < (1 << 3) + (1 << 3); ++i) ++ prices[i] = b0 + rc_bittree_price(lc->mid[pos_state], ++ 3, i - (1 << 3)); ++ ++ for (; i < table_size; ++i) ++ prices[i] = b1 + rc_bittree_price(lc->high, 8, ++ i - (1 << 3) - (1 << 3)); ++ ++ return; ++} ++ ++static inline void ++length(lzma_range_encoder *rc, lzma_length_encoder *lc, ++ const uint32_t pos_state, uint32_t len, const _Bool fast_mode) ++{ ++ len -= 2; ++ ++ if (len < (1 << 3)) { ++ rc_bit(rc, &lc->choice, 0); ++ rc_bittree(rc, lc->low[pos_state], 3, len); ++ } else { ++ rc_bit(rc, &lc->choice, 1); ++ len -= (1 << 3); ++ ++ if (len < (1 << 3)) { ++ rc_bit(rc, &lc->choice2, 0); ++ rc_bittree(rc, lc->mid[pos_state], 3, len); ++ } else { ++ rc_bit(rc, &lc->choice2, 1); ++ len -= (1 << 3); ++ rc_bittree(rc, lc->high, 8, len); ++ } ++ } ++ ++ if (!fast_mode) ++ if (--lc->counters[pos_state] == 0) ++ length_update_prices(lc, pos_state); ++} ++ ++static inline void ++rep_match(lzma_lzma1_encoder *coder, const uint32_t pos_state, ++ const uint32_t rep, const uint32_t len) ++{ ++ if (rep == 0) { ++ rc_bit(&coder->rc, &coder->is_rep0[coder->state], 0); ++ rc_bit(&coder->rc, ++ &coder->is_rep0_long[coder->state][pos_state], ++ len != 1); ++ } else { ++ const uint32_t distance = coder->reps[rep]; ++ rc_bit(&coder->rc, &coder->is_rep0[coder->state], 1); ++ ++ if (rep == 1) { ++ rc_bit(&coder->rc, &coder->is_rep1[coder->state], 0); ++ } else { ++ rc_bit(&coder->rc, &coder->is_rep1[coder->state], 1); ++ rc_bit(&coder->rc, &coder->is_rep2[coder->state], ++ rep - 2); ++ ++ if (rep == 3) ++ coder->reps[3] = coder->reps[2]; ++ ++ coder->reps[2] = coder->reps[1]; ++ } ++ ++ coder->reps[1] = coder->reps[0]; ++ coder->reps[0] = distance; ++ } ++ ++ if (len == 1) { ++ coder->state = ((coder->state) < 7 ? STATE_LIT_SHORTREP : STATE_NONLIT_REP); ++ } else { ++ length(&coder->rc, &coder->rep_len_encoder, pos_state, len, ++ coder->fast_mode); ++ coder->state = ((coder->state) < 7 ? 
STATE_LIT_LONGREP : STATE_NONLIT_REP); ++ } ++} ++ ++// This array is constantly initialized in the original code. It's quite big ++// so we skip it. ++const uint8_t lzma_fastpos[1 << 13]; ++ ++static inline uint32_t ++get_dist_slot(uint32_t dist) ++{ ++ if (dist < (1U << (13 + ((0) + (0) * (13 - 1))))) ++ return lzma_fastpos[dist]; ++ ++ if (dist < (1U << (13 + ((0) + (1) * (13 - 1))))) ++ return (uint32_t)(lzma_fastpos[(dist) >> ((0) + (1) * (13 - 1))]) + 2 * ((0) + (1) * (13 - 1)); ++ ++ return (uint32_t)(lzma_fastpos[(dist) >> ((0) + (2) * (13 - 1))]) + 2 * ((0) + (2) * (13 - 1)); ++} ++ ++static inline void ++rc_bittree_reverse(lzma_range_encoder *rc, probability *probs, ++ uint32_t bit_count, uint32_t symbol) ++{ ++ uint32_t model_index = 1; ++ do { ++ const uint32_t bit = symbol & 1; ++ symbol >>= 1; ++ rc_bit(rc, &probs[model_index], bit); ++ model_index = (model_index << 1) + bit; ++ } while (--bit_count != 0); ++} ++ ++static inline void ++rc_direct(lzma_range_encoder *rc, uint32_t value, uint32_t bit_count) ++{ ++ do { ++ rc->symbols[rc->count++] ++ = RC_DIRECT_0 + ((value >> --bit_count) & 1); ++ } while (bit_count != 0); ++} ++ ++static inline void ++match(lzma_lzma1_encoder *coder, const uint32_t pos_state, ++ const uint32_t distance, const uint32_t len) ++{ ++ coder->state = ((coder->state) < 7 ? STATE_LIT_MATCH : STATE_NONLIT_MATCH); ++ ++ length(&coder->rc, &coder->match_len_encoder, pos_state, len, ++ coder->fast_mode); ++ ++ const uint32_t dist_slot = get_dist_slot(distance); ++ const uint32_t dist_state = ((len) < 4 + 2 ? 
(len) - 2 : 4 - 1); ++ rc_bittree(&coder->rc, coder->dist_slot[dist_state], 6, dist_slot); ++ ++ if (dist_slot >= 4) { ++ const uint32_t footer_bits = (dist_slot >> 1) - 1; ++ const uint32_t base = (2 | (dist_slot & 1)) << footer_bits; ++ const uint32_t dist_reduced = distance - base; ++ ++ if (dist_slot < 14) { ++ rc_bittree_reverse(&coder->rc, coder->dist_special + base - dist_slot - 1, ++ footer_bits, dist_reduced); ++ } else { ++ rc_direct(&coder->rc, dist_reduced >> 4, ++ footer_bits - 4); ++ rc_bittree_reverse( ++ &coder->rc, coder->dist_align, ++ 4, dist_reduced & ((1 << 4) - 1)); ++ ++coder->align_price_count; ++ } ++ } ++ ++ coder->reps[3] = coder->reps[2]; ++ coder->reps[2] = coder->reps[1]; ++ coder->reps[1] = coder->reps[0]; ++ coder->reps[0] = distance; ++ ++coder->match_price_count; ++} ++ ++static void ++encode_symbol(lzma_lzma1_encoder *coder, lzma_mf *mf, ++ uint32_t back, uint32_t len, uint32_t position) ++{ ++ const uint32_t pos_state = position & coder->pos_mask; ++ ++ if (back == (4294967295U)) { ++ rc_bit(&coder->rc, ++ &coder->is_match[coder->state][pos_state], 0); ++ literal(coder, mf, position); ++ } else { ++ rc_bit(&coder->rc, ++ &coder->is_match[coder->state][pos_state], 1); ++ ++ if (back < 4) { ++ rc_bit(&coder->rc, &coder->is_rep[coder->state], 1); ++ rep_match(coder, pos_state, back, len); ++ } else { ++ rc_bit(&coder->rc, &coder->is_rep[coder->state], 0); ++ match(coder, pos_state, back - 4, len); ++ } ++ } ++ mf->read_ahead -= len; ++} ++ ++static void ++encode_eopm(lzma_lzma1_encoder *coder, uint32_t position) ++{ ++ const uint32_t pos_state = position & coder->pos_mask; ++ rc_bit(&coder->rc, &coder->is_match[coder->state][pos_state], 1); ++ rc_bit(&coder->rc, &coder->is_rep[coder->state], 0); ++ match(coder, pos_state, (4294967295U), 2); ++} ++ ++static inline void ++rc_flush(lzma_range_encoder *rc) ++{ ++ for (size_t i = 0; i < 5; ++i) ++ rc->symbols[rc->count++] = RC_FLUSH; ++} ++ ++extern void exit (int __status) ++ 
__attribute__ ((__nothrow__ , __leaf__ , __noreturn__)); ++ ++extern lzma_ret ++lzma_lzma_encode(lzma_lzma1_encoder *restrict coder, lzma_mf *restrict mf, ++ uint8_t *restrict out, size_t *restrict out_pos, ++ size_t out_size, uint32_t limit) ++{ ++ ++ if (!coder->is_initialized && !encode_init(coder, mf)) ++ return LZMA_OK; ++ ++ uint32_t position = mf_position(mf); ++ ++ while (1) { ++ if (rc_encode(&coder->rc, out, out_pos, out_size)) { ++ return LZMA_OK; ++ } ++ ++ if (limit != (4294967295U) ++ && (mf->read_pos - mf->read_ahead >= limit ++ || *out_pos + rc_pending(&coder->rc) ++ >= (1U << 16) - ((1 << 12) + 1))) ++ break; ++ ++ if (mf->read_pos >= mf->read_limit) { ++ if (mf->action == LZMA_RUN) ++ return LZMA_OK; ++ ++ ++ if (mf->read_ahead == 0) ++ break; ++ } ++ uint32_t len; ++ uint32_t back; ++ ++ if (coder->fast_mode) ++ lzma_lzma_optimum_fast(coder, mf, &back, &len); ++ else ++ // The original code contains the call to ++ // lzma_lzma_optimum_normal(coder, mf, &back, &len, position); ++ exit (-1); ++ ++ encode_symbol(coder, mf, back, len, position); ++ ++ position += len; ++ } ++ ++ if (!coder->is_flushed) { ++ coder->is_flushed = 1; ++ if (limit == (4294967295U)) ++ encode_eopm(coder, position); ++ ++ rc_flush(&coder->rc); ++ ++ if (rc_encode(&coder->rc, out, out_pos, out_size)) { ++ return LZMA_OK; ++ } ++ } ++ ++ coder->is_flushed = 0; ++ return LZMA_STREAM_END; ++} ++ ++extern void ++lzma_free(void *ptr, const lzma_allocator *allocator) ++{ ++ if (allocator != ((void *)0) && allocator->free != ((void *)0)) ++ allocator->free(allocator->opaque, ptr); ++ else ++ free(ptr); ++ return; ++} ++ ++static _Bool ++lz_encoder_prepare(lzma_mf *mf, const lzma_allocator *allocator, ++ const lzma_lz_options *lz_options) ++{ ++ if (lz_options->dict_size < 4096U ++ || lz_options->dict_size ++ > (1U << 30) + (1U << 29) ++ || lz_options->nice_len > lz_options->match_len_max) ++ return 1; ++ ++ mf->keep_size_before = lz_options->before_size + lz_options->dict_size; ++ 
mf->keep_size_after = lz_options->after_size ++ + lz_options->match_len_max; ++ uint32_t reserve = lz_options->dict_size / 2; ++ if (reserve > (1U << 30)) ++ reserve /= 2; ++ ++ reserve += (lz_options->before_size + lz_options->match_len_max ++ + lz_options->after_size) / 2 + (1U << 19); ++ ++ const uint32_t old_size = mf->size; ++ mf->size = mf->keep_size_before + reserve + mf->keep_size_after; ++ ++ if ((mf->buffer != ((void *)0)) && old_size != mf->size) { ++ lzma_free(mf->buffer, allocator); ++ mf->buffer = ((void *)0); ++ } ++ ++ mf->match_len_max = lz_options->match_len_max; ++ mf->nice_len = lz_options->nice_len; ++ mf->cyclic_size = lz_options->dict_size + 1; ++ ++ switch (lz_options->match_finder) { ++ case LZMA_MF_HC3: ++ mf->find = &lzma_mf_hc3_find; ++ mf->skip = &lzma_mf_hc3_skip; ++ break; ++ ++ case LZMA_MF_HC4: ++ mf->find = &lzma_mf_hc4_find; ++ mf->skip = &lzma_mf_hc4_skip; ++ break; ++ ++ case LZMA_MF_BT2: ++ mf->find = &lzma_mf_bt2_find; ++ mf->skip = &lzma_mf_bt2_skip; ++ break; ++ ++ case LZMA_MF_BT3: ++ mf->find = &lzma_mf_bt3_find; ++ mf->skip = &lzma_mf_bt3_skip; ++ break; ++ ++ case LZMA_MF_BT4: ++ mf->find = &lzma_mf_bt4_find; ++ mf->skip = &lzma_mf_bt4_skip; ++ break; ++ ++ default: ++ return 1; ++ } ++ ++ const uint32_t hash_bytes = lz_options->match_finder & 0x0F; ++ if (hash_bytes > mf->nice_len) ++ return 1; ++ ++ const _Bool is_bt = (lz_options->match_finder & 0x10) != 0; ++ uint32_t hs; ++ ++ if (hash_bytes == 2) { ++ hs = 0xFFFF; ++ } else { ++ hs = lz_options->dict_size - 1; ++ hs |= hs >> 1; ++ hs |= hs >> 2; ++ hs |= hs >> 4; ++ hs |= hs >> 8; ++ hs >>= 1; ++ hs |= 0xFFFF; ++ ++ if (hs > (1U << 24)) { ++ if (hash_bytes == 3) ++ hs = (1U << 24) - 1; ++ else ++ hs >>= 1; ++ } ++ } ++ ++ mf->hash_mask = hs; ++ ++ ++hs; ++ if (hash_bytes > 2) ++ hs += (1U << 10); ++ if (hash_bytes > 3) ++ hs += (1U << 16); ++ ++ const uint32_t old_hash_count = mf->hash_count; ++ const uint32_t old_sons_count = mf->sons_count; ++ mf->hash_count = 
hs; ++ mf->sons_count = mf->cyclic_size; ++ if (is_bt) ++ mf->sons_count *= 2; ++ ++ if (old_hash_count != mf->hash_count ++ || old_sons_count != mf->sons_count) { ++ lzma_free(mf->hash, allocator); ++ mf->hash = ((void *)0); ++ ++ lzma_free(mf->son, allocator); ++ mf->son = ((void *)0); ++ } ++ ++ mf->depth = lz_options->depth; ++ if (mf->depth == 0) { ++ if (is_bt) ++ mf->depth = 16 + mf->nice_len / 2; ++ else ++ mf->depth = 4 + mf->nice_len / 4; ++ } ++ ++ return 0; ++} ++ ++int ++main () ++{ ++ lzma_mf mf; ++ lzma_allocator allocator; ++ lzma_lz_options lz_options; ++ ++ void *coder; ++ uint8_t *restrict out; ++ size_t *restrict out_pos; ++ size_t out_size; ++ ++ lz_encoder_prepare(&mf, &allocator, &lz_options); ++ return (int) lzma_lzma_encode(coder, &mf, out, out_pos, out_size, (4294967295U)); ++} ++ ++ ++/* { dg-final { scan-wpa-ipa-dump "Save results of indirect call analysis." "icp"} } */ ++/* { dg-final { scan-wpa-ipa-dump-times "For call" 2 "icp"} } */ ++/* { dg-final { scan-wpa-ipa-dump-times "Insert 0 prefetch stmt:" 5 "ipa_prefetch"} } */ ++/* { dg-final { scan-wpa-ipa-dump-times "Insert 1 prefetch stmt:" 4 "ipa_prefetch"} } */ ++/* { dg-final { scan-wpa-ipa-dump-times "Insert 2 prefetch stmt:" 2 "ipa_prefetch"} } */ +-- +2.33.0 + diff --git a/0052-Fix-fails-in-IPA-prefetch-src-openEuler-gcc-I96ID7.patch b/0052-Fix-fails-in-IPA-prefetch-src-openEuler-gcc-I96ID7.patch new file mode 100644 index 0000000000000000000000000000000000000000..e50c9b2b682f7498c6b807b5db12cf7fc7c115d5 --- /dev/null +++ b/0052-Fix-fails-in-IPA-prefetch-src-openEuler-gcc-I96ID7.patch @@ -0,0 +1,94 @@ +From 0263daa1312d0cdcdf9c770bcf5d982a2d4fc16b Mon Sep 17 00:00:00 2001 +From: Diachkov Ilia +Date: Fri, 29 Mar 2024 17:15:41 +0800 +Subject: [PATCH 2/2] Fix fails in IPA prefetch (src-openEuler/gcc: I96ID7) + +--- + gcc/ipa-prefetch.cc | 28 ++++++++++++++++++++++++++-- + 1 file changed, 26 insertions(+), 2 deletions(-) + +diff --git a/gcc/ipa-prefetch.cc b/gcc/ipa-prefetch.cc +index 
9537e4835..1ceb5137f 100644 +--- a/gcc/ipa-prefetch.cc ++++ b/gcc/ipa-prefetch.cc +@@ -366,6 +366,7 @@ typedef std::map memref_map; + typedef std::map memref_tree_map; + + typedef std::set stmt_set; ++typedef std::set tree_set; + typedef std::map tree_map; + + tree_memref_map *tm_map; +@@ -1124,8 +1125,21 @@ analyse_loops () + } + } + ++/* Compare memrefs by IDs; helper for qsort. */ ++ ++static int ++memref_id_cmp (const void *p1, const void *p2) ++{ ++ const memref_t *mr1 = *(const memref_t **) p1; ++ const memref_t *mr2 = *(const memref_t **) p2; ++ ++ if ((unsigned) mr1->mr_id > (unsigned) mr2->mr_id) ++ return 1; ++ return -1; ++} ++ + /* Reduce the set filtering out memrefs with the same memory references, +- return the result vector of memrefs. */ ++ sort and return the result vector of memrefs. */ + + static void + reduce_memref_set (memref_set *set, vec &vec) +@@ -1162,6 +1176,7 @@ reduce_memref_set (memref_set *set, vec &vec) + vec.safe_push (mr1); + } + } ++ vec.qsort (memref_id_cmp); + if (dump_file) + { + fprintf (dump_file, "MRs (%d) after filtering: ", vec.length ()); +@@ -1663,10 +1678,15 @@ optimize_function (cgraph_node *n, function *fn) + } + + /* Create other new vars. Insert new stmts. 
*/ ++ vec used_mr_vec = vNULL; + for (memref_set::const_iterator it = used_mrs.begin (); + it != used_mrs.end (); it++) ++ used_mr_vec.safe_push (*it); ++ used_mr_vec.qsort (memref_id_cmp); ++ ++ for (unsigned int j = 0; j < used_mr_vec.length (); j++) + { +- memref_t *mr = *it; ++ memref_t *mr = used_mr_vec[j]; + if (mr == comp_mr) + continue; + gimple *last_stmt = gimple_copy_and_remap_memref_stmts (mr, stmts, 0, +@@ -1702,6 +1722,7 @@ optimize_function (cgraph_node *n, function *fn) + local = integer_three_node; + break; + } ++ tree_set prefetched_addrs; + for (unsigned int j = 0; j < vmrs.length (); j++) + { + memref_t *mr = vmrs[j]; +@@ -1714,10 +1735,13 @@ optimize_function (cgraph_node *n, function *fn) + tree addr = get_mem_ref_address_ssa_name (mr->mem, NULL_TREE); + if (decl_map->count (addr)) + addr = (*decl_map)[addr]; ++ if (prefetched_addrs.count (addr)) ++ continue; + last_stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_PREFETCH), + 3, addr, write_p, local); + pcalls.safe_push (last_stmt); + gimple_seq_add_stmt (&stmts, last_stmt); ++ prefetched_addrs.insert (addr); + if (dump_file) + { + fprintf (dump_file, "Insert %d prefetch stmt:\n", j); +-- +2.33.0 + diff --git a/0053-struct-reorg-Add-Semi-Relayout.patch b/0053-struct-reorg-Add-Semi-Relayout.patch new file mode 100644 index 0000000000000000000000000000000000000000..f68716441813a9db7e5b4b9b73c486b8c21d8db6 --- /dev/null +++ b/0053-struct-reorg-Add-Semi-Relayout.patch @@ -0,0 +1,1366 @@ +From c2a0dcc565e0f6274f26644bd389337db8f2940c Mon Sep 17 00:00:00 2001 +From: tiancheng-bao +Date: Sat, 30 Mar 2024 11:04:23 +0800 +Subject: [PATCH] [struct-reorg] Add Semi Relayout + +--- + gcc/common.opt | 6 +- + gcc/ipa-struct-reorg/ipa-struct-reorg.cc | 975 +++++++++++++++++- + gcc/ipa-struct-reorg/ipa-struct-reorg.h | 8 + + gcc/params.opt | 5 + + .../gcc.dg/struct/semi_relayout_rewrite.c | 86 ++ + gcc/testsuite/gcc.dg/struct/struct-reorg.exp | 4 + + 6 files changed, 1040 insertions(+), 44 
deletions(-) + create mode 100644 gcc/testsuite/gcc.dg/struct/semi_relayout_rewrite.c + +diff --git a/gcc/common.opt b/gcc/common.opt +index 38f1e457d..9484df5ad 100644 +--- a/gcc/common.opt ++++ b/gcc/common.opt +@@ -2010,9 +2010,9 @@ Common Var(flag_ipa_struct_reorg) Init(0) Optimization + Perform structure layout optimizations. + + fipa-struct-reorg= +-Common RejectNegative Joined UInteger Var(struct_layout_optimize_level) Init(0) IntegerRange(0, 5) +--fipa-struct-reorg=[0,1,2,3,4,5] adding none, struct-reorg, reorder-fields, +-dfe, safe-pointer-compression, unsafe-pointer-compression optimizations. ++Common RejectNegative Joined UInteger Var(struct_layout_optimize_level) Init(0) IntegerRange(0, 6) ++-fipa-struct-reorg=[0,1,2,3,4,5,6] adding none, struct-reorg, reorder-fields, ++dfe, safe-pointer-compression, unsafe-pointer-compression, semi-relayout optimizations. + + fipa-vrp + Common Var(flag_ipa_vrp) Optimization +diff --git a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc +index 3922873f3..6a202b4bd 100644 +--- a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc ++++ b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc +@@ -294,7 +294,8 @@ enum struct_layout_opt_level + STRUCT_REORDER_FIELDS = 1 << 2, + DEAD_FIELD_ELIMINATION = 1 << 3, + POINTER_COMPRESSION_SAFE = 1 << 4, +- POINTER_COMPRESSION_UNSAFE = 1 << 5 ++ POINTER_COMPRESSION_UNSAFE = 1 << 5, ++ SEMI_RELAYOUT = 1 << 6 + }; + + /* Defines the target pointer size of compressed pointer, which should be 8, +@@ -308,6 +309,7 @@ void get_base (tree &base, tree expr); + + static unsigned int current_layout_opt_level; + hash_map replace_type_map; ++hash_map semi_relayout_map; + + /* Return true if one of these types is created by struct-reorg. 
*/ + +@@ -426,7 +428,9 @@ srtype::srtype (tree type) + visited (false), + pc_candidate (false), + has_legal_alloc_num (false), +- has_alloc_array (0) ++ has_alloc_array (0), ++ semi_relayout (false), ++ bucket_parts (0) + { + for (int i = 0; i < max_split; i++) + newtype[i] = NULL_TREE; +@@ -891,6 +895,66 @@ srfield::create_new_reorder_fields (tree newtype[max_split], + newfield[0] = field; + } + ++/* Given a struct s whose fields has already reordered by size, we try to ++ combine fields less than 8 bytes together to 8 bytes. Example: ++ struct s { ++ uint64_t a, ++ uint32_t b, ++ uint32_t c, ++ uint32_t d, ++ uint16_t e, ++ uint8_t f ++ } ++ ++ We allocate memory for arrays of struct S, before semi-relayout, their ++ layout in memory is shown as below: ++ [a,b,c,d,e,f,padding;a,b,c,d,e,f,padding;...] ++ ++ During semi-relayout, we put a number of structs into a same region called ++ bucket. The number is determined by param realyout-bucket-capacity-level. ++ Using 1024 here as example. After semi-relayout, the layout in a bucket is ++ shown as below: ++ part1 [a;a;a...] ++ part2 [b,c;b,c;b,c;...] ++ part3 [d,e,f,pad;d,e,f,pad;d,e,f,pad;...] ++ ++ In the last bucket, if the amount of rest structs is less than the capacity ++ of a bucket, the rest of allcated memory will be wasted as padding. */ ++ ++unsigned ++srtype::calculate_bucket_size () ++{ ++ unsigned parts = 0; ++ unsigned bit_sum = 0; ++ unsigned relayout_offset = 0; ++ /* Currently, limit each 8 bytes with less than 2 fields. 
*/ ++ unsigned curr_part_num = 0; ++ unsigned field_num = 0; ++ for (tree f = TYPE_FIELDS (newtype[0]); f; f = DECL_CHAIN (f)) ++ { ++ unsigned size = TYPE_PRECISION (TREE_TYPE (f)); ++ bit_sum += size; ++ field_num++; ++ if (++curr_part_num > 2 || bit_sum > 64) ++ { ++ bit_sum = size; ++ parts++; ++ relayout_offset = relayout_part_size * parts; ++ curr_part_num = 1; ++ } ++ else ++ { ++ relayout_offset = relayout_part_size * parts + (bit_sum - size) / 8; ++ } ++ new_field_offsets.put (f, relayout_offset); ++ } ++ /* Donnot relayout a struct with only one field after DFE. */ ++ if (field_num == 1) ++ return 0; ++ bucket_parts = ++parts; ++ return parts * relayout_part_size; ++} ++ + /* Create the new TYPE corresponding to THIS type. */ + + bool +@@ -1001,6 +1065,15 @@ srtype::create_new_type (void) + if (pc_candidate && pc_gptr == NULL_TREE) + create_global_ptr_for_pc (); + ++ if (semi_relayout) ++ { ++ bucket_size = calculate_bucket_size (); ++ if (bucket_size == 0) ++ return false; ++ if (semi_relayout_map.get (this->newtype[0]) == NULL) ++ semi_relayout_map.put (this->newtype[0], this->type); ++ } ++ + if (dump_file && (dump_flags & TDF_DETAILS)) + { + fprintf (dump_file, "Created %d types:\n", maxclusters); +@@ -1393,7 +1466,7 @@ public: + bool should_create = false, bool can_escape = false); + bool wholeaccess (tree expr, tree base, tree accesstype, srtype *t); + +- void check_alloc_num (gimple *stmt, srtype *type); ++ void check_alloc_num (gimple *stmt, srtype *type, bool ptrptr); + void check_definition_assign (srdecl *decl, vec &worklist); + void check_definition_call (srdecl *decl, vec &worklist); + void check_definition (srdecl *decl, vec &); +@@ -1442,6 +1515,33 @@ public: + tree &); + basic_block create_bb_for_compress_nullptr (basic_block, tree &); + basic_block create_bb_for_decompress_nullptr (basic_block, tree, tree &); ++ ++ // Semi-relayout methods: ++ bool is_semi_relayout_candidate (tree); ++ srtype *get_semi_relayout_candidate_type (tree); ++ 
void check_and_prune_struct_for_semi_relayout (void); ++ tree rewrite_pointer_diff (gimple_stmt_iterator *, tree, tree, srtype *); ++ tree rewrite_pointer_plus_integer (gimple *, gimple_stmt_iterator *, tree, ++ tree, srtype *); ++ tree build_div_expr (gimple_stmt_iterator *, tree, tree); ++ tree get_true_pointer_base (gimple_stmt_iterator *, tree, srtype *); ++ tree get_real_allocated_ptr (tree, gimple_stmt_iterator *); ++ tree set_ptr_for_use (tree, gimple_stmt_iterator *); ++ void record_allocated_size (tree, gimple_stmt_iterator *, tree); ++ tree read_allocated_size (tree, gimple_stmt_iterator *); ++ gimple *create_aligned_alloc (gimple_stmt_iterator *, srtype *, tree, ++ tree &); ++ void create_memset_zero (tree, gimple_stmt_iterator *, tree); ++ void create_memcpy (tree, tree, tree, gimple_stmt_iterator *); ++ void create_free (tree, gimple_stmt_iterator *); ++ void copy_to_lhs (tree, tree, gimple_stmt_iterator *); ++ srtype *get_relayout_candidate_type (tree); ++ long unsigned int get_true_field_offset (srfield *, srtype *); ++ tree rewrite_address (tree, srfield *, srtype *, gimple_stmt_iterator *); ++ bool check_sr_copy (gimple *); ++ void relayout_field_copy (gimple_stmt_iterator *, gimple *, tree, tree, ++ tree&, tree &); ++ bool do_semi_relayout (gimple_stmt_iterator *, gimple *, tree &, tree &); + }; + + struct ipa_struct_relayout +@@ -4355,7 +4455,7 @@ ipa_struct_reorg::check_type_and_push (tree newdecl, srdecl *decl, + } + + void +-ipa_struct_reorg::check_alloc_num (gimple *stmt, srtype *type) ++ipa_struct_reorg::check_alloc_num (gimple *stmt, srtype *type, bool ptrptr) + { + if (current_layout_opt_level >= COMPLETE_STRUCT_RELAYOUT + && handled_allocation_stmt (stmt)) +@@ -4363,13 +4463,28 @@ ipa_struct_reorg::check_alloc_num (gimple *stmt, srtype *type) + tree arg0 = gimple_call_arg (stmt, 0); + basic_block bb = gimple_bb (stmt); + cgraph_node *node = current_function->node; ++ if (!ptrptr && current_layout_opt_level >= SEMI_RELAYOUT ++ && 
gimple_call_builtin_p (stmt, BUILT_IN_MALLOC)) ++ { ++ /* Malloc is commonly used for allocations of ++ a single struct and semi-relayout will waste ++ a mess of memory, so we skip it. */ ++ type->has_alloc_array = -4; ++ return; ++ } + if (integer_onep (arg0)) + /* Actually NOT an array, but may ruin other array. */ + type->has_alloc_array = -1; + else if (bb->loop_father != NULL + && loop_outer (bb->loop_father) != NULL) +- /* The allocation is in a loop. */ +- type->has_alloc_array = -2; ++ { ++ /* For semi-relayout, do not escape realloc. */ ++ if (current_layout_opt_level & SEMI_RELAYOUT ++ && gimple_call_builtin_p (stmt, BUILT_IN_REALLOC)) ++ return; ++ /* The allocation is in a loop. */ ++ type->has_alloc_array = -2; ++ } + else if (node->callers != NULL) + type->has_alloc_array = -3; + else +@@ -4448,6 +4563,13 @@ ipa_struct_reorg::check_definition_assign (srdecl *decl, + return; + } + ++ if (semi_relayout_map.get (type->type) != NULL) ++ { ++ if (current_layout_opt_level != COMPLETE_STRUCT_RELAYOUT) ++ type->mark_escape (escape_unhandled_rewrite, stmt); ++ return; ++ } ++ + /* d) if the name is from a cast/assignment, make sure it is used as + that type or void* + i) If void* then push the ssa_name into worklist. 
*/ +@@ -4484,7 +4606,8 @@ ipa_struct_reorg::check_definition_call (srdecl *decl, vec &worklist) + type->mark_escape (escape_return, stmt); + } + +- check_alloc_num (stmt, type); ++ bool ptrptr = isptrptr (decl->orig_type); ++ check_alloc_num (stmt, type, ptrptr); + return; + } + +@@ -6038,6 +6161,55 @@ ipa_struct_reorg::pc_candidate_tree_p (tree xhs) + return false; + } + ++srtype * ++ipa_struct_reorg::get_semi_relayout_candidate_type (tree xhs) ++{ ++ if (xhs == NULL) ++ return NULL; ++ if (TREE_CODE (xhs) == SSA_NAME || TREE_CODE (xhs) == COMPONENT_REF) ++ { ++ srtype *access_type = find_type (inner_type (TREE_TYPE (xhs))); ++ if (access_type != NULL && access_type->semi_relayout) ++ return access_type; ++ } ++ return NULL; ++} ++ ++bool ++ipa_struct_reorg::is_semi_relayout_candidate (tree xhs) ++{ ++ if (xhs == NULL) ++ return false; ++ ++ if (TREE_CODE (xhs) == SSA_NAME) ++ xhs = TREE_TYPE (xhs); ++ ++ if (TREE_CODE (xhs) == POINTER_TYPE) ++ { ++ srtype *var_type = find_type (TREE_TYPE (xhs)); ++ if (!var_type || var_type->has_escaped ()) ++ return false; ++ if (var_type->semi_relayout) ++ return true; ++ } ++ ++ if (TREE_CODE (xhs) == COMPONENT_REF) ++ { ++ tree mem = TREE_OPERAND (xhs, 0); ++ if (TREE_CODE (mem) == MEM_REF) ++ { ++ tree type = TREE_TYPE (mem); ++ srtype *old_type = get_relayout_candidate_type (type); ++ if (!old_type) ++ return false; ++ if (types_compatible_p (type, old_type->type) ++ && old_type->semi_relayout) ++ return true; ++ } ++ } ++ return false; ++} ++ + /* True if xhs is a component_ref that base has escaped but uses a compression + candidate type. */ + +@@ -6388,7 +6560,7 @@ ipa_struct_reorg::decompress_candidate_without_check (gimple_stmt_iterator *gsi, + } + } + /* -> _1 = t->s +- _2 = _1->s ++ _2 = _1->s + In this case, _1 might not be nullptr, so decompress it without + check. 
*/ + else if (TREE_CODE (next_rhs) == COMPONENT_REF) +@@ -6582,6 +6754,426 @@ ipa_struct_reorg::try_rewrite_with_pointer_compression (gassign *stmt, + } + } + ++tree ++ipa_struct_reorg::rewrite_pointer_diff (gimple_stmt_iterator *gsi, tree ptr1, ++ tree ptr2, srtype *type) ++{ ++ tree shifts = build_int_cst (long_integer_type_node, semi_relayout_align); ++ tree pointer_type = build_pointer_type (unsigned_char_type_node); ++ // tree pointer_type = build_pointer_type (long_integer_type_node); ++ tree intptr_type = signed_type_for (pointer_type); ++ ++ /* addr_high_1 = (intptr_t)ptr1 >> shifts */ ++ tree ptr1_cvt = fold_convert (intptr_type, ptr1); ++ tree addr_high_1 = gimplify_build2 (gsi, RSHIFT_EXPR, intptr_type, ++ ptr1_cvt, shifts); ++ /* addr_high_2 = (intptr_t)ptr2 >> shifts */ ++ tree ptr2_cvt = fold_convert (intptr_type, ptr2); ++ tree addr_high_2 = gimplify_build2 (gsi, RSHIFT_EXPR, intptr_type, ++ ptr2_cvt, shifts); ++ /* off1 = (intptr_t)ptr1 - (addr_high_1 << shifts) */ ++ tree bucket_start_1 = gimplify_build2 (gsi, LSHIFT_EXPR, intptr_type, ++ addr_high_1, shifts); ++ tree off1 = gimplify_build2 (gsi, MINUS_EXPR, intptr_type, ++ ptr1_cvt, bucket_start_1); ++ /* off2 = (intptr_t)ptr2 - (addr_high_2 << shifts) */ ++ tree bucket_start_2 = gimplify_build2 (gsi, LSHIFT_EXPR, intptr_type, ++ addr_high_2, shifts); ++ tree off2 = gimplify_build2 (gsi, MINUS_EXPR, intptr_type, ++ ptr2_cvt, bucket_start_2); ++ /* group_diff = (addr_high_1 - addr_high_2) / bucket_parts */ ++ tree bucket_sub = gimplify_build2 (gsi, MINUS_EXPR, intptr_type, ++ addr_high_1, addr_high_2); ++ tree bucket_parts = build_int_cst (intptr_type, ++ type->bucket_parts); ++ tree group_diff = gimplify_build2 (gsi, TRUNC_DIV_EXPR, ++ intptr_type, ++ bucket_sub, bucket_parts); ++ /* off_addr_diff = off1 - off2 */ ++ tree off_addr_diff = gimplify_build2 (gsi, MINUS_EXPR, intptr_type, ++ off1, off2); ++ /* res = group_diff * bucket_capacity + off_diff / 8 */ ++ tree capacity = build_int_cst 
(long_integer_type_node, ++ relayout_part_size / 8); ++ tree unit_size = build_int_cst (long_integer_type_node, 8); ++ tree bucket_index_diff = gimplify_build2 (gsi, MULT_EXPR, ++ intptr_type, ++ group_diff, capacity); ++ tree off_index = gimplify_build2 (gsi, TRUNC_DIV_EXPR, ++ long_integer_type_node, ++ off_addr_diff, unit_size); ++ tree res = gimplify_build2 (gsi, PLUS_EXPR, intptr_type, ++ bucket_index_diff, off_index); ++ return res; ++} ++ ++basic_block ++create_bb_for_group_diff_eq_0 (basic_block last_bb, tree phi, tree new_granule) ++{ ++ basic_block new_bb = create_empty_bb (last_bb); ++ if (last_bb->loop_father != NULL) ++ { ++ add_bb_to_loop (new_bb, last_bb->loop_father); ++ loops_state_set (LOOPS_NEED_FIXUP); ++ } ++ /* Emit res = new_granule; */ ++ gimple_stmt_iterator gsi = gsi_last_bb (new_bb); ++ gimple *new_stmt = gimple_build_assign (phi, new_granule); ++ gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT); ++ return new_bb; ++} ++ ++basic_block ++create_bb_for_group_diff_ne_0 (basic_block new_bb, tree &phi, tree ptr, ++ tree group_diff, tree off_times_8, srtype *type) ++{ ++ tree intptr_type = signed_type_for (long_unsigned_type_node); ++ tree shifts = build_int_cst (intptr_type, semi_relayout_align); ++ gimple_stmt_iterator gsi = gsi_last_bb (new_bb); ++ gsi_insert_after (&gsi, gimple_build_nop (), GSI_NEW_STMT); ++ tree ptr_cvt = fold_convert (intptr_type, ptr); ++ /* curr_group_start = (ptr >> shifts) << shifts; */ ++ tree ptr_r_1 = gimplify_build2 (&gsi, RSHIFT_EXPR, intptr_type, ++ ptr_cvt, shifts); ++ tree curr_group_start = gimplify_build2 (&gsi, LSHIFT_EXPR, intptr_type, ++ ptr_r_1, shifts); ++ /* curr_off_from_group = ptr - curr_group_start; */ ++ tree curr_off_from_group = gimplify_build2 (&gsi, MINUS_EXPR, ++ intptr_type, ++ ptr_cvt, curr_group_start); ++ /* res = curr_group_start + ((group_diff * parts) << shifts) ++ + ((curr_off_from_group + off_times_8) % shifts); */ ++ tree step1 = gimplify_build2 (&gsi, MULT_EXPR, 
long_integer_type_node, ++ group_diff, build_int_cst ( ++ long_integer_type_node, type->bucket_parts)); ++ tree step1_cvt = fold_convert (intptr_type, step1); ++ tree step2 = gimplify_build2 (&gsi, LSHIFT_EXPR, intptr_type, ++ step1_cvt, shifts); ++ tree off_times_8_cvt = fold_convert (intptr_type, off_times_8); ++ tree step3 = gimplify_build2 (&gsi, PLUS_EXPR, intptr_type, ++ curr_off_from_group, off_times_8_cvt); ++ tree step4 = gimplify_build2 (&gsi, TRUNC_MOD_EXPR, intptr_type, ++ step3, build_int_cst (intptr_type, ++ relayout_part_size)); ++ tree step5 = gimplify_build2 (&gsi, PLUS_EXPR, intptr_type, ++ step2, step4); ++ tree res_phi1 = gimplify_build2 (&gsi, PLUS_EXPR, long_integer_type_node, ++ curr_group_start, step5); ++ /* if (group_diff < 0) */ ++ gcond *cond = gimple_build_cond (LT_EXPR, group_diff, ++ build_int_cst (long_integer_type_node, 0), ++ NULL_TREE, NULL_TREE); ++ gsi_insert_before (&gsi, cond, GSI_SAME_STMT); ++ /* remove nop */ ++ gsi_remove (&gsi, true); ++ /* res += shifts */ ++ basic_block true_bb = create_empty_bb (new_bb); ++ if (new_bb->loop_father != NULL) ++ { ++ add_bb_to_loop (true_bb, new_bb->loop_father); ++ loops_state_set (LOOPS_NEED_FIXUP); ++ } ++ gimple_stmt_iterator true_gsi = gsi_last_bb (true_bb); ++ tree res_phi2 = make_ssa_name (long_integer_type_node); ++ gimple *new_stmt ++ = gimple_build_assign (res_phi2, PLUS_EXPR, res_phi1, ++ build_int_cst (long_integer_type_node, ++ relayout_part_size)); ++ gsi_insert_after (&true_gsi, new_stmt, GSI_NEW_STMT); ++ /* create phi bb */ ++ basic_block res_bb = create_empty_bb (true_bb); ++ if (new_bb->loop_father != NULL) ++ { ++ add_bb_to_loop (res_bb, new_bb->loop_father); ++ loops_state_set (LOOPS_NEED_FIXUP); ++ } ++ /* rebuild cfg */ ++ edge etrue = make_edge (new_bb, true_bb, EDGE_TRUE_VALUE); ++ etrue->probability = profile_probability::unlikely (); ++ true_bb->count = etrue->count (); ++ ++ edge efalse = make_edge (new_bb, res_bb, EDGE_FALSE_VALUE); ++ efalse->probability = 
profile_probability::likely (); ++ res_bb->count = efalse->count (); ++ ++ edge efall = make_single_succ_edge (true_bb, res_bb, EDGE_FALLTHRU); ++ ++ phi = make_ssa_name (long_integer_type_node); ++ gphi *phi_node = create_phi_node (phi, res_bb); ++ add_phi_arg (phi_node, res_phi2, efall, UNKNOWN_LOCATION); ++ add_phi_arg (phi_node, res_phi1, efalse, UNKNOWN_LOCATION); ++ ++ if (dom_info_available_p (CDI_DOMINATORS)) ++ { ++ set_immediate_dominator (CDI_DOMINATORS, true_bb, new_bb); ++ set_immediate_dominator (CDI_DOMINATORS, res_bb, new_bb); ++ } ++ return res_bb; ++} ++ ++tree ++ipa_struct_reorg::rewrite_pointer_plus_integer (gimple *stmt, ++ gimple_stmt_iterator *gsi, ++ tree ptr, tree offset, ++ srtype *type) ++{ ++ gcc_assert (type->semi_relayout); ++ tree off = fold_convert (long_integer_type_node, offset); ++ tree num_8 = build_int_cst (long_integer_type_node, 8); ++ tree shifts = build_int_cst (long_integer_type_node, semi_relayout_align); ++ // tree shifts = build_int_cst (integer_type_node, semi_relayout_align); ++ /* off_times_8 = off * 8; */ ++ tree off_times_8 = gimplify_build2 (gsi, MULT_EXPR, long_integer_type_node, ++ off, num_8); ++ /* new_granule = ptr + off * 8; */ ++ tree ptr_int = fold_convert (long_integer_type_node, ptr); ++ tree new_granule = gimplify_build2 (gsi, PLUS_EXPR, long_integer_type_node, ++ ptr_int, off_times_8); ++ /* group_diff = (new_granule >> shifts) - (ptr >> shifts); */ ++ tree group_diff_rhs_1 = gimplify_build2 (gsi, RSHIFT_EXPR, ++ long_integer_type_node, ++ new_granule, shifts); ++ tree group_diff_rhs_2 = gimplify_build2 (gsi, RSHIFT_EXPR, ++ long_integer_type_node, ++ ptr_int, shifts); ++ tree group_diff = gimplify_build2 (gsi, MINUS_EXPR, long_integer_type_node, ++ group_diff_rhs_1, group_diff_rhs_2); ++ /* if (group_diff == 0) */ ++ gcond *cond = gimple_build_cond (EQ_EXPR, group_diff, ++ build_int_cst (long_integer_type_node, 0), ++ NULL_TREE, NULL_TREE); ++ gimple_set_location (cond, UNKNOWN_LOCATION); ++ 
gsi_insert_before (gsi, cond, GSI_SAME_STMT); ++ ++ edge e = split_block (cond->bb, cond); ++ basic_block split_src_bb = e->src; ++ basic_block split_dst_bb = e->dest; ++ remove_edge_raw (e); ++ /* if (group_diff == 0) ++ res = new_granule; */ ++ tree res_phi_1 = make_ssa_name (long_integer_type_node); ++ basic_block true_bb = create_bb_for_group_diff_eq_0 (split_src_bb, res_phi_1, ++ new_granule); ++ /* else */ ++ tree res_phi_2 = NULL_TREE; ++ basic_block false_bb = create_empty_bb (split_src_bb); ++ if (split_src_bb->loop_father != NULL) ++ { ++ add_bb_to_loop (false_bb, split_src_bb->loop_father); ++ loops_state_set (LOOPS_NEED_FIXUP); ++ } ++ ++ edge etrue = make_edge (split_src_bb, true_bb, EDGE_TRUE_VALUE); ++ etrue->probability = profile_probability::very_likely (); ++ true_bb->count = etrue->count (); ++ ++ edge efalse = make_edge (split_src_bb, false_bb, EDGE_FALSE_VALUE); ++ efalse->probability = profile_probability::unlikely (); ++ false_bb->count = efalse->count (); ++ basic_block res_bb = create_bb_for_group_diff_ne_0 (false_bb, res_phi_2, ++ ptr_int, group_diff, ++ off_times_8, type); ++ /* rebuild cfg */ ++ edge e_true_fall = make_single_succ_edge (true_bb, split_dst_bb, ++ EDGE_FALLTHRU); ++ edge e_false_fall = make_single_succ_edge (res_bb, split_dst_bb, ++ EDGE_FALLTHRU); ++ tree res_int = make_ssa_name (long_integer_type_node); ++ gphi *phi_node = create_phi_node (res_int, split_dst_bb); ++ add_phi_arg (phi_node, res_phi_1, e_true_fall, UNKNOWN_LOCATION); ++ add_phi_arg (phi_node, res_phi_2, e_false_fall, UNKNOWN_LOCATION); ++ if (dom_info_available_p (CDI_DOMINATORS)) ++ { ++ set_immediate_dominator (CDI_DOMINATORS, split_dst_bb, split_src_bb); ++ set_immediate_dominator (CDI_DOMINATORS, true_bb, split_src_bb); ++ set_immediate_dominator (CDI_DOMINATORS, false_bb, split_src_bb); ++ } ++ *gsi = gsi_start_bb (split_dst_bb); ++ tree pointer_type = build_pointer_type (unsigned_char_type_node); ++ tree res = gimplify_build1 (gsi, NOP_EXPR, 
pointer_type, res_int); ++ return res; ++} ++ ++tree ++ipa_struct_reorg::build_div_expr (gimple_stmt_iterator *gsi, ++ tree expr, tree orig_size) ++{ ++ tree div_expr = build2 (TRUNC_DIV_EXPR, long_unsigned_type_node, ++ expr, orig_size); ++ tree num = make_ssa_name (long_unsigned_type_node); ++ gimple *g = gimple_build_assign (num, div_expr); ++ gsi_insert_before (gsi, g, GSI_SAME_STMT); ++ return num; ++} ++ ++srtype * ++ipa_struct_reorg::get_relayout_candidate_type (tree type) ++{ ++ if (type == NULL) ++ return NULL; ++ if (TREE_CODE (type) != RECORD_TYPE) ++ return NULL; ++ return find_type (inner_type (type)); ++} ++ ++long unsigned int ++ipa_struct_reorg::get_true_field_offset (srfield *field, srtype *type) ++{ ++ unsigned HOST_WIDE_INT new_offset; ++ new_offset = *(type->new_field_offsets.get (field->newfield[0])); ++ return new_offset; ++} ++ ++tree ++ipa_struct_reorg::get_true_pointer_base (gimple_stmt_iterator *gsi, ++ tree mem_ref, srtype *type) ++{ ++ tree ptr = TREE_OPERAND (mem_ref, 0); ++ tree off_bytes = TREE_OPERAND (mem_ref, 1); ++ unsigned num = tree_to_shwi (off_bytes); ++ if (num == 0) ++ return ptr; ++ tree orig_size = TYPE_SIZE_UNIT (TREE_TYPE (mem_ref)); ++ tree off = build_int_cst (long_integer_type_node, ++ num / tree_to_uhwi (orig_size)); ++ gimple *stmt = gsi_stmt (*gsi); ++ tree new_pointer_base = rewrite_pointer_plus_integer (stmt, gsi, ptr, ++ off, type); ++ return new_pointer_base; ++} ++ ++tree ++ipa_struct_reorg::rewrite_address (tree pointer_base, srfield *field, ++ srtype *type, gimple_stmt_iterator *gsi) ++{ ++ unsigned HOST_WIDE_INT field_offset = get_true_field_offset (field, type); ++ ++ tree pointer_ssa = fold_convert (long_unsigned_type_node, pointer_base); ++ tree step1 = gimplify_build1 (gsi, NOP_EXPR, long_unsigned_type_node, ++ pointer_ssa); ++ tree new_offset_ssa = build_int_cst (long_unsigned_type_node, field_offset); ++ tree step2 = gimplify_build2 (gsi, PLUS_EXPR, long_unsigned_type_node, step1, ++ new_offset_ssa); 
++ tree field_ssa = fold_convert ( ++ build_pointer_type (TREE_TYPE (field->newfield[0])), step2); ++ tree step3 = gimplify_build1 (gsi, NOP_EXPR, ++ TREE_TYPE (field_ssa), field_ssa); ++ ++ tree new_mem_ref = fold_build2 (MEM_REF, TREE_TYPE (field->newfield[0]), ++ step3, build_int_cst ( ++ TREE_TYPE (field_ssa), 0)); ++ return new_mem_ref; ++} ++ ++bool ++ipa_struct_reorg::check_sr_copy (gimple *stmt) ++{ ++ tree lhs = gimple_assign_lhs (stmt); ++ tree rhs = gimple_assign_rhs1 (stmt); ++ ++ if (TREE_CODE (lhs) != MEM_REF || TREE_CODE (rhs) != MEM_REF) ++ return false; ++ srtype *t1 = get_relayout_candidate_type (TREE_TYPE (lhs)); ++ srtype *t2 = get_relayout_candidate_type (TREE_TYPE (rhs)); ++ if (!t1 || !t2 || !t1->semi_relayout || !t2->semi_relayout || t1 != t2) ++ return false; ++ tree pointer1 = TREE_OPERAND (lhs, 0); ++ tree pointer2 = TREE_OPERAND (rhs, 0); ++ if (TREE_CODE (TREE_TYPE (pointer1)) != POINTER_TYPE ++ || TREE_CODE (TREE_TYPE (pointer2)) != POINTER_TYPE) ++ return false; ++ ++ tree type1 = TREE_TYPE (TREE_TYPE (pointer1)); ++ tree type2 = TREE_TYPE (TREE_TYPE (pointer2)); ++ ++ srtype *t3 = get_relayout_candidate_type (type1); ++ srtype *t4 = get_relayout_candidate_type (type2); ++ ++ if (t3 != t4 || t3 != t1) ++ return false; ++ ++ return true; ++} ++ ++void ++ipa_struct_reorg::relayout_field_copy (gimple_stmt_iterator *gsi, ++ gimple *stmt ATTRIBUTE_UNUSED, ++ tree lhs, tree rhs ATTRIBUTE_UNUSED, ++ tree &newlhs, tree &newrhs) ++{ ++ srtype *type = get_relayout_candidate_type (TREE_TYPE (lhs)); ++ tree lhs_base_pointer = get_true_pointer_base (gsi, newlhs, type); ++ tree rhs_base_pointer = get_true_pointer_base (gsi, newrhs, type); ++ tree new_l_mem_ref = NULL_TREE; ++ tree new_r_mem_ref = NULL_TREE; ++ srfield *field = NULL; ++ unsigned i = 0; ++ FOR_EACH_VEC_ELT (type->fields, i, field) ++ { ++ if (!field->newfield[0]) ++ continue; ++ new_l_mem_ref = rewrite_address (lhs_base_pointer, field, type, gsi); ++ new_r_mem_ref = rewrite_address 
(rhs_base_pointer, field, type, gsi); ++ if (!is_gimple_reg (new_l_mem_ref)) ++ { ++ tree tmp_reg = create_tmp_reg (TREE_TYPE(new_l_mem_ref)); ++ gimple *copy_stmt = gimple_build_assign (tmp_reg, new_r_mem_ref); ++ gsi_insert_before (gsi, copy_stmt, GSI_SAME_STMT); ++ new_r_mem_ref = tmp_reg; ++ } ++ gimple *new_stmt = gimple_build_assign (new_l_mem_ref, new_r_mem_ref); ++ gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT); ++ } ++ gcc_assert (new_l_mem_ref != NULL_TREE && new_r_mem_ref != NULL_TREE); ++ newlhs = new_l_mem_ref; ++ newrhs = new_r_mem_ref; ++} ++ ++bool ++ipa_struct_reorg::do_semi_relayout (gimple_stmt_iterator *gsi, gimple *stmt, ++ tree &newlhs, tree &newrhs) ++{ ++ tree lhs = gimple_assign_lhs (stmt); ++ tree rhs = gimple_assign_rhs1 (stmt); ++ ++ bool l = TREE_CODE (lhs) == COMPONENT_REF ? is_semi_relayout_candidate (lhs) ++ : false; ++ bool r = TREE_CODE (rhs) == COMPONENT_REF ? is_semi_relayout_candidate (rhs) ++ : false; ++ ++ gcc_assert (!(l && r)); ++ ++ if (!l && !r) ++ { ++ if (check_sr_copy (stmt)) ++ { ++ relayout_field_copy (gsi, stmt, lhs, rhs, newlhs, newrhs); ++ return true; ++ } ++ } ++ else if (l) ++ { ++ srtype *type = get_relayout_candidate_type ( ++ TREE_TYPE (TREE_OPERAND (lhs, 0))); ++ srfield *new_field = type->find_field ( ++ int_byte_position (TREE_OPERAND (lhs, 1))); ++ tree pointer_base = get_true_pointer_base ( ++ gsi, TREE_OPERAND (newlhs, 0), type); ++ newlhs = rewrite_address (pointer_base, new_field, type, gsi); ++ } ++ else if (r) ++ { ++ srtype *type = get_relayout_candidate_type ( ++ TREE_TYPE (TREE_OPERAND (rhs, 0))); ++ srfield *new_field = type->find_field ( ++ int_byte_position (TREE_OPERAND (rhs, 1))); ++ tree pointer_base = get_true_pointer_base ( ++ gsi, TREE_OPERAND (newrhs, 0), type); ++ newrhs = rewrite_address (pointer_base, new_field, type, gsi); ++ } ++ return false; ++} ++ + bool + ipa_struct_reorg::rewrite_assign (gassign *stmt, gimple_stmt_iterator *gsi) + { +@@ -6677,7 +7269,8 @@ 
ipa_struct_reorg::rewrite_assign (gassign *stmt, gimple_stmt_iterator *gsi) + tree size = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (lhs))); + tree num; + /* Check if rhs2 is a multiplication of the size of the type. */ +- if (!is_result_of_mult (rhs2, &num, size)) ++ if (!is_result_of_mult (rhs2, &num, size) ++ && !(current_layout_opt_level & SEMI_RELAYOUT)) + internal_error ( + "The rhs of pointer is not a multiplicate and it slips through"); + +@@ -6698,12 +7291,39 @@ ipa_struct_reorg::rewrite_assign (gassign *stmt, gimple_stmt_iterator *gsi) + tree newsize = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (newlhs[i]))); + newsize = gimplify_build2 (gsi, MULT_EXPR, sizetype, num, + newsize); ++ if (current_layout_opt_level >= SEMI_RELAYOUT) ++ { ++ if (is_semi_relayout_candidate (lhs)) ++ { ++ srtype *type = get_semi_relayout_candidate_type (lhs); ++ newrhs[i] = rewrite_pointer_plus_integer (stmt, gsi, ++ newrhs[i], ++ num, type); ++ newsize = build_int_cst (long_unsigned_type_node, 0); ++ } ++ } + new_stmt = gimple_build_assign (newlhs[i], POINTER_PLUS_EXPR, + newrhs[i], newsize); + } + else +- new_stmt = gimple_build_assign (newlhs[i], POINTER_PLUS_EXPR, +- newrhs[i], rhs2); ++ { ++ /* rhs2 is not a const integer */ ++ if (current_layout_opt_level >= SEMI_RELAYOUT) ++ { ++ if (is_semi_relayout_candidate (lhs)) ++ { ++ num = build_div_expr (gsi, rhs2, ++ build_int_cst ( ++ long_unsigned_type_node, 1)); ++ srtype *type = get_semi_relayout_candidate_type (lhs); ++ newrhs[i] = rewrite_pointer_plus_integer (stmt, ++ gsi, newrhs[i], num, type); ++ rhs2 = build_int_cst (long_unsigned_type_node, 0); ++ } ++ } ++ new_stmt = gimple_build_assign (newlhs[i], POINTER_PLUS_EXPR, ++ newrhs[i], rhs2); ++ } + gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT); + remove = true; + } +@@ -6744,13 +7364,34 @@ ipa_struct_reorg::rewrite_assign (gassign *stmt, gimple_stmt_iterator *gsi) + return false; + + /* The two operands always have pointer/reference type. 
*/ +- for (unsigned i = 0; i < max_split && newrhs1[i] && newrhs2[i]; i++) ++ if (current_layout_opt_level >= SEMI_RELAYOUT ++ && (is_semi_relayout_candidate (rhs1) || is_semi_relayout_candidate (rhs2))) + { +- gimple_assign_set_rhs1 (stmt, newrhs1[i]); +- gimple_assign_set_rhs2 (stmt, newrhs2[i]); +- update_stmt (stmt); ++ for (unsigned i = 0; i < max_split && newrhs1[i] &&newrhs2[i]; i++) ++ { ++ srtype *type = get_semi_relayout_candidate_type (rhs1); ++ if (!type) ++ { ++ type = get_semi_relayout_candidate_type (rhs2); ++ } ++ gcc_assert (type != NULL); ++ tree res = rewrite_pointer_diff (gsi, newrhs1[i], ++ newrhs2[i], type); ++ gimple *g = gimple_build_assign (gimple_assign_lhs (stmt), ++ res); ++ gsi_insert_before (gsi, g, GSI_SAME_STMT); ++ } ++ remove = true; ++ } ++ else ++ { ++ for (unsigned i = 0; i < max_split && newrhs1[i] && newrhs2[i]; i++) ++ { ++ gimple_assign_set_rhs1 (stmt, newrhs1[i]); ++ gimple_assign_set_rhs2 (stmt, newrhs2[i]); ++ update_stmt (stmt); ++ } + } +- remove = false; + return remove; + } + +@@ -6777,18 +7418,24 @@ ipa_struct_reorg::rewrite_assign (gassign *stmt, gimple_stmt_iterator *gsi) + fprintf (dump_file, "\nreplaced with:\n"); + for (unsigned i = 0; i < max_split && (newlhs[i] || newrhs[i]); i++) + { ++ bool fields_copied = false; ++ if (current_layout_opt_level & SEMI_RELAYOUT) ++ fields_copied = do_semi_relayout (gsi, stmt, newlhs[i], newrhs[i]); + if (current_layout_opt_level >= POINTER_COMPRESSION_SAFE) + try_rewrite_with_pointer_compression (stmt, gsi, lhs, rhs, + newlhs[i], newrhs[i]); +- gimple *newstmt = gimple_build_assign (newlhs[i] ? newlhs[i] : lhs, +- newrhs[i] ? newrhs[i] : rhs); ++ remove = true; ++ if (fields_copied) ++ continue; ++ tree lhs_expr = newlhs[i] ? newlhs[i] : lhs; ++ tree rhs_expr = newrhs[i] ? 
newrhs[i] : rhs; ++ gimple *newstmt = gimple_build_assign (lhs_expr, rhs_expr); + if (dump_file && (dump_flags & TDF_DETAILS)) + { + print_gimple_stmt (dump_file, newstmt, 0); + fprintf (dump_file, "\n"); + } + gsi_insert_before (gsi, newstmt, GSI_SAME_STMT); +- remove = true; + } + return remove; + } +@@ -6796,6 +7443,110 @@ ipa_struct_reorg::rewrite_assign (gassign *stmt, gimple_stmt_iterator *gsi) + return remove; + } + ++tree ++ipa_struct_reorg::get_real_allocated_ptr (tree ptr, gimple_stmt_iterator *gsi) ++{ ++ tree ptr_to_int = fold_convert (long_unsigned_type_node, ptr); ++ tree align = build_int_cst (long_unsigned_type_node, relayout_part_size); ++ tree real_addr = gimplify_build2 (gsi, MINUS_EXPR, long_unsigned_type_node, ++ ptr_to_int, align); ++ tree res = gimplify_build1 (gsi, NOP_EXPR, ++ build_pointer_type (long_unsigned_type_node), ++ real_addr); ++ return res; ++} ++ ++tree ++ipa_struct_reorg::set_ptr_for_use (tree ptr, gimple_stmt_iterator *gsi) ++{ ++ tree ptr_to_int = fold_convert (long_unsigned_type_node, ptr); ++ tree align = build_int_cst (long_unsigned_type_node, relayout_part_size); ++ tree ptr_int = gimplify_build2 (gsi, PLUS_EXPR, long_unsigned_type_node, ++ ptr_to_int, align); ++ tree res = gimplify_build1 (gsi, NOP_EXPR, ++ build_pointer_type (long_unsigned_type_node), ++ ptr_int); ++ return res; ++} ++ ++void ++ipa_struct_reorg::record_allocated_size (tree ptr, gimple_stmt_iterator *gsi, ++ tree size) ++{ ++ tree lhs = fold_build2 (MEM_REF, long_unsigned_type_node, ptr, ++ build_int_cst (build_pointer_type ( ++ long_unsigned_type_node), 0)); ++ gimple *stmt = gimple_build_assign (lhs, size); ++ gsi_insert_before (gsi, stmt, GSI_SAME_STMT); ++} ++ ++tree ++ipa_struct_reorg::read_allocated_size (tree ptr, gimple_stmt_iterator *gsi) ++{ ++ tree to_type = build_pointer_type (long_unsigned_type_node); ++ tree off = build_int_cst (to_type, 0); ++ tree size = gimplify_build2 (gsi, MEM_REF, long_unsigned_type_node, ++ ptr, off); ++ return size; 
++} ++ ++gimple * ++ipa_struct_reorg::create_aligned_alloc (gimple_stmt_iterator *gsi, ++ srtype *type, tree num, tree &size) ++{ ++ tree fn = builtin_decl_implicit (BUILT_IN_ALIGNED_ALLOC); ++ ++ tree align = build_int_cst (long_unsigned_type_node, relayout_part_size); ++ unsigned bucket_size = type->bucket_size; ++ ++ tree nbuckets = gimplify_build2 (gsi, CEIL_DIV_EXPR, long_unsigned_type_node, ++ num, build_int_cst (long_unsigned_type_node, ++ relayout_part_size / 8)); ++ tree use_size = gimplify_build2 (gsi, MULT_EXPR, long_unsigned_type_node, ++ nbuckets, build_int_cst ( ++ long_unsigned_type_node, bucket_size)); ++ size = gimplify_build2 (gsi, PLUS_EXPR, long_unsigned_type_node, ++ use_size, align); ++ gimple *g = gimple_build_call (fn, 2, align, size); ++ gsi_insert_before (gsi, g, GSI_SAME_STMT); ++ return g; ++} ++ ++void ++ipa_struct_reorg::create_memset_zero (tree ptr, gimple_stmt_iterator *gsi, ++ tree size) ++{ ++ tree fn = builtin_decl_implicit (BUILT_IN_MEMSET); ++ tree val = build_int_cst (long_unsigned_type_node, 0); ++ gimple *g = gimple_build_call (fn, 3, ptr, val, size); ++ gsi_insert_before (gsi, g, GSI_SAME_STMT); ++} ++ ++void ++ipa_struct_reorg::create_memcpy (tree src, tree dst, tree size, ++ gimple_stmt_iterator *gsi) ++{ ++ tree fn = builtin_decl_implicit (BUILT_IN_MEMCPY); ++ gimple *g = gimple_build_call (fn, 3, dst, src, size); ++ gsi_insert_before (gsi, g, GSI_SAME_STMT); ++} ++ ++void ++ipa_struct_reorg::create_free (tree ptr, gimple_stmt_iterator *gsi) ++{ ++ tree fn = builtin_decl_implicit (BUILT_IN_FREE); ++ gimple *g = gimple_build_call (fn, 1, ptr); ++ gsi_insert_before (gsi, g, GSI_SAME_STMT); ++} ++ ++void ++ipa_struct_reorg::copy_to_lhs (tree lhs, tree new_lhs, ++ gimple_stmt_iterator *gsi) ++{ ++ gimple *g = gimple_build_assign (lhs, new_lhs); ++ gsi_insert_before (gsi, g, GSI_SAME_STMT); ++} ++ + /* Rewrite function call statement STMT. Return TRUE if the statement + is to be removed. 
*/ + +@@ -6837,25 +7588,74 @@ ipa_struct_reorg::rewrite_call (gcall *stmt, gimple_stmt_iterator *gsi) + ? TYPE_SIZE_UNIT (decl->orig_type) + : TYPE_SIZE_UNIT (type->newtype[i]); + gimple *g; +- /* Every allocation except for calloc needs +- the size multiplied out. */ +- if (!gimple_call_builtin_p (stmt, BUILT_IN_CALLOC)) +- newsize = gimplify_build2 (gsi, MULT_EXPR, sizetype, num, newsize); +- +- if (gimple_call_builtin_p (stmt, BUILT_IN_MALLOC) +- || gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA)) +- g = gimple_build_call (gimple_call_fndecl (stmt), +- 1, newsize); +- else if (gimple_call_builtin_p (stmt, BUILT_IN_CALLOC)) +- g = gimple_build_call (gimple_call_fndecl (stmt), +- 2, num, newsize); +- else if (gimple_call_builtin_p (stmt, BUILT_IN_REALLOC)) +- g = gimple_build_call (gimple_call_fndecl (stmt), +- 2, newrhs1[i], newsize); +- else +- gcc_assert (false); +- gimple_call_set_lhs (g, decl->newdecl[i]); +- gsi_insert_before (gsi, g, GSI_SAME_STMT); ++ bool rewrite = false; ++ if (current_layout_opt_level >= SEMI_RELAYOUT ++ && type->semi_relayout) ++ { ++ if (gimple_call_builtin_p (stmt, BUILT_IN_MALLOC)) ++ ; ++ else if (gimple_call_builtin_p (stmt, BUILT_IN_CALLOC)) ++ { ++ tree rhs2 = gimple_call_arg (stmt, 1); ++ if (tree_to_uhwi (rhs2) == tree_to_uhwi (TYPE_SIZE_UNIT (type->type))) ++ { ++ rewrite = true; ++ tree size = NULL_TREE; ++ g = create_aligned_alloc (gsi, type, num, size); ++ tree real_ptr = make_ssa_name (build_pointer_type (unsigned_char_type_node)); ++ gimple_set_lhs (g, real_ptr); ++ create_memset_zero (real_ptr, gsi, size); ++ record_allocated_size (real_ptr, gsi, size); ++ tree lhs_use = set_ptr_for_use (real_ptr, gsi); ++ copy_to_lhs (decl->newdecl[i], lhs_use, gsi); ++ } ++ } ++ else if (gimple_call_builtin_p (stmt, BUILT_IN_REALLOC)) ++ { ++ rewrite = true; ++ tree size = NULL_TREE; ++ g = create_aligned_alloc (gsi, type, num, size); ++ tree real_ptr = make_ssa_name (build_pointer_type (unsigned_char_type_node)); ++ gimple_set_lhs (g, 
real_ptr); ++ create_memset_zero (real_ptr, gsi, size); ++ tree src = get_real_allocated_ptr (newrhs1[i], gsi); ++ tree old_size = read_allocated_size (src, gsi); ++ create_memcpy (src, real_ptr, old_size, gsi); ++ record_allocated_size (real_ptr, gsi, size); ++ tree lhs_use = set_ptr_for_use (real_ptr, gsi); ++ create_free (src, gsi); ++ copy_to_lhs (decl->newdecl[i], lhs_use, gsi); ++ } ++ else ++ { ++ gcc_assert (false); ++ internal_error ("supported type for semi-relayout."); ++ } ++ } ++ if (!rewrite ++ && (current_layout_opt_level >= STRUCT_REORDER_FIELDS ++ || current_layout_opt_level == STRUCT_SPLIT)) ++ { ++ /* Every allocation except for calloc needs the size multiplied out. */ ++ if (!gimple_call_builtin_p (stmt, BUILT_IN_CALLOC)) ++ newsize = gimplify_build2 (gsi, MULT_EXPR, sizetype, ++ num, newsize); ++ if (gimple_call_builtin_p (stmt, BUILT_IN_MALLOC) ++ || gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA)) ++ g = gimple_build_call (gimple_call_fndecl (stmt), 1, newsize); ++ else if (gimple_call_builtin_p (stmt, BUILT_IN_CALLOC)) ++ g = gimple_build_call (gimple_call_fndecl (stmt), 2, ++ num, newsize); ++ else if (gimple_call_builtin_p (stmt, BUILT_IN_REALLOC)) ++ g = gimple_build_call (gimple_call_fndecl (stmt), 2, ++ newrhs1[i], newsize); ++ else ++ gcc_assert (false); ++ gimple_call_set_lhs (g, decl->newdecl[i]); ++ gsi_insert_before (gsi, g, GSI_SAME_STMT); ++ } ++ ++ + if (type->pc_candidate) + { + /* Init global header for pointer compression. 
*/ +@@ -6875,11 +7675,14 @@ ipa_struct_reorg::rewrite_call (gcall *stmt, gimple_stmt_iterator *gsi) + if (!rewrite_expr (expr, newexpr)) + return false; + ++ srtype *t = find_type (TREE_TYPE (TREE_TYPE (expr))); + if (newexpr[1] == NULL) + { +- gimple_call_set_arg (stmt, 0, newexpr[0]); +- update_stmt (stmt); +- return false; ++ if (t && t->semi_relayout) ++ newexpr[0] = get_real_allocated_ptr (newexpr[0], gsi); ++ gimple_call_set_arg (stmt, 0, newexpr[0]); ++ update_stmt (stmt); ++ return false; + } + + for (unsigned i = 0; i < max_split && newexpr[i]; i++) +@@ -7571,6 +8374,86 @@ ipa_struct_reorg::check_and_prune_struct_for_pointer_compression (void) + } + } + ++void ++ipa_struct_reorg::check_and_prune_struct_for_semi_relayout (void) ++{ ++ unsigned relayout_transform = 0; ++ for (unsigned i = 0; i < types.length (); i++) ++ { ++ srtype *type = types[i]; ++ if (dump_file) ++ { ++ print_generic_expr (dump_file, type->type); ++ } ++ if (type->has_escaped ()) ++ { ++ if (dump_file) ++ { ++ fprintf (dump_file, " has escaped by %s, " ++ "skip relayout.\n", type->escape_reason()); ++ } ++ continue; ++ } ++ if (TYPE_FIELDS (type->type) == NULL) ++ { ++ if (dump_file) ++ { ++ fprintf (dump_file, " has zero field, skip relayout.\n"); ++ } ++ continue; ++ } ++ if (type->chain_type) ++ { ++ if (dump_file) ++ { ++ fprintf (dump_file, " is chain_type, skip relayout.\n"); ++ } ++ continue; ++ } ++ if (type->has_alloc_array == 0 || type->has_alloc_array == 1 ++ || type->has_alloc_array == -1 || type->has_alloc_array == -3 ++ || type->has_alloc_array == -4) ++ { ++ if (dump_file) ++ { ++ fprintf (dump_file, " has alloc number: %d," ++ " skip relayout.\n", type->has_alloc_array); ++ } ++ continue; ++ } ++ if (get_type_name (type->type) == NULL) ++ { ++ if (dump_file) ++ { ++ fprintf (dump_file, " has empty struct name," ++ " skip relayout.\n"); ++ } ++ continue; ++ } ++ relayout_transform++; ++ type->semi_relayout = true; ++ if (dump_file) ++ { ++ fprintf (dump_file, " attempts 
to do semi-relayout.\n"); ++ } ++ } ++ ++ if (dump_file) ++ { ++ if (relayout_transform) ++ { ++ fprintf (dump_file, "\nNumber of structures to transform in " ++ "semi-relayout is %d\n", relayout_transform); ++ } ++ else ++ { ++ fprintf (dump_file, "\nNo structures to transform in " ++ "semi-relayout.\n"); ++ } ++ } ++} ++ ++ + /* Init pointer size from parameter param_pointer_compression_size. */ + + static void +@@ -7612,6 +8495,8 @@ ipa_struct_reorg::execute (unsigned int opt) + + if (opt >= POINTER_COMPRESSION_SAFE) + check_and_prune_struct_for_pointer_compression (); ++ if (opt >= SEMI_RELAYOUT) ++ check_and_prune_struct_for_semi_relayout (); + ret = rewrite_functions (); + } + else +@@ -7659,6 +8544,8 @@ public: + unsigned int level = 0; + switch (struct_layout_optimize_level) + { ++ case 6: level |= SEMI_RELAYOUT; ++ // FALLTHRU + case 5: level |= POINTER_COMPRESSION_UNSAFE; + // FALLTHRU + case 4: level |= POINTER_COMPRESSION_SAFE; +@@ -7678,6 +8565,12 @@ public: + if (level & POINTER_COMPRESSION_SAFE) + init_pointer_size_for_pointer_compression (); + ++ if (level & SEMI_RELAYOUT) ++ { ++ semi_relayout_align = semi_relayout_level; ++ relayout_part_size = 1 << semi_relayout_level; ++ } ++ + /* Preserved for backward compatibility, reorder fields needs run before + struct split and complete struct relayout. */ + if (flag_ipa_reorder_fields && level < STRUCT_REORDER_FIELDS) +diff --git a/gcc/ipa-struct-reorg/ipa-struct-reorg.h b/gcc/ipa-struct-reorg/ipa-struct-reorg.h +index 6c4469597..e3e6d7afb 100644 +--- a/gcc/ipa-struct-reorg/ipa-struct-reorg.h ++++ b/gcc/ipa-struct-reorg/ipa-struct-reorg.h +@@ -25,6 +25,9 @@ namespace struct_reorg { + + const int max_split = 2; + ++unsigned semi_relayout_align = semi_relayout_level; ++unsigned relayout_part_size = 1 << semi_relayout_level; ++ + template + struct auto_vec_del : auto_vec + { +@@ -128,6 +131,10 @@ public: + /* Negative number means it has illegal allocated arrays + that we do not optimize. 
*/ + int has_alloc_array; ++ bool semi_relayout; ++ hash_map new_field_offsets; ++ unsigned bucket_parts; ++ unsigned bucket_size; + + // Constructors + srtype (tree type); +@@ -149,6 +156,7 @@ public: + bool has_dead_field (void); + void mark_escape (escape_type, gimple *stmt); + void create_global_ptr_for_pc (); ++ unsigned calculate_bucket_size (); + bool has_escaped (void) + { + return escapes != does_not_escape; +diff --git a/gcc/params.opt b/gcc/params.opt +index bb5d82471..82a3d92c5 100644 +--- a/gcc/params.opt ++++ b/gcc/params.opt +@@ -1221,4 +1221,9 @@ Target size of compressed pointer, which should be 8, 16 or 32. + -param=param-ldp-dependency-search-range= + Common Joined UInteger Var(param_ldp_dependency_search_range) Init(16) IntegerRange(1, 32) Param Optimization + Range for depended ldp search in split-ldp-stp path. ++ ++-param=semi-relayout-level= ++Common Joined UInteger Var(semi_relayout_level) Init(13) IntegerRange(11, 15) Param Optimization ++Set capacity of each bucket to semi-relayout to (1 << semi-relayout-level) / 8 . ++ + ; This comment is to ensure we retain the blank line above. 
+diff --git a/gcc/testsuite/gcc.dg/struct/semi_relayout_rewrite.c b/gcc/testsuite/gcc.dg/struct/semi_relayout_rewrite.c +new file mode 100644 +index 000000000..aca8400ca +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/semi_relayout_rewrite.c +@@ -0,0 +1,86 @@ ++// Check simplify rewrite chance for semi-relayout ++/* { dg-do compile } */ ++ ++#include ++#include ++ ++typedef struct node node_t; ++typedef struct node *node_p; ++ ++typedef struct arc arc_t; ++typedef struct arc *arc_p; ++ ++typedef struct network ++{ ++ arc_p arcs; ++ arc_p sorted_arcs; ++ int x; ++ node_p nodes; ++ node_p stop_nodes; ++} network_t; ++ ++struct node ++{ ++ int64_t potential; ++ int orientation; ++ node_p child; ++ node_p pred; ++ node_p sibling; ++ node_p sibling_prev; ++ arc_p basic_arc; ++ arc_p firstout; ++ arc_p firstin; ++ arc_p arc_tmp; ++ int64_t flow; ++ int64_t depth; ++ int number; ++ int time; ++}; ++ ++struct arc ++{ ++ int id; ++ int64_t cost; ++ node_p tail; ++ node_p head; ++ short ident; ++ arc_p nextout; ++ arc_p nextin; ++ int64_t flow; ++ int64_t org_cost; ++ network_t* net_add; ++}; ++ ++ ++const int MAX = 100; ++network_t* net; ++node_p node; ++arc_p arc; ++ ++int ++main () ++{ ++ net = (network_t*) calloc (1, sizeof(network_t)); ++ net->arcs = (arc_p) calloc (MAX, sizeof (arc_t)); ++ net->sorted_arcs = (arc_p) calloc (MAX, sizeof (arc_t)); ++ net->nodes = (node_p) calloc (MAX, sizeof (node_t)); ++ net->arcs->id = 100; ++ ++ node = net->nodes; ++ arc = net->arcs; ++ ++ for (unsigned i = 0; i < MAX; i++) ++ { ++ arc->head = node; ++ arc->head->child = node; ++ node->potential = i + 1; ++ arc->cost = arc->head->potential; ++ arc->tail = node->sibling; ++ node = node + 1; ++ arc = arc + 1; ++ } ++ ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump "Number of structures to transform in semi-relayout is 1" "struct_reorg" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/struct-reorg.exp b/gcc/testsuite/gcc.dg/struct/struct-reorg.exp 
+index c40474407..c5a955b00 100644 +--- a/gcc/testsuite/gcc.dg/struct/struct-reorg.exp ++++ b/gcc/testsuite/gcc.dg/struct/struct-reorg.exp +@@ -55,6 +55,10 @@ gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/pc*.c]] \ + gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/pc*.c]] \ + "" "-fipa-struct-reorg=5 -fdump-ipa-all -flto-partition=one -fwhole-program" + ++# -fipa-struct-reorg=6 ++gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/semi_relayout*.c]] \ ++ "" "-fipa-struct-reorg=6 -fdump-ipa-all -flto-partition=one -fwhole-program" ++ + # All done. + torture-finish + dg-finish +-- +2.33.0 + diff --git a/0054-BUGFIX-Fix-the-configure-file-of-BOLT.patch b/0054-BUGFIX-Fix-the-configure-file-of-BOLT.patch new file mode 100644 index 0000000000000000000000000000000000000000..1d63246e0db9c267f4a3673683cd723dc2f2d82f --- /dev/null +++ b/0054-BUGFIX-Fix-the-configure-file-of-BOLT.patch @@ -0,0 +1,30102 @@ +From e245129ab722da21df3a2853474a9d4acf47fe67 Mon Sep 17 00:00:00 2001 +From: zhenyu--zhao_admin +Date: Fri, 26 Apr 2024 21:34:19 +0800 +Subject: [PATCH] [BUGFIX] Fix the configure file of BOLT + +--- + Makefile.in | 1319 +++++ + bolt-plugin/Makefile.in | 34 +- + bolt-plugin/aclocal.m4 | 9169 +----------------------------- + bolt-plugin/config.h.in | 106 +- + bolt-plugin/configure | 11313 +++++++++++++++----------------------- + 5 files changed, 5796 insertions(+), 16145 deletions(-) + +diff --git a/Makefile.in b/Makefile.in +index 593495e16..7785b3d9a 100644 +--- a/Makefile.in ++++ b/Makefile.in +@@ -1111,6 +1111,7 @@ configure-host: \ + maybe-configure-c++tools \ + maybe-configure-gnattools \ + maybe-configure-lto-plugin \ ++ maybe-configure-bolt-plugin \ + maybe-configure-libcc1 \ + maybe-configure-gotools \ + maybe-configure-libctf +@@ -1288,6 +1289,9 @@ all-host: maybe-all-gnattools + @if lto-plugin-no-bootstrap + all-host: maybe-all-lto-plugin + @endif lto-plugin-no-bootstrap ++@if bolt-plugin-no-bootstrap ++all-host: maybe-all-bolt-plugin 
++@endif bolt-plugin-no-bootstrap + all-host: maybe-all-libcc1 + all-host: maybe-all-gotools + @if libctf-no-bootstrap +@@ -1403,6 +1407,7 @@ info-host: maybe-info-utils + info-host: maybe-info-c++tools + info-host: maybe-info-gnattools + info-host: maybe-info-lto-plugin ++info-host: maybe-info-bolt-plugin + info-host: maybe-info-libcc1 + info-host: maybe-info-gotools + info-host: maybe-info-libctf +@@ -1493,6 +1498,7 @@ dvi-host: maybe-dvi-utils + dvi-host: maybe-dvi-c++tools + dvi-host: maybe-dvi-gnattools + dvi-host: maybe-dvi-lto-plugin ++dvi-host: maybe-dvi-bolt-plugin + dvi-host: maybe-dvi-libcc1 + dvi-host: maybe-dvi-gotools + dvi-host: maybe-dvi-libctf +@@ -1583,6 +1589,7 @@ pdf-host: maybe-pdf-utils + pdf-host: maybe-pdf-c++tools + pdf-host: maybe-pdf-gnattools + pdf-host: maybe-pdf-lto-plugin ++pdf-host: maybe-pdf-bolt-plugin + pdf-host: maybe-pdf-libcc1 + pdf-host: maybe-pdf-gotools + pdf-host: maybe-pdf-libctf +@@ -1673,6 +1680,7 @@ html-host: maybe-html-utils + html-host: maybe-html-c++tools + html-host: maybe-html-gnattools + html-host: maybe-html-lto-plugin ++html-host: maybe-html-bolt-plugin + html-host: maybe-html-libcc1 + html-host: maybe-html-gotools + html-host: maybe-html-libctf +@@ -1763,6 +1771,7 @@ TAGS-host: maybe-TAGS-utils + TAGS-host: maybe-TAGS-c++tools + TAGS-host: maybe-TAGS-gnattools + TAGS-host: maybe-TAGS-lto-plugin ++TAGS-host: maybe-TAGS-bolt-plugin + TAGS-host: maybe-TAGS-libcc1 + TAGS-host: maybe-TAGS-gotools + TAGS-host: maybe-TAGS-libctf +@@ -1853,6 +1862,7 @@ install-info-host: maybe-install-info-utils + install-info-host: maybe-install-info-c++tools + install-info-host: maybe-install-info-gnattools + install-info-host: maybe-install-info-lto-plugin ++install-info-host: maybe-install-info-bolt-plugin + install-info-host: maybe-install-info-libcc1 + install-info-host: maybe-install-info-gotools + install-info-host: maybe-install-info-libctf +@@ -1943,6 +1953,7 @@ install-dvi-host: maybe-install-dvi-utils + install-dvi-host: 
maybe-install-dvi-c++tools + install-dvi-host: maybe-install-dvi-gnattools + install-dvi-host: maybe-install-dvi-lto-plugin ++install-dvi-host: maybe-install-dvi-bolt-plugin + install-dvi-host: maybe-install-dvi-libcc1 + install-dvi-host: maybe-install-dvi-gotools + install-dvi-host: maybe-install-dvi-libctf +@@ -2033,6 +2044,7 @@ install-pdf-host: maybe-install-pdf-utils + install-pdf-host: maybe-install-pdf-c++tools + install-pdf-host: maybe-install-pdf-gnattools + install-pdf-host: maybe-install-pdf-lto-plugin ++install-pdf-host: maybe-install-pdf-bolt-plugin + install-pdf-host: maybe-install-pdf-libcc1 + install-pdf-host: maybe-install-pdf-gotools + install-pdf-host: maybe-install-pdf-libctf +@@ -2123,6 +2135,7 @@ install-html-host: maybe-install-html-utils + install-html-host: maybe-install-html-c++tools + install-html-host: maybe-install-html-gnattools + install-html-host: maybe-install-html-lto-plugin ++install-html-host: maybe-install-html-bolt-plugin + install-html-host: maybe-install-html-libcc1 + install-html-host: maybe-install-html-gotools + install-html-host: maybe-install-html-libctf +@@ -2213,6 +2226,7 @@ installcheck-host: maybe-installcheck-utils + installcheck-host: maybe-installcheck-c++tools + installcheck-host: maybe-installcheck-gnattools + installcheck-host: maybe-installcheck-lto-plugin ++installcheck-host: maybe-installcheck-bolt-plugin + installcheck-host: maybe-installcheck-libcc1 + installcheck-host: maybe-installcheck-gotools + installcheck-host: maybe-installcheck-libctf +@@ -2303,6 +2317,7 @@ mostlyclean-host: maybe-mostlyclean-utils + mostlyclean-host: maybe-mostlyclean-c++tools + mostlyclean-host: maybe-mostlyclean-gnattools + mostlyclean-host: maybe-mostlyclean-lto-plugin ++mostlyclean-host: maybe-mostlyclean-bolt-plugin + mostlyclean-host: maybe-mostlyclean-libcc1 + mostlyclean-host: maybe-mostlyclean-gotools + mostlyclean-host: maybe-mostlyclean-libctf +@@ -2393,6 +2408,7 @@ clean-host: maybe-clean-utils + clean-host: 
maybe-clean-c++tools + clean-host: maybe-clean-gnattools + clean-host: maybe-clean-lto-plugin ++clean-host: maybe-clean-bolt-plugin + clean-host: maybe-clean-libcc1 + clean-host: maybe-clean-gotools + clean-host: maybe-clean-libctf +@@ -2483,6 +2499,7 @@ distclean-host: maybe-distclean-utils + distclean-host: maybe-distclean-c++tools + distclean-host: maybe-distclean-gnattools + distclean-host: maybe-distclean-lto-plugin ++distclean-host: maybe-distclean-bolt-plugin + distclean-host: maybe-distclean-libcc1 + distclean-host: maybe-distclean-gotools + distclean-host: maybe-distclean-libctf +@@ -2573,6 +2590,7 @@ maintainer-clean-host: maybe-maintainer-clean-utils + maintainer-clean-host: maybe-maintainer-clean-c++tools + maintainer-clean-host: maybe-maintainer-clean-gnattools + maintainer-clean-host: maybe-maintainer-clean-lto-plugin ++maintainer-clean-host: maybe-maintainer-clean-bolt-plugin + maintainer-clean-host: maybe-maintainer-clean-libcc1 + maintainer-clean-host: maybe-maintainer-clean-gotools + maintainer-clean-host: maybe-maintainer-clean-libctf +@@ -2721,6 +2739,7 @@ check-host: \ + maybe-check-c++tools \ + maybe-check-gnattools \ + maybe-check-lto-plugin \ ++ maybe-check-bolt-plugin \ + maybe-check-libcc1 \ + maybe-check-gotools \ + maybe-check-libctf +@@ -2858,6 +2877,7 @@ install-host-nogcc: \ + maybe-install-c++tools \ + maybe-install-gnattools \ + maybe-install-lto-plugin \ ++ maybe-install-bolt-plugin \ + maybe-install-libcc1 \ + maybe-install-gotools \ + maybe-install-libctf +@@ -2913,6 +2933,7 @@ install-host: \ + maybe-install-c++tools \ + maybe-install-gnattools \ + maybe-install-lto-plugin \ ++ maybe-install-bolt-plugin \ + maybe-install-libcc1 \ + maybe-install-gotools \ + maybe-install-libctf +@@ -3023,6 +3044,7 @@ install-strip-host: \ + maybe-install-strip-c++tools \ + maybe-install-strip-gnattools \ + maybe-install-strip-lto-plugin \ ++ maybe-install-strip-bolt-plugin \ + maybe-install-strip-libcc1 \ + maybe-install-strip-gotools \ + 
maybe-install-strip-libctf +@@ -41493,6 +41515,1155 @@ maintainer-clean-lto-plugin: + + + ++.PHONY: configure-bolt-plugin maybe-configure-bolt-plugin ++maybe-configure-bolt-plugin: ++@if gcc-bootstrap ++configure-bolt-plugin: stage_current ++@endif gcc-bootstrap ++@if bolt-plugin ++maybe-configure-bolt-plugin: configure-bolt-plugin ++configure-bolt-plugin: ++ @r=`${PWD_COMMAND}`; export r; \ ++ s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \ ++ test ! -f $(HOST_SUBDIR)/bolt-plugin/Makefile || exit 0; \ ++ $(SHELL) $(srcdir)/mkinstalldirs $(HOST_SUBDIR)/bolt-plugin; \ ++ $(HOST_EXPORTS) \ ++ echo Configuring in $(HOST_SUBDIR)/bolt-plugin; \ ++ cd "$(HOST_SUBDIR)/bolt-plugin" || exit 1; \ ++ case $(srcdir) in \ ++ /* | [A-Za-z]:[\\/]*) topdir=$(srcdir) ;; \ ++ *) topdir=`echo $(HOST_SUBDIR)/bolt-plugin/ | \ ++ sed -e 's,\./,,g' -e 's,[^/]*/,../,g' `$(srcdir) ;; \ ++ esac; \ ++ module_srcdir=bolt-plugin; \ ++ $(SHELL) \ ++ $$s/$$module_srcdir/configure \ ++ --srcdir=$${topdir}/$$module_srcdir \ ++ $(HOST_CONFIGARGS) --build=${build_alias} --host=${host_alias} \ ++ --target=${target_alias} --enable-shared @extra_linker_plugin_flags@ @extra_linker_plugin_configure_flags@ \ ++ || exit 1 ++@endif bolt-plugin ++ ++ ++ ++.PHONY: configure-stage1-bolt-plugin maybe-configure-stage1-bolt-plugin ++maybe-configure-stage1-bolt-plugin: ++@if bolt-plugin-bootstrap ++maybe-configure-stage1-bolt-plugin: configure-stage1-bolt-plugin ++configure-stage1-bolt-plugin: ++ @[ $(current_stage) = stage1 ] || $(MAKE) stage1-start ++ @$(SHELL) $(srcdir)/mkinstalldirs $(HOST_SUBDIR)/bolt-plugin ++ @r=`${PWD_COMMAND}`; export r; \ ++ s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \ ++ TFLAGS="$(STAGE1_TFLAGS)"; \ ++ test ! 
-f $(HOST_SUBDIR)/bolt-plugin/Makefile || exit 0; \ ++ $(HOST_EXPORTS) \ ++ CFLAGS="$(STAGE1_CFLAGS)"; export CFLAGS; \ ++ CXXFLAGS="$(STAGE1_CXXFLAGS)"; export CXXFLAGS; \ ++ LIBCFLAGS="$(LIBCFLAGS)"; export LIBCFLAGS; \ ++ echo Configuring stage 1 in $(HOST_SUBDIR)/bolt-plugin; \ ++ $(SHELL) $(srcdir)/mkinstalldirs $(HOST_SUBDIR)/bolt-plugin; \ ++ cd $(HOST_SUBDIR)/bolt-plugin || exit 1; \ ++ case $(srcdir) in \ ++ /* | [A-Za-z]:[\\/]*) topdir=$(srcdir) ;; \ ++ *) topdir=`echo $(HOST_SUBDIR)/bolt-plugin/ | \ ++ sed -e 's,\./,,g' -e 's,[^/]*/,../,g' `$(srcdir) ;; \ ++ esac; \ ++ module_srcdir=bolt-plugin; \ ++ $(SHELL) $$s/$$module_srcdir/configure \ ++ --srcdir=$${topdir}/$$module_srcdir \ ++ $(HOST_CONFIGARGS) --build=${build_alias} --host=${host_alias} \ ++ --target=${target_alias} \ ++ \ ++ $(STAGE1_CONFIGURE_FLAGS) \ ++ --enable-shared @extra_linker_plugin_flags@ @extra_linker_plugin_configure_flags@ ++@endif bolt-plugin-bootstrap ++ ++.PHONY: configure-stage2-bolt-plugin maybe-configure-stage2-bolt-plugin ++maybe-configure-stage2-bolt-plugin: ++@if bolt-plugin-bootstrap ++maybe-configure-stage2-bolt-plugin: configure-stage2-bolt-plugin ++configure-stage2-bolt-plugin: ++ @[ $(current_stage) = stage2 ] || $(MAKE) stage2-start ++ @$(SHELL) $(srcdir)/mkinstalldirs $(HOST_SUBDIR)/bolt-plugin ++ @r=`${PWD_COMMAND}`; export r; \ ++ s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \ ++ TFLAGS="$(STAGE2_TFLAGS)"; \ ++ test ! 
-f $(HOST_SUBDIR)/bolt-plugin/Makefile || exit 0; \ ++ $(HOST_EXPORTS) \ ++ $(POSTSTAGE1_HOST_EXPORTS) \ ++ CFLAGS="$(STAGE2_CFLAGS)"; export CFLAGS; \ ++ CXXFLAGS="$(STAGE2_CXXFLAGS)"; export CXXFLAGS; \ ++ LIBCFLAGS="$(STAGE2_CFLAGS)"; export LIBCFLAGS; \ ++ echo Configuring stage 2 in $(HOST_SUBDIR)/bolt-plugin; \ ++ $(SHELL) $(srcdir)/mkinstalldirs $(HOST_SUBDIR)/bolt-plugin; \ ++ cd $(HOST_SUBDIR)/bolt-plugin || exit 1; \ ++ case $(srcdir) in \ ++ /* | [A-Za-z]:[\\/]*) topdir=$(srcdir) ;; \ ++ *) topdir=`echo $(HOST_SUBDIR)/bolt-plugin/ | \ ++ sed -e 's,\./,,g' -e 's,[^/]*/,../,g' `$(srcdir) ;; \ ++ esac; \ ++ module_srcdir=bolt-plugin; \ ++ $(SHELL) $$s/$$module_srcdir/configure \ ++ --srcdir=$${topdir}/$$module_srcdir \ ++ $(HOST_CONFIGARGS) --build=${build_alias} --host=${host_alias} \ ++ --target=${target_alias} \ ++ --with-build-libsubdir=$(HOST_SUBDIR) \ ++ $(STAGE2_CONFIGURE_FLAGS) \ ++ --enable-shared @extra_linker_plugin_flags@ @extra_linker_plugin_configure_flags@ ++@endif bolt-plugin-bootstrap ++ ++.PHONY: configure-stage3-bolt-plugin maybe-configure-stage3-bolt-plugin ++maybe-configure-stage3-bolt-plugin: ++@if bolt-plugin-bootstrap ++maybe-configure-stage3-bolt-plugin: configure-stage3-bolt-plugin ++configure-stage3-bolt-plugin: ++ @[ $(current_stage) = stage3 ] || $(MAKE) stage3-start ++ @$(SHELL) $(srcdir)/mkinstalldirs $(HOST_SUBDIR)/bolt-plugin ++ @r=`${PWD_COMMAND}`; export r; \ ++ s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \ ++ TFLAGS="$(STAGE3_TFLAGS)"; \ ++ test ! 
-f $(HOST_SUBDIR)/bolt-plugin/Makefile || exit 0; \ ++ $(HOST_EXPORTS) \ ++ $(POSTSTAGE1_HOST_EXPORTS) \ ++ CFLAGS="$(STAGE3_CFLAGS)"; export CFLAGS; \ ++ CXXFLAGS="$(STAGE3_CXXFLAGS)"; export CXXFLAGS; \ ++ LIBCFLAGS="$(STAGE3_CFLAGS)"; export LIBCFLAGS; \ ++ echo Configuring stage 3 in $(HOST_SUBDIR)/bolt-plugin; \ ++ $(SHELL) $(srcdir)/mkinstalldirs $(HOST_SUBDIR)/bolt-plugin; \ ++ cd $(HOST_SUBDIR)/bolt-plugin || exit 1; \ ++ case $(srcdir) in \ ++ /* | [A-Za-z]:[\\/]*) topdir=$(srcdir) ;; \ ++ *) topdir=`echo $(HOST_SUBDIR)/bolt-plugin/ | \ ++ sed -e 's,\./,,g' -e 's,[^/]*/,../,g' `$(srcdir) ;; \ ++ esac; \ ++ module_srcdir=bolt-plugin; \ ++ $(SHELL) $$s/$$module_srcdir/configure \ ++ --srcdir=$${topdir}/$$module_srcdir \ ++ $(HOST_CONFIGARGS) --build=${build_alias} --host=${host_alias} \ ++ --target=${target_alias} \ ++ --with-build-libsubdir=$(HOST_SUBDIR) \ ++ $(STAGE3_CONFIGURE_FLAGS) \ ++ --enable-shared @extra_linker_plugin_flags@ @extra_linker_plugin_configure_flags@ ++@endif bolt-plugin-bootstrap ++ ++.PHONY: configure-stage4-bolt-plugin maybe-configure-stage4-bolt-plugin ++maybe-configure-stage4-bolt-plugin: ++@if bolt-plugin-bootstrap ++maybe-configure-stage4-bolt-plugin: configure-stage4-bolt-plugin ++configure-stage4-bolt-plugin: ++ @[ $(current_stage) = stage4 ] || $(MAKE) stage4-start ++ @$(SHELL) $(srcdir)/mkinstalldirs $(HOST_SUBDIR)/bolt-plugin ++ @r=`${PWD_COMMAND}`; export r; \ ++ s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \ ++ TFLAGS="$(STAGE4_TFLAGS)"; \ ++ test ! 
-f $(HOST_SUBDIR)/bolt-plugin/Makefile || exit 0; \ ++ $(HOST_EXPORTS) \ ++ $(POSTSTAGE1_HOST_EXPORTS) \ ++ CFLAGS="$(STAGE4_CFLAGS)"; export CFLAGS; \ ++ CXXFLAGS="$(STAGE4_CXXFLAGS)"; export CXXFLAGS; \ ++ LIBCFLAGS="$(STAGE4_CFLAGS)"; export LIBCFLAGS; \ ++ echo Configuring stage 4 in $(HOST_SUBDIR)/bolt-plugin; \ ++ $(SHELL) $(srcdir)/mkinstalldirs $(HOST_SUBDIR)/bolt-plugin; \ ++ cd $(HOST_SUBDIR)/bolt-plugin || exit 1; \ ++ case $(srcdir) in \ ++ /* | [A-Za-z]:[\\/]*) topdir=$(srcdir) ;; \ ++ *) topdir=`echo $(HOST_SUBDIR)/bolt-plugin/ | \ ++ sed -e 's,\./,,g' -e 's,[^/]*/,../,g' `$(srcdir) ;; \ ++ esac; \ ++ module_srcdir=bolt-plugin; \ ++ $(SHELL) $$s/$$module_srcdir/configure \ ++ --srcdir=$${topdir}/$$module_srcdir \ ++ $(HOST_CONFIGARGS) --build=${build_alias} --host=${host_alias} \ ++ --target=${target_alias} \ ++ --with-build-libsubdir=$(HOST_SUBDIR) \ ++ $(STAGE4_CONFIGURE_FLAGS) \ ++ --enable-shared @extra_linker_plugin_flags@ @extra_linker_plugin_configure_flags@ ++@endif bolt-plugin-bootstrap ++ ++.PHONY: configure-stageprofile-bolt-plugin maybe-configure-stageprofile-bolt-plugin ++maybe-configure-stageprofile-bolt-plugin: ++@if bolt-plugin-bootstrap ++maybe-configure-stageprofile-bolt-plugin: configure-stageprofile-bolt-plugin ++configure-stageprofile-bolt-plugin: ++ @[ $(current_stage) = stageprofile ] || $(MAKE) stageprofile-start ++ @$(SHELL) $(srcdir)/mkinstalldirs $(HOST_SUBDIR)/bolt-plugin ++ @r=`${PWD_COMMAND}`; export r; \ ++ s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \ ++ TFLAGS="$(STAGEprofile_TFLAGS)"; \ ++ test ! 
-f $(HOST_SUBDIR)/bolt-plugin/Makefile || exit 0; \ ++ $(HOST_EXPORTS) \ ++ $(POSTSTAGE1_HOST_EXPORTS) \ ++ CFLAGS="$(STAGEprofile_CFLAGS)"; export CFLAGS; \ ++ CXXFLAGS="$(STAGEprofile_CXXFLAGS)"; export CXXFLAGS; \ ++ LIBCFLAGS="$(STAGEprofile_CFLAGS)"; export LIBCFLAGS; \ ++ echo Configuring stage profile in $(HOST_SUBDIR)/bolt-plugin; \ ++ $(SHELL) $(srcdir)/mkinstalldirs $(HOST_SUBDIR)/bolt-plugin; \ ++ cd $(HOST_SUBDIR)/bolt-plugin || exit 1; \ ++ case $(srcdir) in \ ++ /* | [A-Za-z]:[\\/]*) topdir=$(srcdir) ;; \ ++ *) topdir=`echo $(HOST_SUBDIR)/bolt-plugin/ | \ ++ sed -e 's,\./,,g' -e 's,[^/]*/,../,g' `$(srcdir) ;; \ ++ esac; \ ++ module_srcdir=bolt-plugin; \ ++ $(SHELL) $$s/$$module_srcdir/configure \ ++ --srcdir=$${topdir}/$$module_srcdir \ ++ $(HOST_CONFIGARGS) --build=${build_alias} --host=${host_alias} \ ++ --target=${target_alias} \ ++ --with-build-libsubdir=$(HOST_SUBDIR) \ ++ $(STAGEprofile_CONFIGURE_FLAGS) \ ++ --enable-shared @extra_linker_plugin_flags@ @extra_linker_plugin_configure_flags@ ++@endif bolt-plugin-bootstrap ++ ++.PHONY: configure-stagetrain-bolt-plugin maybe-configure-stagetrain-bolt-plugin ++maybe-configure-stagetrain-bolt-plugin: ++@if bolt-plugin-bootstrap ++maybe-configure-stagetrain-bolt-plugin: configure-stagetrain-bolt-plugin ++configure-stagetrain-bolt-plugin: ++ @[ $(current_stage) = stagetrain ] || $(MAKE) stagetrain-start ++ @$(SHELL) $(srcdir)/mkinstalldirs $(HOST_SUBDIR)/bolt-plugin ++ @r=`${PWD_COMMAND}`; export r; \ ++ s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \ ++ TFLAGS="$(STAGEtrain_TFLAGS)"; \ ++ test ! 
-f $(HOST_SUBDIR)/bolt-plugin/Makefile || exit 0; \ ++ $(HOST_EXPORTS) \ ++ $(POSTSTAGE1_HOST_EXPORTS) \ ++ CFLAGS="$(STAGEtrain_CFLAGS)"; export CFLAGS; \ ++ CXXFLAGS="$(STAGEtrain_CXXFLAGS)"; export CXXFLAGS; \ ++ LIBCFLAGS="$(STAGEtrain_CFLAGS)"; export LIBCFLAGS; \ ++ echo Configuring stage train in $(HOST_SUBDIR)/bolt-plugin; \ ++ $(SHELL) $(srcdir)/mkinstalldirs $(HOST_SUBDIR)/bolt-plugin; \ ++ cd $(HOST_SUBDIR)/bolt-plugin || exit 1; \ ++ case $(srcdir) in \ ++ /* | [A-Za-z]:[\\/]*) topdir=$(srcdir) ;; \ ++ *) topdir=`echo $(HOST_SUBDIR)/bolt-plugin/ | \ ++ sed -e 's,\./,,g' -e 's,[^/]*/,../,g' `$(srcdir) ;; \ ++ esac; \ ++ module_srcdir=bolt-plugin; \ ++ $(SHELL) $$s/$$module_srcdir/configure \ ++ --srcdir=$${topdir}/$$module_srcdir \ ++ $(HOST_CONFIGARGS) --build=${build_alias} --host=${host_alias} \ ++ --target=${target_alias} \ ++ --with-build-libsubdir=$(HOST_SUBDIR) \ ++ $(STAGEtrain_CONFIGURE_FLAGS) \ ++ --enable-shared @extra_linker_plugin_flags@ @extra_linker_plugin_configure_flags@ ++@endif bolt-plugin-bootstrap ++ ++.PHONY: configure-stagefeedback-bolt-plugin maybe-configure-stagefeedback-bolt-plugin ++maybe-configure-stagefeedback-bolt-plugin: ++@if bolt-plugin-bootstrap ++maybe-configure-stagefeedback-bolt-plugin: configure-stagefeedback-bolt-plugin ++configure-stagefeedback-bolt-plugin: ++ @[ $(current_stage) = stagefeedback ] || $(MAKE) stagefeedback-start ++ @$(SHELL) $(srcdir)/mkinstalldirs $(HOST_SUBDIR)/bolt-plugin ++ @r=`${PWD_COMMAND}`; export r; \ ++ s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \ ++ TFLAGS="$(STAGEfeedback_TFLAGS)"; \ ++ test ! 
-f $(HOST_SUBDIR)/bolt-plugin/Makefile || exit 0; \ ++ $(HOST_EXPORTS) \ ++ $(POSTSTAGE1_HOST_EXPORTS) \ ++ CFLAGS="$(STAGEfeedback_CFLAGS)"; export CFLAGS; \ ++ CXXFLAGS="$(STAGEfeedback_CXXFLAGS)"; export CXXFLAGS; \ ++ LIBCFLAGS="$(STAGEfeedback_CFLAGS)"; export LIBCFLAGS; \ ++ echo Configuring stage feedback in $(HOST_SUBDIR)/bolt-plugin; \ ++ $(SHELL) $(srcdir)/mkinstalldirs $(HOST_SUBDIR)/bolt-plugin; \ ++ cd $(HOST_SUBDIR)/bolt-plugin || exit 1; \ ++ case $(srcdir) in \ ++ /* | [A-Za-z]:[\\/]*) topdir=$(srcdir) ;; \ ++ *) topdir=`echo $(HOST_SUBDIR)/bolt-plugin/ | \ ++ sed -e 's,\./,,g' -e 's,[^/]*/,../,g' `$(srcdir) ;; \ ++ esac; \ ++ module_srcdir=bolt-plugin; \ ++ $(SHELL) $$s/$$module_srcdir/configure \ ++ --srcdir=$${topdir}/$$module_srcdir \ ++ $(HOST_CONFIGARGS) --build=${build_alias} --host=${host_alias} \ ++ --target=${target_alias} \ ++ --with-build-libsubdir=$(HOST_SUBDIR) \ ++ $(STAGEfeedback_CONFIGURE_FLAGS) \ ++ --enable-shared @extra_linker_plugin_flags@ @extra_linker_plugin_configure_flags@ ++@endif bolt-plugin-bootstrap ++ ++.PHONY: configure-stageautoprofile-bolt-plugin maybe-configure-stageautoprofile-bolt-plugin ++maybe-configure-stageautoprofile-bolt-plugin: ++@if bolt-plugin-bootstrap ++maybe-configure-stageautoprofile-bolt-plugin: configure-stageautoprofile-bolt-plugin ++configure-stageautoprofile-bolt-plugin: ++ @[ $(current_stage) = stageautoprofile ] || $(MAKE) stageautoprofile-start ++ @$(SHELL) $(srcdir)/mkinstalldirs $(HOST_SUBDIR)/bolt-plugin ++ @r=`${PWD_COMMAND}`; export r; \ ++ s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \ ++ TFLAGS="$(STAGEautoprofile_TFLAGS)"; \ ++ test ! 
-f $(HOST_SUBDIR)/bolt-plugin/Makefile || exit 0; \ ++ $(HOST_EXPORTS) \ ++ $(POSTSTAGE1_HOST_EXPORTS) \ ++ CFLAGS="$(STAGEautoprofile_CFLAGS)"; export CFLAGS; \ ++ CXXFLAGS="$(STAGEautoprofile_CXXFLAGS)"; export CXXFLAGS; \ ++ LIBCFLAGS="$(STAGEautoprofile_CFLAGS)"; export LIBCFLAGS; \ ++ echo Configuring stage autoprofile in $(HOST_SUBDIR)/bolt-plugin; \ ++ $(SHELL) $(srcdir)/mkinstalldirs $(HOST_SUBDIR)/bolt-plugin; \ ++ cd $(HOST_SUBDIR)/bolt-plugin || exit 1; \ ++ case $(srcdir) in \ ++ /* | [A-Za-z]:[\\/]*) topdir=$(srcdir) ;; \ ++ *) topdir=`echo $(HOST_SUBDIR)/bolt-plugin/ | \ ++ sed -e 's,\./,,g' -e 's,[^/]*/,../,g' `$(srcdir) ;; \ ++ esac; \ ++ module_srcdir=bolt-plugin; \ ++ $(SHELL) $$s/$$module_srcdir/configure \ ++ --srcdir=$${topdir}/$$module_srcdir \ ++ $(HOST_CONFIGARGS) --build=${build_alias} --host=${host_alias} \ ++ --target=${target_alias} \ ++ --with-build-libsubdir=$(HOST_SUBDIR) \ ++ $(STAGEautoprofile_CONFIGURE_FLAGS) \ ++ --enable-shared @extra_linker_plugin_flags@ @extra_linker_plugin_configure_flags@ ++@endif bolt-plugin-bootstrap ++ ++.PHONY: configure-stageautofeedback-bolt-plugin maybe-configure-stageautofeedback-bolt-plugin ++maybe-configure-stageautofeedback-bolt-plugin: ++@if bolt-plugin-bootstrap ++maybe-configure-stageautofeedback-bolt-plugin: configure-stageautofeedback-bolt-plugin ++configure-stageautofeedback-bolt-plugin: ++ @[ $(current_stage) = stageautofeedback ] || $(MAKE) stageautofeedback-start ++ @$(SHELL) $(srcdir)/mkinstalldirs $(HOST_SUBDIR)/bolt-plugin ++ @r=`${PWD_COMMAND}`; export r; \ ++ s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \ ++ TFLAGS="$(STAGEautofeedback_TFLAGS)"; \ ++ test ! 
-f $(HOST_SUBDIR)/bolt-plugin/Makefile || exit 0; \ ++ $(HOST_EXPORTS) \ ++ $(POSTSTAGE1_HOST_EXPORTS) \ ++ CFLAGS="$(STAGEautofeedback_CFLAGS)"; export CFLAGS; \ ++ CXXFLAGS="$(STAGEautofeedback_CXXFLAGS)"; export CXXFLAGS; \ ++ LIBCFLAGS="$(STAGEautofeedback_CFLAGS)"; export LIBCFLAGS; \ ++ echo Configuring stage autofeedback in $(HOST_SUBDIR)/bolt-plugin; \ ++ $(SHELL) $(srcdir)/mkinstalldirs $(HOST_SUBDIR)/bolt-plugin; \ ++ cd $(HOST_SUBDIR)/bolt-plugin || exit 1; \ ++ case $(srcdir) in \ ++ /* | [A-Za-z]:[\\/]*) topdir=$(srcdir) ;; \ ++ *) topdir=`echo $(HOST_SUBDIR)/bolt-plugin/ | \ ++ sed -e 's,\./,,g' -e 's,[^/]*/,../,g' `$(srcdir) ;; \ ++ esac; \ ++ module_srcdir=bolt-plugin; \ ++ $(SHELL) $$s/$$module_srcdir/configure \ ++ --srcdir=$${topdir}/$$module_srcdir \ ++ $(HOST_CONFIGARGS) --build=${build_alias} --host=${host_alias} \ ++ --target=${target_alias} \ ++ --with-build-libsubdir=$(HOST_SUBDIR) \ ++ $(STAGEautofeedback_CONFIGURE_FLAGS) \ ++ --enable-shared @extra_linker_plugin_flags@ @extra_linker_plugin_configure_flags@ ++@endif bolt-plugin-bootstrap ++ ++ ++ ++ ++ ++.PHONY: all-bolt-plugin maybe-all-bolt-plugin ++maybe-all-bolt-plugin: ++@if gcc-bootstrap ++all-bolt-plugin: stage_current ++@endif gcc-bootstrap ++@if bolt-plugin ++TARGET-bolt-plugin=all ++maybe-all-bolt-plugin: all-bolt-plugin ++all-bolt-plugin: configure-bolt-plugin ++ @r=`${PWD_COMMAND}`; export r; \ ++ s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \ ++ $(HOST_EXPORTS) \ ++ (cd $(HOST_SUBDIR)/bolt-plugin && \ ++ $(MAKE) $(BASE_FLAGS_TO_PASS) $(EXTRA_HOST_FLAGS) $(STAGE1_FLAGS_TO_PASS) @extra_linker_plugin_flags@ \ ++ $(TARGET-bolt-plugin)) ++@endif bolt-plugin ++ ++ ++ ++.PHONY: all-stage1-bolt-plugin maybe-all-stage1-bolt-plugin ++.PHONY: clean-stage1-bolt-plugin maybe-clean-stage1-bolt-plugin ++maybe-all-stage1-bolt-plugin: ++maybe-clean-stage1-bolt-plugin: ++@if bolt-plugin-bootstrap ++maybe-all-stage1-bolt-plugin: all-stage1-bolt-plugin ++all-stage1: all-stage1-bolt-plugin 
++TARGET-stage1-bolt-plugin = $(TARGET-bolt-plugin) ++all-stage1-bolt-plugin: configure-stage1-bolt-plugin ++ @[ $(current_stage) = stage1 ] || $(MAKE) stage1-start ++ @r=`${PWD_COMMAND}`; export r; \ ++ s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \ ++ TFLAGS="$(STAGE1_TFLAGS)"; \ ++ $(HOST_EXPORTS) \ ++ cd $(HOST_SUBDIR)/bolt-plugin && \ ++ \ ++ $(MAKE) $(BASE_FLAGS_TO_PASS) \ ++ CFLAGS="$(STAGE1_CFLAGS)" \ ++ GENERATOR_CFLAGS="$(STAGE1_GENERATOR_CFLAGS)" \ ++ CXXFLAGS="$(STAGE1_CXXFLAGS)" \ ++ LIBCFLAGS="$(LIBCFLAGS)" \ ++ CFLAGS_FOR_TARGET="$(CFLAGS_FOR_TARGET)" \ ++ CXXFLAGS_FOR_TARGET="$(CXXFLAGS_FOR_TARGET)" \ ++ LIBCFLAGS_FOR_TARGET="$(LIBCFLAGS_FOR_TARGET)" \ ++ $(EXTRA_HOST_FLAGS) \ ++ $(STAGE1_FLAGS_TO_PASS) @extra_linker_plugin_flags@ \ ++ TFLAGS="$(STAGE1_TFLAGS)" \ ++ $(TARGET-stage1-bolt-plugin) ++ ++maybe-clean-stage1-bolt-plugin: clean-stage1-bolt-plugin ++clean-stage1: clean-stage1-bolt-plugin ++clean-stage1-bolt-plugin: ++ @if [ $(current_stage) = stage1 ]; then \ ++ [ -f $(HOST_SUBDIR)/bolt-plugin/Makefile ] || exit 0; \ ++ else \ ++ [ -f $(HOST_SUBDIR)/stage1-bolt-plugin/Makefile ] || exit 0; \ ++ $(MAKE) stage1-start; \ ++ fi; \ ++ cd $(HOST_SUBDIR)/bolt-plugin && \ ++ $(MAKE) $(EXTRA_HOST_FLAGS) \ ++ $(STAGE1_FLAGS_TO_PASS) @extra_linker_plugin_flags@ clean ++@endif bolt-plugin-bootstrap ++ ++ ++.PHONY: all-stage2-bolt-plugin maybe-all-stage2-bolt-plugin ++.PHONY: clean-stage2-bolt-plugin maybe-clean-stage2-bolt-plugin ++maybe-all-stage2-bolt-plugin: ++maybe-clean-stage2-bolt-plugin: ++@if bolt-plugin-bootstrap ++maybe-all-stage2-bolt-plugin: all-stage2-bolt-plugin ++all-stage2: all-stage2-bolt-plugin ++TARGET-stage2-bolt-plugin = $(TARGET-bolt-plugin) ++all-stage2-bolt-plugin: configure-stage2-bolt-plugin ++ @[ $(current_stage) = stage2 ] || $(MAKE) stage2-start ++ @r=`${PWD_COMMAND}`; export r; \ ++ s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \ ++ TFLAGS="$(STAGE2_TFLAGS)"; \ ++ $(HOST_EXPORTS) \ ++ $(POSTSTAGE1_HOST_EXPORTS) \ ++ cd 
$(HOST_SUBDIR)/bolt-plugin && \ ++ \ ++ $(MAKE) $(BASE_FLAGS_TO_PASS) \ ++ CFLAGS="$(STAGE2_CFLAGS)" \ ++ GENERATOR_CFLAGS="$(STAGE2_GENERATOR_CFLAGS)" \ ++ CXXFLAGS="$(STAGE2_CXXFLAGS)" \ ++ LIBCFLAGS="$(STAGE2_CFLAGS)" \ ++ CFLAGS_FOR_TARGET="$(CFLAGS_FOR_TARGET)" \ ++ CXXFLAGS_FOR_TARGET="$(CXXFLAGS_FOR_TARGET)" \ ++ LIBCFLAGS_FOR_TARGET="$(LIBCFLAGS_FOR_TARGET)" \ ++ $(EXTRA_HOST_FLAGS) $(POSTSTAGE1_FLAGS_TO_PASS) @extra_linker_plugin_flags@ \ ++ TFLAGS="$(STAGE2_TFLAGS)" \ ++ $(TARGET-stage2-bolt-plugin) ++ ++maybe-clean-stage2-bolt-plugin: clean-stage2-bolt-plugin ++clean-stage2: clean-stage2-bolt-plugin ++clean-stage2-bolt-plugin: ++ @if [ $(current_stage) = stage2 ]; then \ ++ [ -f $(HOST_SUBDIR)/bolt-plugin/Makefile ] || exit 0; \ ++ else \ ++ [ -f $(HOST_SUBDIR)/stage2-bolt-plugin/Makefile ] || exit 0; \ ++ $(MAKE) stage2-start; \ ++ fi; \ ++ cd $(HOST_SUBDIR)/bolt-plugin && \ ++ $(MAKE) $(EXTRA_HOST_FLAGS) $(POSTSTAGE1_FLAGS_TO_PASS) @extra_linker_plugin_flags@ clean ++@endif bolt-plugin-bootstrap ++ ++ ++.PHONY: all-stage3-bolt-plugin maybe-all-stage3-bolt-plugin ++.PHONY: clean-stage3-bolt-plugin maybe-clean-stage3-bolt-plugin ++maybe-all-stage3-bolt-plugin: ++maybe-clean-stage3-bolt-plugin: ++@if bolt-plugin-bootstrap ++maybe-all-stage3-bolt-plugin: all-stage3-bolt-plugin ++all-stage3: all-stage3-bolt-plugin ++TARGET-stage3-bolt-plugin = $(TARGET-bolt-plugin) ++all-stage3-bolt-plugin: configure-stage3-bolt-plugin ++ @[ $(current_stage) = stage3 ] || $(MAKE) stage3-start ++ @r=`${PWD_COMMAND}`; export r; \ ++ s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \ ++ TFLAGS="$(STAGE3_TFLAGS)"; \ ++ $(HOST_EXPORTS) \ ++ $(POSTSTAGE1_HOST_EXPORTS) \ ++ cd $(HOST_SUBDIR)/bolt-plugin && \ ++ \ ++ $(MAKE) $(BASE_FLAGS_TO_PASS) \ ++ CFLAGS="$(STAGE3_CFLAGS)" \ ++ GENERATOR_CFLAGS="$(STAGE3_GENERATOR_CFLAGS)" \ ++ CXXFLAGS="$(STAGE3_CXXFLAGS)" \ ++ LIBCFLAGS="$(STAGE3_CFLAGS)" \ ++ CFLAGS_FOR_TARGET="$(CFLAGS_FOR_TARGET)" \ ++ 
CXXFLAGS_FOR_TARGET="$(CXXFLAGS_FOR_TARGET)" \ ++ LIBCFLAGS_FOR_TARGET="$(LIBCFLAGS_FOR_TARGET)" \ ++ $(EXTRA_HOST_FLAGS) $(POSTSTAGE1_FLAGS_TO_PASS) @extra_linker_plugin_flags@ \ ++ TFLAGS="$(STAGE3_TFLAGS)" \ ++ $(TARGET-stage3-bolt-plugin) ++ ++maybe-clean-stage3-bolt-plugin: clean-stage3-bolt-plugin ++clean-stage3: clean-stage3-bolt-plugin ++clean-stage3-bolt-plugin: ++ @if [ $(current_stage) = stage3 ]; then \ ++ [ -f $(HOST_SUBDIR)/bolt-plugin/Makefile ] || exit 0; \ ++ else \ ++ [ -f $(HOST_SUBDIR)/stage3-bolt-plugin/Makefile ] || exit 0; \ ++ $(MAKE) stage3-start; \ ++ fi; \ ++ cd $(HOST_SUBDIR)/bolt-plugin && \ ++ $(MAKE) $(EXTRA_HOST_FLAGS) $(POSTSTAGE1_FLAGS_TO_PASS) @extra_linker_plugin_flags@ clean ++@endif bolt-plugin-bootstrap ++ ++ ++.PHONY: all-stage4-bolt-plugin maybe-all-stage4-bolt-plugin ++.PHONY: clean-stage4-bolt-plugin maybe-clean-stage4-bolt-plugin ++maybe-all-stage4-bolt-plugin: ++maybe-clean-stage4-bolt-plugin: ++@if bolt-plugin-bootstrap ++maybe-all-stage4-bolt-plugin: all-stage4-bolt-plugin ++all-stage4: all-stage4-bolt-plugin ++TARGET-stage4-bolt-plugin = $(TARGET-bolt-plugin) ++all-stage4-bolt-plugin: configure-stage4-bolt-plugin ++ @[ $(current_stage) = stage4 ] || $(MAKE) stage4-start ++ @r=`${PWD_COMMAND}`; export r; \ ++ s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \ ++ TFLAGS="$(STAGE4_TFLAGS)"; \ ++ $(HOST_EXPORTS) \ ++ $(POSTSTAGE1_HOST_EXPORTS) \ ++ cd $(HOST_SUBDIR)/bolt-plugin && \ ++ \ ++ $(MAKE) $(BASE_FLAGS_TO_PASS) \ ++ CFLAGS="$(STAGE4_CFLAGS)" \ ++ GENERATOR_CFLAGS="$(STAGE4_GENERATOR_CFLAGS)" \ ++ CXXFLAGS="$(STAGE4_CXXFLAGS)" \ ++ LIBCFLAGS="$(STAGE4_CFLAGS)" \ ++ CFLAGS_FOR_TARGET="$(CFLAGS_FOR_TARGET)" \ ++ CXXFLAGS_FOR_TARGET="$(CXXFLAGS_FOR_TARGET)" \ ++ LIBCFLAGS_FOR_TARGET="$(LIBCFLAGS_FOR_TARGET)" \ ++ $(EXTRA_HOST_FLAGS) $(POSTSTAGE1_FLAGS_TO_PASS) @extra_linker_plugin_flags@ \ ++ TFLAGS="$(STAGE4_TFLAGS)" \ ++ $(TARGET-stage4-bolt-plugin) ++ ++maybe-clean-stage4-bolt-plugin: clean-stage4-bolt-plugin 
++clean-stage4: clean-stage4-bolt-plugin ++clean-stage4-bolt-plugin: ++ @if [ $(current_stage) = stage4 ]; then \ ++ [ -f $(HOST_SUBDIR)/bolt-plugin/Makefile ] || exit 0; \ ++ else \ ++ [ -f $(HOST_SUBDIR)/stage4-bolt-plugin/Makefile ] || exit 0; \ ++ $(MAKE) stage4-start; \ ++ fi; \ ++ cd $(HOST_SUBDIR)/bolt-plugin && \ ++ $(MAKE) $(EXTRA_HOST_FLAGS) $(POSTSTAGE1_FLAGS_TO_PASS) @extra_linker_plugin_flags@ clean ++@endif bolt-plugin-bootstrap ++ ++ ++.PHONY: all-stageprofile-bolt-plugin maybe-all-stageprofile-bolt-plugin ++.PHONY: clean-stageprofile-bolt-plugin maybe-clean-stageprofile-bolt-plugin ++maybe-all-stageprofile-bolt-plugin: ++maybe-clean-stageprofile-bolt-plugin: ++@if bolt-plugin-bootstrap ++maybe-all-stageprofile-bolt-plugin: all-stageprofile-bolt-plugin ++all-stageprofile: all-stageprofile-bolt-plugin ++TARGET-stageprofile-bolt-plugin = $(TARGET-bolt-plugin) ++all-stageprofile-bolt-plugin: configure-stageprofile-bolt-plugin ++ @[ $(current_stage) = stageprofile ] || $(MAKE) stageprofile-start ++ @r=`${PWD_COMMAND}`; export r; \ ++ s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \ ++ TFLAGS="$(STAGEprofile_TFLAGS)"; \ ++ $(HOST_EXPORTS) \ ++ $(POSTSTAGE1_HOST_EXPORTS) \ ++ cd $(HOST_SUBDIR)/bolt-plugin && \ ++ \ ++ $(MAKE) $(BASE_FLAGS_TO_PASS) \ ++ CFLAGS="$(STAGEprofile_CFLAGS)" \ ++ GENERATOR_CFLAGS="$(STAGEprofile_GENERATOR_CFLAGS)" \ ++ CXXFLAGS="$(STAGEprofile_CXXFLAGS)" \ ++ LIBCFLAGS="$(STAGEprofile_CFLAGS)" \ ++ CFLAGS_FOR_TARGET="$(CFLAGS_FOR_TARGET)" \ ++ CXXFLAGS_FOR_TARGET="$(CXXFLAGS_FOR_TARGET)" \ ++ LIBCFLAGS_FOR_TARGET="$(LIBCFLAGS_FOR_TARGET)" \ ++ $(EXTRA_HOST_FLAGS) $(POSTSTAGE1_FLAGS_TO_PASS) @extra_linker_plugin_flags@ \ ++ TFLAGS="$(STAGEprofile_TFLAGS)" \ ++ $(TARGET-stageprofile-bolt-plugin) ++ ++maybe-clean-stageprofile-bolt-plugin: clean-stageprofile-bolt-plugin ++clean-stageprofile: clean-stageprofile-bolt-plugin ++clean-stageprofile-bolt-plugin: ++ @if [ $(current_stage) = stageprofile ]; then \ ++ [ -f 
$(HOST_SUBDIR)/bolt-plugin/Makefile ] || exit 0; \ ++ else \ ++ [ -f $(HOST_SUBDIR)/stageprofile-bolt-plugin/Makefile ] || exit 0; \ ++ $(MAKE) stageprofile-start; \ ++ fi; \ ++ cd $(HOST_SUBDIR)/bolt-plugin && \ ++ $(MAKE) $(EXTRA_HOST_FLAGS) $(POSTSTAGE1_FLAGS_TO_PASS) @extra_linker_plugin_flags@ clean ++@endif bolt-plugin-bootstrap ++ ++ ++.PHONY: all-stagetrain-bolt-plugin maybe-all-stagetrain-bolt-plugin ++.PHONY: clean-stagetrain-bolt-plugin maybe-clean-stagetrain-bolt-plugin ++maybe-all-stagetrain-bolt-plugin: ++maybe-clean-stagetrain-bolt-plugin: ++@if bolt-plugin-bootstrap ++maybe-all-stagetrain-bolt-plugin: all-stagetrain-bolt-plugin ++all-stagetrain: all-stagetrain-bolt-plugin ++TARGET-stagetrain-bolt-plugin = $(TARGET-bolt-plugin) ++all-stagetrain-bolt-plugin: configure-stagetrain-bolt-plugin ++ @[ $(current_stage) = stagetrain ] || $(MAKE) stagetrain-start ++ @r=`${PWD_COMMAND}`; export r; \ ++ s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \ ++ TFLAGS="$(STAGEtrain_TFLAGS)"; \ ++ $(HOST_EXPORTS) \ ++ $(POSTSTAGE1_HOST_EXPORTS) \ ++ cd $(HOST_SUBDIR)/bolt-plugin && \ ++ \ ++ $(MAKE) $(BASE_FLAGS_TO_PASS) \ ++ CFLAGS="$(STAGEtrain_CFLAGS)" \ ++ GENERATOR_CFLAGS="$(STAGEtrain_GENERATOR_CFLAGS)" \ ++ CXXFLAGS="$(STAGEtrain_CXXFLAGS)" \ ++ LIBCFLAGS="$(STAGEtrain_CFLAGS)" \ ++ CFLAGS_FOR_TARGET="$(CFLAGS_FOR_TARGET)" \ ++ CXXFLAGS_FOR_TARGET="$(CXXFLAGS_FOR_TARGET)" \ ++ LIBCFLAGS_FOR_TARGET="$(LIBCFLAGS_FOR_TARGET)" \ ++ $(EXTRA_HOST_FLAGS) $(POSTSTAGE1_FLAGS_TO_PASS) @extra_linker_plugin_flags@ \ ++ TFLAGS="$(STAGEtrain_TFLAGS)" \ ++ $(TARGET-stagetrain-bolt-plugin) ++ ++maybe-clean-stagetrain-bolt-plugin: clean-stagetrain-bolt-plugin ++clean-stagetrain: clean-stagetrain-bolt-plugin ++clean-stagetrain-bolt-plugin: ++ @if [ $(current_stage) = stagetrain ]; then \ ++ [ -f $(HOST_SUBDIR)/bolt-plugin/Makefile ] || exit 0; \ ++ else \ ++ [ -f $(HOST_SUBDIR)/stagetrain-bolt-plugin/Makefile ] || exit 0; \ ++ $(MAKE) stagetrain-start; \ ++ fi; \ ++ cd 
$(HOST_SUBDIR)/bolt-plugin && \ ++ $(MAKE) $(EXTRA_HOST_FLAGS) $(POSTSTAGE1_FLAGS_TO_PASS) @extra_linker_plugin_flags@ clean ++@endif bolt-plugin-bootstrap ++ ++ ++.PHONY: all-stagefeedback-bolt-plugin maybe-all-stagefeedback-bolt-plugin ++.PHONY: clean-stagefeedback-bolt-plugin maybe-clean-stagefeedback-bolt-plugin ++maybe-all-stagefeedback-bolt-plugin: ++maybe-clean-stagefeedback-bolt-plugin: ++@if bolt-plugin-bootstrap ++maybe-all-stagefeedback-bolt-plugin: all-stagefeedback-bolt-plugin ++all-stagefeedback: all-stagefeedback-bolt-plugin ++TARGET-stagefeedback-bolt-plugin = $(TARGET-bolt-plugin) ++all-stagefeedback-bolt-plugin: configure-stagefeedback-bolt-plugin ++ @[ $(current_stage) = stagefeedback ] || $(MAKE) stagefeedback-start ++ @r=`${PWD_COMMAND}`; export r; \ ++ s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \ ++ TFLAGS="$(STAGEfeedback_TFLAGS)"; \ ++ $(HOST_EXPORTS) \ ++ $(POSTSTAGE1_HOST_EXPORTS) \ ++ cd $(HOST_SUBDIR)/bolt-plugin && \ ++ \ ++ $(MAKE) $(BASE_FLAGS_TO_PASS) \ ++ CFLAGS="$(STAGEfeedback_CFLAGS)" \ ++ GENERATOR_CFLAGS="$(STAGEfeedback_GENERATOR_CFLAGS)" \ ++ CXXFLAGS="$(STAGEfeedback_CXXFLAGS)" \ ++ LIBCFLAGS="$(STAGEfeedback_CFLAGS)" \ ++ CFLAGS_FOR_TARGET="$(CFLAGS_FOR_TARGET)" \ ++ CXXFLAGS_FOR_TARGET="$(CXXFLAGS_FOR_TARGET)" \ ++ LIBCFLAGS_FOR_TARGET="$(LIBCFLAGS_FOR_TARGET)" \ ++ $(EXTRA_HOST_FLAGS) $(POSTSTAGE1_FLAGS_TO_PASS) @extra_linker_plugin_flags@ \ ++ TFLAGS="$(STAGEfeedback_TFLAGS)" \ ++ $(TARGET-stagefeedback-bolt-plugin) ++ ++maybe-clean-stagefeedback-bolt-plugin: clean-stagefeedback-bolt-plugin ++clean-stagefeedback: clean-stagefeedback-bolt-plugin ++clean-stagefeedback-bolt-plugin: ++ @if [ $(current_stage) = stagefeedback ]; then \ ++ [ -f $(HOST_SUBDIR)/bolt-plugin/Makefile ] || exit 0; \ ++ else \ ++ [ -f $(HOST_SUBDIR)/stagefeedback-bolt-plugin/Makefile ] || exit 0; \ ++ $(MAKE) stagefeedback-start; \ ++ fi; \ ++ cd $(HOST_SUBDIR)/bolt-plugin && \ ++ $(MAKE) $(EXTRA_HOST_FLAGS) $(POSTSTAGE1_FLAGS_TO_PASS) 
@extra_linker_plugin_flags@ clean ++@endif bolt-plugin-bootstrap ++ ++ ++.PHONY: all-stageautoprofile-bolt-plugin maybe-all-stageautoprofile-bolt-plugin ++.PHONY: clean-stageautoprofile-bolt-plugin maybe-clean-stageautoprofile-bolt-plugin ++maybe-all-stageautoprofile-bolt-plugin: ++maybe-clean-stageautoprofile-bolt-plugin: ++@if bolt-plugin-bootstrap ++maybe-all-stageautoprofile-bolt-plugin: all-stageautoprofile-bolt-plugin ++all-stageautoprofile: all-stageautoprofile-bolt-plugin ++TARGET-stageautoprofile-bolt-plugin = $(TARGET-bolt-plugin) ++all-stageautoprofile-bolt-plugin: configure-stageautoprofile-bolt-plugin ++ @[ $(current_stage) = stageautoprofile ] || $(MAKE) stageautoprofile-start ++ @r=`${PWD_COMMAND}`; export r; \ ++ s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \ ++ TFLAGS="$(STAGEautoprofile_TFLAGS)"; \ ++ $(HOST_EXPORTS) \ ++ $(POSTSTAGE1_HOST_EXPORTS) \ ++ cd $(HOST_SUBDIR)/bolt-plugin && \ ++ $$s/gcc/config/i386/$(AUTO_PROFILE) \ ++ $(MAKE) $(BASE_FLAGS_TO_PASS) \ ++ CFLAGS="$(STAGEautoprofile_CFLAGS)" \ ++ GENERATOR_CFLAGS="$(STAGEautoprofile_GENERATOR_CFLAGS)" \ ++ CXXFLAGS="$(STAGEautoprofile_CXXFLAGS)" \ ++ LIBCFLAGS="$(STAGEautoprofile_CFLAGS)" \ ++ CFLAGS_FOR_TARGET="$(CFLAGS_FOR_TARGET)" \ ++ CXXFLAGS_FOR_TARGET="$(CXXFLAGS_FOR_TARGET)" \ ++ LIBCFLAGS_FOR_TARGET="$(LIBCFLAGS_FOR_TARGET)" \ ++ $(EXTRA_HOST_FLAGS) $(POSTSTAGE1_FLAGS_TO_PASS) @extra_linker_plugin_flags@ \ ++ TFLAGS="$(STAGEautoprofile_TFLAGS)" \ ++ $(TARGET-stageautoprofile-bolt-plugin) ++ ++maybe-clean-stageautoprofile-bolt-plugin: clean-stageautoprofile-bolt-plugin ++clean-stageautoprofile: clean-stageautoprofile-bolt-plugin ++clean-stageautoprofile-bolt-plugin: ++ @if [ $(current_stage) = stageautoprofile ]; then \ ++ [ -f $(HOST_SUBDIR)/bolt-plugin/Makefile ] || exit 0; \ ++ else \ ++ [ -f $(HOST_SUBDIR)/stageautoprofile-bolt-plugin/Makefile ] || exit 0; \ ++ $(MAKE) stageautoprofile-start; \ ++ fi; \ ++ cd $(HOST_SUBDIR)/bolt-plugin && \ ++ $(MAKE) $(EXTRA_HOST_FLAGS) 
$(POSTSTAGE1_FLAGS_TO_PASS) @extra_linker_plugin_flags@ clean ++@endif bolt-plugin-bootstrap ++ ++ ++.PHONY: all-stageautofeedback-bolt-plugin maybe-all-stageautofeedback-bolt-plugin ++.PHONY: clean-stageautofeedback-bolt-plugin maybe-clean-stageautofeedback-bolt-plugin ++maybe-all-stageautofeedback-bolt-plugin: ++maybe-clean-stageautofeedback-bolt-plugin: ++@if bolt-plugin-bootstrap ++maybe-all-stageautofeedback-bolt-plugin: all-stageautofeedback-bolt-plugin ++all-stageautofeedback: all-stageautofeedback-bolt-plugin ++TARGET-stageautofeedback-bolt-plugin = $(TARGET-bolt-plugin) ++all-stageautofeedback-bolt-plugin: configure-stageautofeedback-bolt-plugin ++ @[ $(current_stage) = stageautofeedback ] || $(MAKE) stageautofeedback-start ++ @r=`${PWD_COMMAND}`; export r; \ ++ s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \ ++ TFLAGS="$(STAGEautofeedback_TFLAGS)"; \ ++ $(HOST_EXPORTS) \ ++ $(POSTSTAGE1_HOST_EXPORTS) \ ++ cd $(HOST_SUBDIR)/bolt-plugin && \ ++ \ ++ $(MAKE) $(BASE_FLAGS_TO_PASS) \ ++ CFLAGS="$(STAGEautofeedback_CFLAGS)" \ ++ GENERATOR_CFLAGS="$(STAGEautofeedback_GENERATOR_CFLAGS)" \ ++ CXXFLAGS="$(STAGEautofeedback_CXXFLAGS)" \ ++ LIBCFLAGS="$(STAGEautofeedback_CFLAGS)" \ ++ CFLAGS_FOR_TARGET="$(CFLAGS_FOR_TARGET)" \ ++ CXXFLAGS_FOR_TARGET="$(CXXFLAGS_FOR_TARGET)" \ ++ LIBCFLAGS_FOR_TARGET="$(LIBCFLAGS_FOR_TARGET)" \ ++ $(EXTRA_HOST_FLAGS) $(POSTSTAGE1_FLAGS_TO_PASS) @extra_linker_plugin_flags@ \ ++ TFLAGS="$(STAGEautofeedback_TFLAGS)" PERF_DATA=perf.data \ ++ $(TARGET-stageautofeedback-bolt-plugin) ++ ++maybe-clean-stageautofeedback-bolt-plugin: clean-stageautofeedback-bolt-plugin ++clean-stageautofeedback: clean-stageautofeedback-bolt-plugin ++clean-stageautofeedback-bolt-plugin: ++ @if [ $(current_stage) = stageautofeedback ]; then \ ++ [ -f $(HOST_SUBDIR)/bolt-plugin/Makefile ] || exit 0; \ ++ else \ ++ [ -f $(HOST_SUBDIR)/stageautofeedback-bolt-plugin/Makefile ] || exit 0; \ ++ $(MAKE) stageautofeedback-start; \ ++ fi; \ ++ cd $(HOST_SUBDIR)/bolt-plugin 
&& \ ++ $(MAKE) $(EXTRA_HOST_FLAGS) $(POSTSTAGE1_FLAGS_TO_PASS) @extra_linker_plugin_flags@ clean ++@endif bolt-plugin-bootstrap ++ ++ ++ ++ ++ ++.PHONY: check-bolt-plugin maybe-check-bolt-plugin ++maybe-check-bolt-plugin: ++@if bolt-plugin ++maybe-check-bolt-plugin: check-bolt-plugin ++ ++check-bolt-plugin: ++ @: $(MAKE); $(unstage) ++ @r=`${PWD_COMMAND}`; export r; \ ++ s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \ ++ $(HOST_EXPORTS) $(EXTRA_HOST_EXPORTS) \ ++ (cd $(HOST_SUBDIR)/bolt-plugin && \ ++ $(MAKE) $(FLAGS_TO_PASS) @extra_linker_plugin_flags@ $(EXTRA_BOOTSTRAP_FLAGS) check) ++ ++@endif bolt-plugin ++ ++.PHONY: install-bolt-plugin maybe-install-bolt-plugin ++maybe-install-bolt-plugin: ++@if bolt-plugin ++maybe-install-bolt-plugin: install-bolt-plugin ++ ++install-bolt-plugin: installdirs ++ @: $(MAKE); $(unstage) ++ @r=`${PWD_COMMAND}`; export r; \ ++ s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \ ++ $(HOST_EXPORTS) \ ++ (cd $(HOST_SUBDIR)/bolt-plugin && \ ++ $(MAKE) $(FLAGS_TO_PASS) @extra_linker_plugin_flags@ install) ++ ++@endif bolt-plugin ++ ++.PHONY: install-strip-bolt-plugin maybe-install-strip-bolt-plugin ++maybe-install-strip-bolt-plugin: ++@if bolt-plugin ++maybe-install-strip-bolt-plugin: install-strip-bolt-plugin ++ ++install-strip-bolt-plugin: installdirs ++ @: $(MAKE); $(unstage) ++ @r=`${PWD_COMMAND}`; export r; \ ++ s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \ ++ $(HOST_EXPORTS) \ ++ (cd $(HOST_SUBDIR)/bolt-plugin && \ ++ $(MAKE) $(FLAGS_TO_PASS) @extra_linker_plugin_flags@ install-strip) ++ ++@endif bolt-plugin ++ ++# Other targets (info, dvi, pdf, etc.) 
++ ++.PHONY: maybe-info-bolt-plugin info-bolt-plugin ++maybe-info-bolt-plugin: ++@if bolt-plugin ++maybe-info-bolt-plugin: info-bolt-plugin ++ ++info-bolt-plugin: \ ++ configure-bolt-plugin ++ @[ -f ./bolt-plugin/Makefile ] || exit 0; \ ++ r=`${PWD_COMMAND}`; export r; \ ++ s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \ ++ $(HOST_EXPORTS) \ ++ for flag in $(EXTRA_HOST_FLAGS) @extra_linker_plugin_flags@; do \ ++ eval `echo "$$flag" | sed -e "s|^\([^=]*\)=\(.*\)|\1='\2'; export \1|"`; \ ++ done; \ ++ echo "Doing info in bolt-plugin"; \ ++ (cd $(HOST_SUBDIR)/bolt-plugin && \ ++ $(MAKE) $(BASE_FLAGS_TO_PASS) "AR=$${AR}" "AS=$${AS}" \ ++ "CC=$${CC}" "CXX=$${CXX}" "LD=$${LD}" "NM=$${NM}" \ ++ "RANLIB=$${RANLIB}" \ ++ "DLLTOOL=$${DLLTOOL}" "WINDRES=$${WINDRES}" "WINDMC=$${WINDMC}" \ ++ info) \ ++ || exit 1 ++ ++@endif bolt-plugin ++ ++.PHONY: maybe-dvi-bolt-plugin dvi-bolt-plugin ++maybe-dvi-bolt-plugin: ++@if bolt-plugin ++maybe-dvi-bolt-plugin: dvi-bolt-plugin ++ ++dvi-bolt-plugin: \ ++ configure-bolt-plugin ++ @[ -f ./bolt-plugin/Makefile ] || exit 0; \ ++ r=`${PWD_COMMAND}`; export r; \ ++ s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \ ++ $(HOST_EXPORTS) \ ++ for flag in $(EXTRA_HOST_FLAGS) @extra_linker_plugin_flags@; do \ ++ eval `echo "$$flag" | sed -e "s|^\([^=]*\)=\(.*\)|\1='\2'; export \1|"`; \ ++ done; \ ++ echo "Doing dvi in bolt-plugin"; \ ++ (cd $(HOST_SUBDIR)/bolt-plugin && \ ++ $(MAKE) $(BASE_FLAGS_TO_PASS) "AR=$${AR}" "AS=$${AS}" \ ++ "CC=$${CC}" "CXX=$${CXX}" "LD=$${LD}" "NM=$${NM}" \ ++ "RANLIB=$${RANLIB}" \ ++ "DLLTOOL=$${DLLTOOL}" "WINDRES=$${WINDRES}" "WINDMC=$${WINDMC}" \ ++ dvi) \ ++ || exit 1 ++ ++@endif bolt-plugin ++ ++.PHONY: maybe-pdf-bolt-plugin pdf-bolt-plugin ++maybe-pdf-bolt-plugin: ++@if bolt-plugin ++maybe-pdf-bolt-plugin: pdf-bolt-plugin ++ ++pdf-bolt-plugin: \ ++ configure-bolt-plugin ++ @[ -f ./bolt-plugin/Makefile ] || exit 0; \ ++ r=`${PWD_COMMAND}`; export r; \ ++ s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \ ++ $(HOST_EXPORTS) \ ++ 
for flag in $(EXTRA_HOST_FLAGS) @extra_linker_plugin_flags@; do \ ++ eval `echo "$$flag" | sed -e "s|^\([^=]*\)=\(.*\)|\1='\2'; export \1|"`; \ ++ done; \ ++ echo "Doing pdf in bolt-plugin"; \ ++ (cd $(HOST_SUBDIR)/bolt-plugin && \ ++ $(MAKE) $(BASE_FLAGS_TO_PASS) "AR=$${AR}" "AS=$${AS}" \ ++ "CC=$${CC}" "CXX=$${CXX}" "LD=$${LD}" "NM=$${NM}" \ ++ "RANLIB=$${RANLIB}" \ ++ "DLLTOOL=$${DLLTOOL}" "WINDRES=$${WINDRES}" "WINDMC=$${WINDMC}" \ ++ pdf) \ ++ || exit 1 ++ ++@endif bolt-plugin ++ ++.PHONY: maybe-html-bolt-plugin html-bolt-plugin ++maybe-html-bolt-plugin: ++@if bolt-plugin ++maybe-html-bolt-plugin: html-bolt-plugin ++ ++html-bolt-plugin: \ ++ configure-bolt-plugin ++ @[ -f ./bolt-plugin/Makefile ] || exit 0; \ ++ r=`${PWD_COMMAND}`; export r; \ ++ s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \ ++ $(HOST_EXPORTS) \ ++ for flag in $(EXTRA_HOST_FLAGS) @extra_linker_plugin_flags@; do \ ++ eval `echo "$$flag" | sed -e "s|^\([^=]*\)=\(.*\)|\1='\2'; export \1|"`; \ ++ done; \ ++ echo "Doing html in bolt-plugin"; \ ++ (cd $(HOST_SUBDIR)/bolt-plugin && \ ++ $(MAKE) $(BASE_FLAGS_TO_PASS) "AR=$${AR}" "AS=$${AS}" \ ++ "CC=$${CC}" "CXX=$${CXX}" "LD=$${LD}" "NM=$${NM}" \ ++ "RANLIB=$${RANLIB}" \ ++ "DLLTOOL=$${DLLTOOL}" "WINDRES=$${WINDRES}" "WINDMC=$${WINDMC}" \ ++ html) \ ++ || exit 1 ++ ++@endif bolt-plugin ++ ++.PHONY: maybe-TAGS-bolt-plugin TAGS-bolt-plugin ++maybe-TAGS-bolt-plugin: ++@if bolt-plugin ++maybe-TAGS-bolt-plugin: TAGS-bolt-plugin ++ ++TAGS-bolt-plugin: \ ++ configure-bolt-plugin ++ @[ -f ./bolt-plugin/Makefile ] || exit 0; \ ++ r=`${PWD_COMMAND}`; export r; \ ++ s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \ ++ $(HOST_EXPORTS) \ ++ for flag in $(EXTRA_HOST_FLAGS) @extra_linker_plugin_flags@; do \ ++ eval `echo "$$flag" | sed -e "s|^\([^=]*\)=\(.*\)|\1='\2'; export \1|"`; \ ++ done; \ ++ echo "Doing TAGS in bolt-plugin"; \ ++ (cd $(HOST_SUBDIR)/bolt-plugin && \ ++ $(MAKE) $(BASE_FLAGS_TO_PASS) "AR=$${AR}" "AS=$${AS}" \ ++ "CC=$${CC}" "CXX=$${CXX}" "LD=$${LD}" 
"NM=$${NM}" \ ++ "RANLIB=$${RANLIB}" \ ++ "DLLTOOL=$${DLLTOOL}" "WINDRES=$${WINDRES}" "WINDMC=$${WINDMC}" \ ++ TAGS) \ ++ || exit 1 ++ ++@endif bolt-plugin ++ ++.PHONY: maybe-install-info-bolt-plugin install-info-bolt-plugin ++maybe-install-info-bolt-plugin: ++@if bolt-plugin ++maybe-install-info-bolt-plugin: install-info-bolt-plugin ++ ++install-info-bolt-plugin: \ ++ configure-bolt-plugin \ ++ info-bolt-plugin ++ @[ -f ./bolt-plugin/Makefile ] || exit 0; \ ++ r=`${PWD_COMMAND}`; export r; \ ++ s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \ ++ $(HOST_EXPORTS) \ ++ for flag in $(EXTRA_HOST_FLAGS) @extra_linker_plugin_flags@; do \ ++ eval `echo "$$flag" | sed -e "s|^\([^=]*\)=\(.*\)|\1='\2'; export \1|"`; \ ++ done; \ ++ echo "Doing install-info in bolt-plugin"; \ ++ (cd $(HOST_SUBDIR)/bolt-plugin && \ ++ $(MAKE) $(BASE_FLAGS_TO_PASS) "AR=$${AR}" "AS=$${AS}" \ ++ "CC=$${CC}" "CXX=$${CXX}" "LD=$${LD}" "NM=$${NM}" \ ++ "RANLIB=$${RANLIB}" \ ++ "DLLTOOL=$${DLLTOOL}" "WINDRES=$${WINDRES}" "WINDMC=$${WINDMC}" \ ++ install-info) \ ++ || exit 1 ++ ++@endif bolt-plugin ++ ++.PHONY: maybe-install-dvi-bolt-plugin install-dvi-bolt-plugin ++maybe-install-dvi-bolt-plugin: ++@if bolt-plugin ++maybe-install-dvi-bolt-plugin: install-dvi-bolt-plugin ++ ++install-dvi-bolt-plugin: \ ++ configure-bolt-plugin \ ++ dvi-bolt-plugin ++ @[ -f ./bolt-plugin/Makefile ] || exit 0; \ ++ r=`${PWD_COMMAND}`; export r; \ ++ s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \ ++ $(HOST_EXPORTS) \ ++ for flag in $(EXTRA_HOST_FLAGS) @extra_linker_plugin_flags@; do \ ++ eval `echo "$$flag" | sed -e "s|^\([^=]*\)=\(.*\)|\1='\2'; export \1|"`; \ ++ done; \ ++ echo "Doing install-dvi in bolt-plugin"; \ ++ (cd $(HOST_SUBDIR)/bolt-plugin && \ ++ $(MAKE) $(BASE_FLAGS_TO_PASS) "AR=$${AR}" "AS=$${AS}" \ ++ "CC=$${CC}" "CXX=$${CXX}" "LD=$${LD}" "NM=$${NM}" \ ++ "RANLIB=$${RANLIB}" \ ++ "DLLTOOL=$${DLLTOOL}" "WINDRES=$${WINDRES}" "WINDMC=$${WINDMC}" \ ++ install-dvi) \ ++ || exit 1 ++ ++@endif bolt-plugin ++ ++.PHONY: 
maybe-install-pdf-bolt-plugin install-pdf-bolt-plugin ++maybe-install-pdf-bolt-plugin: ++@if bolt-plugin ++maybe-install-pdf-bolt-plugin: install-pdf-bolt-plugin ++ ++install-pdf-bolt-plugin: \ ++ configure-bolt-plugin \ ++ pdf-bolt-plugin ++ @[ -f ./bolt-plugin/Makefile ] || exit 0; \ ++ r=`${PWD_COMMAND}`; export r; \ ++ s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \ ++ $(HOST_EXPORTS) \ ++ for flag in $(EXTRA_HOST_FLAGS) @extra_linker_plugin_flags@; do \ ++ eval `echo "$$flag" | sed -e "s|^\([^=]*\)=\(.*\)|\1='\2'; export \1|"`; \ ++ done; \ ++ echo "Doing install-pdf in bolt-plugin"; \ ++ (cd $(HOST_SUBDIR)/bolt-plugin && \ ++ $(MAKE) $(BASE_FLAGS_TO_PASS) "AR=$${AR}" "AS=$${AS}" \ ++ "CC=$${CC}" "CXX=$${CXX}" "LD=$${LD}" "NM=$${NM}" \ ++ "RANLIB=$${RANLIB}" \ ++ "DLLTOOL=$${DLLTOOL}" "WINDRES=$${WINDRES}" "WINDMC=$${WINDMC}" \ ++ install-pdf) \ ++ || exit 1 ++ ++@endif bolt-plugin ++ ++.PHONY: maybe-install-html-bolt-plugin install-html-bolt-plugin ++maybe-install-html-bolt-plugin: ++@if bolt-plugin ++maybe-install-html-bolt-plugin: install-html-bolt-plugin ++ ++install-html-bolt-plugin: \ ++ configure-bolt-plugin \ ++ html-bolt-plugin ++ @[ -f ./bolt-plugin/Makefile ] || exit 0; \ ++ r=`${PWD_COMMAND}`; export r; \ ++ s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \ ++ $(HOST_EXPORTS) \ ++ for flag in $(EXTRA_HOST_FLAGS) @extra_linker_plugin_flags@; do \ ++ eval `echo "$$flag" | sed -e "s|^\([^=]*\)=\(.*\)|\1='\2'; export \1|"`; \ ++ done; \ ++ echo "Doing install-html in bolt-plugin"; \ ++ (cd $(HOST_SUBDIR)/bolt-plugin && \ ++ $(MAKE) $(BASE_FLAGS_TO_PASS) "AR=$${AR}" "AS=$${AS}" \ ++ "CC=$${CC}" "CXX=$${CXX}" "LD=$${LD}" "NM=$${NM}" \ ++ "RANLIB=$${RANLIB}" \ ++ "DLLTOOL=$${DLLTOOL}" "WINDRES=$${WINDRES}" "WINDMC=$${WINDMC}" \ ++ install-html) \ ++ || exit 1 ++ ++@endif bolt-plugin ++ ++.PHONY: maybe-installcheck-bolt-plugin installcheck-bolt-plugin ++maybe-installcheck-bolt-plugin: ++@if bolt-plugin ++maybe-installcheck-bolt-plugin: installcheck-bolt-plugin 
++ ++installcheck-bolt-plugin: \ ++ configure-bolt-plugin ++ @[ -f ./bolt-plugin/Makefile ] || exit 0; \ ++ r=`${PWD_COMMAND}`; export r; \ ++ s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \ ++ $(HOST_EXPORTS) \ ++ for flag in $(EXTRA_HOST_FLAGS) @extra_linker_plugin_flags@; do \ ++ eval `echo "$$flag" | sed -e "s|^\([^=]*\)=\(.*\)|\1='\2'; export \1|"`; \ ++ done; \ ++ echo "Doing installcheck in bolt-plugin"; \ ++ (cd $(HOST_SUBDIR)/bolt-plugin && \ ++ $(MAKE) $(BASE_FLAGS_TO_PASS) "AR=$${AR}" "AS=$${AS}" \ ++ "CC=$${CC}" "CXX=$${CXX}" "LD=$${LD}" "NM=$${NM}" \ ++ "RANLIB=$${RANLIB}" \ ++ "DLLTOOL=$${DLLTOOL}" "WINDRES=$${WINDRES}" "WINDMC=$${WINDMC}" \ ++ installcheck) \ ++ || exit 1 ++ ++@endif bolt-plugin ++ ++.PHONY: maybe-mostlyclean-bolt-plugin mostlyclean-bolt-plugin ++maybe-mostlyclean-bolt-plugin: ++@if bolt-plugin ++maybe-mostlyclean-bolt-plugin: mostlyclean-bolt-plugin ++ ++mostlyclean-bolt-plugin: ++ @[ -f ./bolt-plugin/Makefile ] || exit 0; \ ++ r=`${PWD_COMMAND}`; export r; \ ++ s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \ ++ $(HOST_EXPORTS) \ ++ for flag in $(EXTRA_HOST_FLAGS) @extra_linker_plugin_flags@; do \ ++ eval `echo "$$flag" | sed -e "s|^\([^=]*\)=\(.*\)|\1='\2'; export \1|"`; \ ++ done; \ ++ echo "Doing mostlyclean in bolt-plugin"; \ ++ (cd $(HOST_SUBDIR)/bolt-plugin && \ ++ $(MAKE) $(BASE_FLAGS_TO_PASS) "AR=$${AR}" "AS=$${AS}" \ ++ "CC=$${CC}" "CXX=$${CXX}" "LD=$${LD}" "NM=$${NM}" \ ++ "RANLIB=$${RANLIB}" \ ++ "DLLTOOL=$${DLLTOOL}" "WINDRES=$${WINDRES}" "WINDMC=$${WINDMC}" \ ++ mostlyclean) \ ++ || exit 1 ++ ++@endif bolt-plugin ++ ++.PHONY: maybe-clean-bolt-plugin clean-bolt-plugin ++maybe-clean-bolt-plugin: ++@if bolt-plugin ++maybe-clean-bolt-plugin: clean-bolt-plugin ++ ++clean-bolt-plugin: ++ @[ -f ./bolt-plugin/Makefile ] || exit 0; \ ++ r=`${PWD_COMMAND}`; export r; \ ++ s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \ ++ $(HOST_EXPORTS) \ ++ for flag in $(EXTRA_HOST_FLAGS) @extra_linker_plugin_flags@; do \ ++ eval `echo "$$flag" | sed 
-e "s|^\([^=]*\)=\(.*\)|\1='\2'; export \1|"`; \ ++ done; \ ++ echo "Doing clean in bolt-plugin"; \ ++ (cd $(HOST_SUBDIR)/bolt-plugin && \ ++ $(MAKE) $(BASE_FLAGS_TO_PASS) "AR=$${AR}" "AS=$${AS}" \ ++ "CC=$${CC}" "CXX=$${CXX}" "LD=$${LD}" "NM=$${NM}" \ ++ "RANLIB=$${RANLIB}" \ ++ "DLLTOOL=$${DLLTOOL}" "WINDRES=$${WINDRES}" "WINDMC=$${WINDMC}" \ ++ clean) \ ++ || exit 1 ++ ++@endif bolt-plugin ++ ++.PHONY: maybe-distclean-bolt-plugin distclean-bolt-plugin ++maybe-distclean-bolt-plugin: ++@if bolt-plugin ++maybe-distclean-bolt-plugin: distclean-bolt-plugin ++ ++distclean-bolt-plugin: ++ @[ -f ./bolt-plugin/Makefile ] || exit 0; \ ++ r=`${PWD_COMMAND}`; export r; \ ++ s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \ ++ $(HOST_EXPORTS) \ ++ for flag in $(EXTRA_HOST_FLAGS) @extra_linker_plugin_flags@; do \ ++ eval `echo "$$flag" | sed -e "s|^\([^=]*\)=\(.*\)|\1='\2'; export \1|"`; \ ++ done; \ ++ echo "Doing distclean in bolt-plugin"; \ ++ (cd $(HOST_SUBDIR)/bolt-plugin && \ ++ $(MAKE) $(BASE_FLAGS_TO_PASS) "AR=$${AR}" "AS=$${AS}" \ ++ "CC=$${CC}" "CXX=$${CXX}" "LD=$${LD}" "NM=$${NM}" \ ++ "RANLIB=$${RANLIB}" \ ++ "DLLTOOL=$${DLLTOOL}" "WINDRES=$${WINDRES}" "WINDMC=$${WINDMC}" \ ++ distclean) \ ++ || exit 1 ++ ++@endif bolt-plugin ++ ++.PHONY: maybe-maintainer-clean-bolt-plugin maintainer-clean-bolt-plugin ++maybe-maintainer-clean-bolt-plugin: ++@if bolt-plugin ++maybe-maintainer-clean-bolt-plugin: maintainer-clean-bolt-plugin ++ ++maintainer-clean-bolt-plugin: ++ @[ -f ./bolt-plugin/Makefile ] || exit 0; \ ++ r=`${PWD_COMMAND}`; export r; \ ++ s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \ ++ $(HOST_EXPORTS) \ ++ for flag in $(EXTRA_HOST_FLAGS) @extra_linker_plugin_flags@; do \ ++ eval `echo "$$flag" | sed -e "s|^\([^=]*\)=\(.*\)|\1='\2'; export \1|"`; \ ++ done; \ ++ echo "Doing maintainer-clean in bolt-plugin"; \ ++ (cd $(HOST_SUBDIR)/bolt-plugin && \ ++ $(MAKE) $(BASE_FLAGS_TO_PASS) "AR=$${AR}" "AS=$${AS}" \ ++ "CC=$${CC}" "CXX=$${CXX}" "LD=$${LD}" "NM=$${NM}" \ ++ 
"RANLIB=$${RANLIB}" \ ++ "DLLTOOL=$${DLLTOOL}" "WINDRES=$${WINDRES}" "WINDMC=$${WINDMC}" \ ++ maintainer-clean) \ ++ || exit 1 ++ ++@endif bolt-plugin ++ ++ ++ + .PHONY: configure-libcc1 maybe-configure-libcc1 + maybe-configure-libcc1: + @if gcc-bootstrap +@@ -61940,6 +63111,11 @@ stage1-start:: + mkdir stage1-lto-plugin; \ + mv stage1-lto-plugin lto-plugin + @endif lto-plugin ++@if bolt-plugin ++ @cd $(HOST_SUBDIR); [ -d stage1-bolt-plugin ] || \ ++ mkdir stage1-bolt-plugin; \ ++ mv stage1-bolt-plugin bolt-plugin ++@endif bolt-plugin + @if libctf + @cd $(HOST_SUBDIR); [ -d stage1-libctf ] || \ + mkdir stage1-libctf; \ +@@ -62065,6 +63241,11 @@ stage1-end:: + cd $(HOST_SUBDIR); mv lto-plugin stage1-lto-plugin; \ + fi + @endif lto-plugin ++@if bolt-plugin ++ @if test -d $(HOST_SUBDIR)/bolt-plugin; then \ ++ cd $(HOST_SUBDIR); mv bolt-plugin stage1-bolt-plugin; \ ++ fi ++@endif bolt-plugin + @if libctf + @if test -d $(HOST_SUBDIR)/libctf; then \ + cd $(HOST_SUBDIR); mv libctf stage1-libctf; \ +@@ -62256,6 +63437,12 @@ stage2-start:: + mv stage2-lto-plugin lto-plugin; \ + mv stage1-lto-plugin prev-lto-plugin || test -f stage1-lean + @endif lto-plugin ++@if bolt-plugin ++ @cd $(HOST_SUBDIR); [ -d stage2-bolt-plugin ] || \ ++ mkdir stage2-bolt-plugin; \ ++ mv stage2-bolt-plugin bolt-plugin; \ ++ mv stage1-bolt-plugin prev-bolt-plugin || test -f stage1-lean ++@endif bolt-plugin + @if libctf + @cd $(HOST_SUBDIR); [ -d stage2-libctf ] || \ + mkdir stage2-libctf; \ +@@ -62406,6 +63593,12 @@ stage2-end:: + mv prev-lto-plugin stage1-lto-plugin; : ; \ + fi + @endif lto-plugin ++@if bolt-plugin ++ @if test -d $(HOST_SUBDIR)/bolt-plugin; then \ ++ cd $(HOST_SUBDIR); mv bolt-plugin stage2-bolt-plugin; \ ++ mv prev-bolt-plugin stage1-bolt-plugin; : ; \ ++ fi ++@endif bolt-plugin + @if libctf + @if test -d $(HOST_SUBDIR)/libctf; then \ + cd $(HOST_SUBDIR); mv libctf stage2-libctf; \ +@@ -62622,6 +63815,12 @@ stage3-start:: + mv stage3-lto-plugin lto-plugin; \ + mv stage2-lto-plugin 
prev-lto-plugin || test -f stage2-lean + @endif lto-plugin ++@if bolt-plugin ++ @cd $(HOST_SUBDIR); [ -d stage3-bolt-plugin ] || \ ++ mkdir stage3-bolt-plugin; \ ++ mv stage3-bolt-plugin bolt-plugin; \ ++ mv stage2-bolt-plugin prev-bolt-plugin || test -f stage2-lean ++@endif bolt-plugin + @if libctf + @cd $(HOST_SUBDIR); [ -d stage3-libctf ] || \ + mkdir stage3-libctf; \ +@@ -62772,6 +63971,12 @@ stage3-end:: + mv prev-lto-plugin stage2-lto-plugin; : ; \ + fi + @endif lto-plugin ++@if bolt-plugin ++ @if test -d $(HOST_SUBDIR)/bolt-plugin; then \ ++ cd $(HOST_SUBDIR); mv bolt-plugin stage3-bolt-plugin; \ ++ mv prev-bolt-plugin stage2-bolt-plugin; : ; \ ++ fi ++@endif bolt-plugin + @if libctf + @if test -d $(HOST_SUBDIR)/libctf; then \ + cd $(HOST_SUBDIR); mv libctf stage3-libctf; \ +@@ -63044,6 +64249,12 @@ stage4-start:: + mv stage4-lto-plugin lto-plugin; \ + mv stage3-lto-plugin prev-lto-plugin || test -f stage3-lean + @endif lto-plugin ++@if bolt-plugin ++ @cd $(HOST_SUBDIR); [ -d stage4-bolt-plugin ] || \ ++ mkdir stage4-bolt-plugin; \ ++ mv stage4-bolt-plugin bolt-plugin; \ ++ mv stage3-bolt-plugin prev-bolt-plugin || test -f stage3-lean ++@endif bolt-plugin + @if libctf + @cd $(HOST_SUBDIR); [ -d stage4-libctf ] || \ + mkdir stage4-libctf; \ +@@ -63194,6 +64405,12 @@ stage4-end:: + mv prev-lto-plugin stage3-lto-plugin; : ; \ + fi + @endif lto-plugin ++@if bolt-plugin ++ @if test -d $(HOST_SUBDIR)/bolt-plugin; then \ ++ cd $(HOST_SUBDIR); mv bolt-plugin stage4-bolt-plugin; \ ++ mv prev-bolt-plugin stage3-bolt-plugin; : ; \ ++ fi ++@endif bolt-plugin + @if libctf + @if test -d $(HOST_SUBDIR)/libctf; then \ + cd $(HOST_SUBDIR); mv libctf stage4-libctf; \ +@@ -63454,6 +64671,12 @@ stageprofile-start:: + mv stageprofile-lto-plugin lto-plugin; \ + mv stage1-lto-plugin prev-lto-plugin || test -f stage1-lean + @endif lto-plugin ++@if bolt-plugin ++ @cd $(HOST_SUBDIR); [ -d stageprofile-bolt-plugin ] || \ ++ mkdir stageprofile-bolt-plugin; \ ++ mv 
stageprofile-bolt-plugin bolt-plugin; \ ++ mv stage1-bolt-plugin prev-bolt-plugin || test -f stage1-lean ++@endif bolt-plugin + @if libctf + @cd $(HOST_SUBDIR); [ -d stageprofile-libctf ] || \ + mkdir stageprofile-libctf; \ +@@ -63604,6 +64827,12 @@ stageprofile-end:: + mv prev-lto-plugin stage1-lto-plugin; : ; \ + fi + @endif lto-plugin ++@if bolt-plugin ++ @if test -d $(HOST_SUBDIR)/bolt-plugin; then \ ++ cd $(HOST_SUBDIR); mv bolt-plugin stageprofile-bolt-plugin; \ ++ mv prev-bolt-plugin stage1-bolt-plugin; : ; \ ++ fi ++@endif bolt-plugin + @if libctf + @if test -d $(HOST_SUBDIR)/libctf; then \ + cd $(HOST_SUBDIR); mv libctf stageprofile-libctf; \ +@@ -63797,6 +65026,12 @@ stagetrain-start:: + mv stagetrain-lto-plugin lto-plugin; \ + mv stageprofile-lto-plugin prev-lto-plugin || test -f stageprofile-lean + @endif lto-plugin ++@if bolt-plugin ++ @cd $(HOST_SUBDIR); [ -d stagetrain-bolt-plugin ] || \ ++ mkdir stagetrain-bolt-plugin; \ ++ mv stagetrain-bolt-plugin bolt-plugin; \ ++ mv stageprofile-bolt-plugin prev-bolt-plugin || test -f stageprofile-lean ++@endif bolt-plugin + @if libctf + @cd $(HOST_SUBDIR); [ -d stagetrain-libctf ] || \ + mkdir stagetrain-libctf; \ +@@ -63947,6 +65182,12 @@ stagetrain-end:: + mv prev-lto-plugin stageprofile-lto-plugin; : ; \ + fi + @endif lto-plugin ++@if bolt-plugin ++ @if test -d $(HOST_SUBDIR)/bolt-plugin; then \ ++ cd $(HOST_SUBDIR); mv bolt-plugin stagetrain-bolt-plugin; \ ++ mv prev-bolt-plugin stageprofile-bolt-plugin; : ; \ ++ fi ++@endif bolt-plugin + @if libctf + @if test -d $(HOST_SUBDIR)/libctf; then \ + cd $(HOST_SUBDIR); mv libctf stagetrain-libctf; \ +@@ -64140,6 +65381,12 @@ stagefeedback-start:: + mv stagefeedback-lto-plugin lto-plugin; \ + mv stagetrain-lto-plugin prev-lto-plugin || test -f stagetrain-lean + @endif lto-plugin ++@if bolt-plugin ++ @cd $(HOST_SUBDIR); [ -d stagefeedback-bolt-plugin ] || \ ++ mkdir stagefeedback-bolt-plugin; \ ++ mv stagefeedback-bolt-plugin bolt-plugin; \ ++ mv 
stagetrain-bolt-plugin prev-bolt-plugin || test -f stagetrain-lean ++@endif bolt-plugin + @if libctf + @cd $(HOST_SUBDIR); [ -d stagefeedback-libctf ] || \ + mkdir stagefeedback-libctf; \ +@@ -64290,6 +65537,12 @@ stagefeedback-end:: + mv prev-lto-plugin stagetrain-lto-plugin; : ; \ + fi + @endif lto-plugin ++@if bolt-plugin ++ @if test -d $(HOST_SUBDIR)/bolt-plugin; then \ ++ cd $(HOST_SUBDIR); mv bolt-plugin stagefeedback-bolt-plugin; \ ++ mv prev-bolt-plugin stagetrain-bolt-plugin; : ; \ ++ fi ++@endif bolt-plugin + @if libctf + @if test -d $(HOST_SUBDIR)/libctf; then \ + cd $(HOST_SUBDIR); mv libctf stagefeedback-libctf; \ +@@ -64506,6 +65759,12 @@ stageautoprofile-start:: + mv stageautoprofile-lto-plugin lto-plugin; \ + mv stage1-lto-plugin prev-lto-plugin || test -f stage1-lean + @endif lto-plugin ++@if bolt-plugin ++ @cd $(HOST_SUBDIR); [ -d stageautoprofile-bolt-plugin ] || \ ++ mkdir stageautoprofile-bolt-plugin; \ ++ mv stageautoprofile-bolt-plugin bolt-plugin; \ ++ mv stage1-bolt-plugin prev-bolt-plugin || test -f stage1-lean ++@endif bolt-plugin + @if libctf + @cd $(HOST_SUBDIR); [ -d stageautoprofile-libctf ] || \ + mkdir stageautoprofile-libctf; \ +@@ -64656,6 +65915,12 @@ stageautoprofile-end:: + mv prev-lto-plugin stage1-lto-plugin; : ; \ + fi + @endif lto-plugin ++@if bolt-plugin ++ @if test -d $(HOST_SUBDIR)/bolt-plugin; then \ ++ cd $(HOST_SUBDIR); mv bolt-plugin stageautoprofile-bolt-plugin; \ ++ mv prev-bolt-plugin stage1-bolt-plugin; : ; \ ++ fi ++@endif bolt-plugin + @if libctf + @if test -d $(HOST_SUBDIR)/libctf; then \ + cd $(HOST_SUBDIR); mv libctf stageautoprofile-libctf; \ +@@ -64849,6 +66114,12 @@ stageautofeedback-start:: + mv stageautofeedback-lto-plugin lto-plugin; \ + mv stageautoprofile-lto-plugin prev-lto-plugin || test -f stageautoprofile-lean + @endif lto-plugin ++@if bolt-plugin ++ @cd $(HOST_SUBDIR); [ -d stageautofeedback-bolt-plugin ] || \ ++ mkdir stageautofeedback-bolt-plugin; \ ++ mv stageautofeedback-bolt-plugin 
bolt-plugin; \ ++ mv stageautoprofile-bolt-plugin prev-bolt-plugin || test -f stageautoprofile-lean ++@endif bolt-plugin + @if libctf + @cd $(HOST_SUBDIR); [ -d stageautofeedback-libctf ] || \ + mkdir stageautofeedback-libctf; \ +@@ -64999,6 +66270,12 @@ stageautofeedback-end:: + mv prev-lto-plugin stageautoprofile-lto-plugin; : ; \ + fi + @endif lto-plugin ++@if bolt-plugin ++ @if test -d $(HOST_SUBDIR)/bolt-plugin; then \ ++ cd $(HOST_SUBDIR); mv bolt-plugin stageautofeedback-bolt-plugin; \ ++ mv prev-bolt-plugin stageautoprofile-bolt-plugin; : ; \ ++ fi ++@endif bolt-plugin + @if libctf + @if test -d $(HOST_SUBDIR)/libctf; then \ + cd $(HOST_SUBDIR); mv libctf stageautofeedback-libctf; \ +@@ -65321,6 +66598,16 @@ configure-stagetrain-gcc: maybe-all-stagetrain-lto-plugin + configure-stagefeedback-gcc: maybe-all-stagefeedback-lto-plugin + configure-stageautoprofile-gcc: maybe-all-stageautoprofile-lto-plugin + configure-stageautofeedback-gcc: maybe-all-stageautofeedback-lto-plugin ++configure-gcc: maybe-all-bolt-plugin ++configure-stage1-gcc: maybe-all-stage1-bolt-plugin ++configure-stage2-gcc: maybe-all-stage2-bolt-plugin ++configure-stage3-gcc: maybe-all-stage3-bolt-plugin ++configure-stage4-gcc: maybe-all-stage4-bolt-plugin ++configure-stageprofile-gcc: maybe-all-stageprofile-bolt-plugin ++configure-stagetrain-gcc: maybe-all-stagetrain-bolt-plugin ++configure-stagefeedback-gcc: maybe-all-stagefeedback-bolt-plugin ++configure-stageautoprofile-gcc: maybe-all-stageautoprofile-bolt-plugin ++configure-stageautofeedback-gcc: maybe-all-stageautofeedback-bolt-plugin + configure-gcc: maybe-all-binutils + configure-stage1-gcc: maybe-all-stage1-binutils + configure-stage2-gcc: maybe-all-stage2-binutils +@@ -65571,6 +66858,16 @@ all-stagetrain-gcc: maybe-all-stagetrain-lto-plugin + all-stagefeedback-gcc: maybe-all-stagefeedback-lto-plugin + all-stageautoprofile-gcc: maybe-all-stageautoprofile-lto-plugin + all-stageautofeedback-gcc: maybe-all-stageautofeedback-lto-plugin 
++all-gcc: maybe-all-bolt-plugin ++all-stage1-gcc: maybe-all-stage1-bolt-plugin ++all-stage2-gcc: maybe-all-stage2-bolt-plugin ++all-stage3-gcc: maybe-all-stage3-bolt-plugin ++all-stage4-gcc: maybe-all-stage4-bolt-plugin ++all-stageprofile-gcc: maybe-all-stageprofile-bolt-plugin ++all-stagetrain-gcc: maybe-all-stagetrain-bolt-plugin ++all-stagefeedback-gcc: maybe-all-stagefeedback-bolt-plugin ++all-stageautoprofile-gcc: maybe-all-stageautoprofile-bolt-plugin ++all-stageautofeedback-gcc: maybe-all-stageautofeedback-bolt-plugin + all-gcc: maybe-all-libiconv + all-stage1-gcc: maybe-all-stage1-libiconv + all-stage2-gcc: maybe-all-stage2-libiconv +@@ -65623,8 +66920,10 @@ html-stageautoprofile-gcc: maybe-all-build-libiberty + html-stageautofeedback-gcc: maybe-all-build-libiberty + install-gcc: maybe-install-fixincludes + install-gcc: maybe-install-lto-plugin ++install-gcc: maybe-install-bolt-plugin + install-strip-gcc: maybe-install-strip-fixincludes + install-strip-gcc: maybe-install-strip-lto-plugin ++install-strip-gcc: maybe-install-strip-bolt-plugin + configure-libcpp: configure-libiberty + configure-stage1-libcpp: configure-stage1-libiberty + configure-stage2-libcpp: configure-stage2-libiberty +@@ -65716,6 +67015,26 @@ all-stagetrain-lto-plugin: maybe-all-stagetrain-libiberty-linker-plugin + all-stagefeedback-lto-plugin: maybe-all-stagefeedback-libiberty-linker-plugin + all-stageautoprofile-lto-plugin: maybe-all-stageautoprofile-libiberty-linker-plugin + all-stageautofeedback-lto-plugin: maybe-all-stageautofeedback-libiberty-linker-plugin ++all-bolt-plugin: maybe-all-libiberty ++all-stage1-bolt-plugin: maybe-all-stage1-libiberty ++all-stage2-bolt-plugin: maybe-all-stage2-libiberty ++all-stage3-bolt-plugin: maybe-all-stage3-libiberty ++all-stage4-bolt-plugin: maybe-all-stage4-libiberty ++all-stageprofile-bolt-plugin: maybe-all-stageprofile-libiberty ++all-stagetrain-bolt-plugin: maybe-all-stagetrain-libiberty ++all-stagefeedback-bolt-plugin: 
maybe-all-stagefeedback-libiberty ++all-stageautoprofile-bolt-plugin: maybe-all-stageautoprofile-libiberty ++all-stageautofeedback-bolt-plugin: maybe-all-stageautofeedback-libiberty ++all-bolt-plugin: maybe-all-libiberty-linker-plugin ++all-stage1-bolt-plugin: maybe-all-stage1-libiberty-linker-plugin ++all-stage2-bolt-plugin: maybe-all-stage2-libiberty-linker-plugin ++all-stage3-bolt-plugin: maybe-all-stage3-libiberty-linker-plugin ++all-stage4-bolt-plugin: maybe-all-stage4-libiberty-linker-plugin ++all-stageprofile-bolt-plugin: maybe-all-stageprofile-libiberty-linker-plugin ++all-stagetrain-bolt-plugin: maybe-all-stagetrain-libiberty-linker-plugin ++all-stagefeedback-bolt-plugin: maybe-all-stagefeedback-libiberty-linker-plugin ++all-stageautoprofile-bolt-plugin: maybe-all-stageautoprofile-libiberty-linker-plugin ++all-stageautofeedback-bolt-plugin: maybe-all-stageautofeedback-libiberty-linker-plugin + all-gotools: maybe-all-target-libgo + configure-intl: maybe-all-libiconv + configure-stage1-intl: maybe-all-stage1-libiconv +diff --git a/bolt-plugin/Makefile.in b/bolt-plugin/Makefile.in +index 11b59407e..0a58abc45 100644 +--- a/bolt-plugin/Makefile.in ++++ b/bolt-plugin/Makefile.in +@@ -1,7 +1,7 @@ +-# Makefile.in generated by automake 1.16.5 from Makefile.am. ++# Makefile.in generated by automake 1.16.2 from Makefile.am. + # @configure_input@ + +-# Copyright (C) 1994-2021 Free Software Foundation, Inc. ++# Copyright (C) 1994-2020 Free Software Foundation, Inc. + + # This Makefile.in is free software; the Free Software Foundation + # gives unlimited permission to copy and/or distribute it, +@@ -91,7 +91,15 @@ host_triplet = @host@ + target_triplet = @target@ + subdir = . 
+ ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +-am__aclocal_m4_deps = $(top_srcdir)/configure.ac ++am__aclocal_m4_deps = $(top_srcdir)/../config/acx.m4 \ ++ $(top_srcdir)/../config/depstand.m4 \ ++ $(top_srcdir)/../config/lead-dot.m4 \ ++ $(top_srcdir)/../config/lthostflags.m4 \ ++ $(top_srcdir)/../config/override.m4 \ ++ $(top_srcdir)/../config/warnings.m4 \ ++ $(top_srcdir)/../libtool.m4 $(top_srcdir)/../ltoptions.m4 \ ++ $(top_srcdir)/../ltsugar.m4 $(top_srcdir)/../ltversion.m4 \ ++ $(top_srcdir)/../lt~obsolete.m4 $(top_srcdir)/configure.ac + am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) + DIST_COMMON = $(srcdir)/Makefile.am $(top_srcdir)/configure \ +@@ -194,6 +202,9 @@ am__define_uniq_tagged_files = \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | $(am__uniquify_input)` ++ETAGS = etags ++CTAGS = ctags ++CSCOPE = cscope + AM_RECURSIVE_TARGETS = cscope + ACLOCAL = @ACLOCAL@ + AMTAR = @AMTAR@ +@@ -206,9 +217,8 @@ AWK = @AWK@ + CC = @CC@ + CCDEPMODE = @CCDEPMODE@ + CFLAGS = @CFLAGS@ ++CPP = @CPP@ + CPPFLAGS = @CPPFLAGS@ +-CSCOPE = @CSCOPE@ +-CTAGS = @CTAGS@ + CXX = @CXX@ + CXXCPP = @CXXCPP@ + CXXDEPMODE = @CXXDEPMODE@ +@@ -216,17 +226,14 @@ CXXFLAGS = @CXXFLAGS@ + CYGPATH_W = @CYGPATH_W@ + DEFS = @DEFS@ + DEPDIR = @DEPDIR@ +-DLLTOOL = @DLLTOOL@ + DSYMUTIL = @DSYMUTIL@ + DUMPBIN = @DUMPBIN@ + ECHO_C = @ECHO_C@ + ECHO_N = @ECHO_N@ + ECHO_T = @ECHO_T@ + EGREP = @EGREP@ +-ETAGS = @ETAGS@ + EXEEXT = @EXEEXT@ + FGREP = @FGREP@ +-FILECMD = @FILECMD@ + GREP = @GREP@ + INSTALL = @INSTALL@ + INSTALL_DATA = @INSTALL_DATA@ +@@ -241,10 +248,8 @@ LIBTOOL = @LIBTOOL@ + LIPO = @LIPO@ + LN_S = @LN_S@ + LTLIBOBJS = @LTLIBOBJS@ +-LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ + MAINT = @MAINT@ + MAKEINFO = @MAKEINFO@ +-MANIFEST_TOOL = @MANIFEST_TOOL@ + MKDIR_P = @MKDIR_P@ + NM = @NM@ + NMEDIT = @NMEDIT@ +@@ -271,7 +276,7 @@ abs_srcdir = @abs_srcdir@ + abs_top_builddir = @abs_top_builddir@ + 
abs_top_srcdir = @abs_top_srcdir@ + ac_bolt_plugin_ldflags = @ac_bolt_plugin_ldflags@ +-ac_ct_AR = @ac_ct_AR@ ++ac_bolt_plugin_warn_cflags = @ac_bolt_plugin_warn_cflags@ + ac_ct_CC = @ac_ct_CC@ + ac_ct_CXX = @ac_ct_CXX@ + ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ +@@ -285,7 +290,9 @@ bindir = @bindir@ + build = @build@ + build_alias = @build_alias@ + build_cpu = @build_cpu@ ++build_libsubdir = @build_libsubdir@ + build_os = @build_os@ ++build_subdir = @build_subdir@ + build_vendor = @build_vendor@ + builddir = @builddir@ + datadir = @datadir@ +@@ -294,10 +301,12 @@ docdir = @docdir@ + dvidir = @dvidir@ + exec_prefix = @exec_prefix@ + gcc_build_dir = @gcc_build_dir@ ++get_gcc_base_ver = @get_gcc_base_ver@ + host = @host@ + host_alias = @host_alias@ + host_cpu = @host_cpu@ + host_os = @host_os@ ++host_subdir = @host_subdir@ + host_vendor = @host_vendor@ + htmldir = @htmldir@ + includedir = @includedir@ +@@ -307,6 +316,7 @@ libdir = @libdir@ + libexecdir = @libexecdir@ + localedir = @localedir@ + localstatedir = @localstatedir@ ++lt_host_flags = @lt_host_flags@ + mandir = @mandir@ + mkdir_p = @mkdir_p@ + oldincludedir = @oldincludedir@ +@@ -315,7 +325,6 @@ prefix = @prefix@ + program_transform_name = @program_transform_name@ + psdir = @psdir@ + real_target_noncanonical = @real_target_noncanonical@ +-runstatedir = @runstatedir@ + sbindir = @sbindir@ + sharedstatedir = @sharedstatedir@ + srcdir = @srcdir@ +@@ -325,6 +334,7 @@ target_alias = @target_alias@ + target_cpu = @target_cpu@ + target_noncanonical := @target_noncanonical@ + target_os = @target_os@ ++target_subdir = @target_subdir@ + target_vendor = @target_vendor@ + top_build_prefix = @top_build_prefix@ + top_builddir = @top_builddir@ +diff --git a/bolt-plugin/aclocal.m4 b/bolt-plugin/aclocal.m4 +index 679f2baa4..73bf7852c 100644 +--- a/bolt-plugin/aclocal.m4 ++++ b/bolt-plugin/aclocal.m4 +@@ -1,6 +1,6 @@ +-# generated automatically by aclocal 1.16.5 -*- Autoconf -*- ++# generated automatically by aclocal 1.16.2 -*- 
Autoconf -*- + +-# Copyright (C) 1996-2021 Free Software Foundation, Inc. ++# Copyright (C) 1996-2020 Free Software Foundation, Inc. + + # This file is free software; the Free Software Foundation + # gives unlimited permission to copy and/or distribute it, +@@ -14,9077 +14,13 @@ + m4_ifndef([AC_CONFIG_MACRO_DIRS], [m4_defun([_AM_CONFIG_MACRO_DIRS], [])m4_defun([AC_CONFIG_MACRO_DIRS], [_AM_CONFIG_MACRO_DIRS($@)])]) + m4_ifndef([AC_AUTOCONF_VERSION], + [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl +-m4_if(m4_defn([AC_AUTOCONF_VERSION]), [2.71],, +-[m4_warning([this file was generated for autoconf 2.71. ++m4_if(m4_defn([AC_AUTOCONF_VERSION]), [2.69],, ++[m4_warning([this file was generated for autoconf 2.69. + You have another version of autoconf. It may work, but is not guaranteed to. + If you have problems, you may need to regenerate the build system entirely. + To do so, use the procedure documented by the package, typically 'autoreconf'.])]) + +-# libtool.m4 - Configure libtool for the host system. -*-Autoconf-*- +-# +-# Copyright (C) 1996-2001, 2003-2019, 2021-2022 Free Software +-# Foundation, Inc. +-# Written by Gordon Matzigkeit, 1996 +-# +-# This file is free software; the Free Software Foundation gives +-# unlimited permission to copy and/or distribute it, with or without +-# modifications, as long as this notice is preserved. +- +-m4_define([_LT_COPYING], [dnl +-# Copyright (C) 2014 Free Software Foundation, Inc. +-# This is free software; see the source for copying conditions. There is NO +-# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +- +-# GNU Libtool is free software; you can redistribute it and/or modify +-# it under the terms of the GNU General Public License as published by +-# the Free Software Foundation; either version 2 of of the License, or +-# (at your option) any later version. 
+-# +-# As a special exception to the GNU General Public License, if you +-# distribute this file as part of a program or library that is built +-# using GNU Libtool, you may include this file under the same +-# distribution terms that you use for the rest of that program. +-# +-# GNU Libtool is distributed in the hope that it will be useful, but +-# WITHOUT ANY WARRANTY; without even the implied warranty of +-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +-# GNU General Public License for more details. +-# +-# You should have received a copy of the GNU General Public License +-# along with this program. If not, see . +-]) +- +-# serial 59 LT_INIT +- +- +-# LT_PREREQ(VERSION) +-# ------------------ +-# Complain and exit if this libtool version is less that VERSION. +-m4_defun([LT_PREREQ], +-[m4_if(m4_version_compare(m4_defn([LT_PACKAGE_VERSION]), [$1]), -1, +- [m4_default([$3], +- [m4_fatal([Libtool version $1 or higher is required], +- 63)])], +- [$2])]) +- +- +-# _LT_CHECK_BUILDDIR +-# ------------------ +-# Complain if the absolute build directory name contains unusual characters +-m4_defun([_LT_CHECK_BUILDDIR], +-[case `pwd` in +- *\ * | *\ *) +- AC_MSG_WARN([Libtool does not cope well with whitespace in `pwd`]) ;; +-esac +-]) +- +- +-# LT_INIT([OPTIONS]) +-# ------------------ +-AC_DEFUN([LT_INIT], +-[AC_PREREQ([2.62])dnl We use AC_PATH_PROGS_FEATURE_CHECK +-AC_REQUIRE([AC_CONFIG_AUX_DIR_DEFAULT])dnl +-AC_BEFORE([$0], [LT_LANG])dnl +-AC_BEFORE([$0], [LT_OUTPUT])dnl +-AC_BEFORE([$0], [LTDL_INIT])dnl +-m4_require([_LT_CHECK_BUILDDIR])dnl +- +-dnl Autoconf doesn't catch unexpanded LT_ macros by default: +-m4_pattern_forbid([^_?LT_[A-Z_]+$])dnl +-m4_pattern_allow([^(_LT_EOF|LT_DLGLOBAL|LT_DLLAZY_OR_NOW|LT_MULTI_MODULE)$])dnl +-dnl aclocal doesn't pull ltoptions.m4, ltsugar.m4, or ltversion.m4 +-dnl unless we require an AC_DEFUNed macro: +-AC_REQUIRE([LTOPTIONS_VERSION])dnl +-AC_REQUIRE([LTSUGAR_VERSION])dnl +-AC_REQUIRE([LTVERSION_VERSION])dnl 
+-AC_REQUIRE([LTOBSOLETE_VERSION])dnl +-m4_require([_LT_PROG_LTMAIN])dnl +- +-_LT_SHELL_INIT([SHELL=${CONFIG_SHELL-/bin/sh}]) +- +-dnl Parse OPTIONS +-_LT_SET_OPTIONS([$0], [$1]) +- +-# This can be used to rebuild libtool when needed +-LIBTOOL_DEPS=$ltmain +- +-# Always use our own libtool. +-LIBTOOL='$(SHELL) $(top_builddir)/libtool' +-AC_SUBST(LIBTOOL)dnl +- +-_LT_SETUP +- +-# Only expand once: +-m4_define([LT_INIT]) +-])# LT_INIT +- +-# Old names: +-AU_ALIAS([AC_PROG_LIBTOOL], [LT_INIT]) +-AU_ALIAS([AM_PROG_LIBTOOL], [LT_INIT]) +-dnl aclocal-1.4 backwards compatibility: +-dnl AC_DEFUN([AC_PROG_LIBTOOL], []) +-dnl AC_DEFUN([AM_PROG_LIBTOOL], []) +- +- +-# _LT_PREPARE_CC_BASENAME +-# ----------------------- +-m4_defun([_LT_PREPARE_CC_BASENAME], [ +-# Calculate cc_basename. Skip known compiler wrappers and cross-prefix. +-func_cc_basename () +-{ +- for cc_temp in @S|@*""; do +- case $cc_temp in +- compile | *[[\\/]]compile | ccache | *[[\\/]]ccache ) ;; +- distcc | *[[\\/]]distcc | purify | *[[\\/]]purify ) ;; +- \-*) ;; +- *) break;; +- esac +- done +- func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` +-} +-])# _LT_PREPARE_CC_BASENAME +- +- +-# _LT_CC_BASENAME(CC) +-# ------------------- +-# It would be clearer to call AC_REQUIREs from _LT_PREPARE_CC_BASENAME, +-# but that macro is also expanded into generated libtool script, which +-# arranges for $SED and $ECHO to be set by different means. +-m4_defun([_LT_CC_BASENAME], +-[m4_require([_LT_PREPARE_CC_BASENAME])dnl +-AC_REQUIRE([_LT_DECL_SED])dnl +-AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH])dnl +-func_cc_basename $1 +-cc_basename=$func_cc_basename_result +-]) +- +- +-# _LT_FILEUTILS_DEFAULTS +-# ---------------------- +-# It is okay to use these file commands and assume they have been set +-# sensibly after 'm4_require([_LT_FILEUTILS_DEFAULTS])'. 
+-m4_defun([_LT_FILEUTILS_DEFAULTS], +-[: ${CP="cp -f"} +-: ${MV="mv -f"} +-: ${RM="rm -f"} +-])# _LT_FILEUTILS_DEFAULTS +- +- +-# _LT_SETUP +-# --------- +-m4_defun([_LT_SETUP], +-[AC_REQUIRE([AC_CANONICAL_HOST])dnl +-AC_REQUIRE([AC_CANONICAL_BUILD])dnl +-AC_REQUIRE([_LT_PREPARE_SED_QUOTE_VARS])dnl +-AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH])dnl +- +-_LT_DECL([], [PATH_SEPARATOR], [1], [The PATH separator for the build system])dnl +-dnl +-_LT_DECL([], [host_alias], [0], [The host system])dnl +-_LT_DECL([], [host], [0])dnl +-_LT_DECL([], [host_os], [0])dnl +-dnl +-_LT_DECL([], [build_alias], [0], [The build system])dnl +-_LT_DECL([], [build], [0])dnl +-_LT_DECL([], [build_os], [0])dnl +-dnl +-AC_REQUIRE([AC_PROG_CC])dnl +-AC_REQUIRE([LT_PATH_LD])dnl +-AC_REQUIRE([LT_PATH_NM])dnl +-dnl +-AC_REQUIRE([AC_PROG_LN_S])dnl +-test -z "$LN_S" && LN_S="ln -s" +-_LT_DECL([], [LN_S], [1], [Whether we need soft or hard links])dnl +-dnl +-AC_REQUIRE([LT_CMD_MAX_LEN])dnl +-_LT_DECL([objext], [ac_objext], [0], [Object file suffix (normally "o")])dnl +-_LT_DECL([], [exeext], [0], [Executable file suffix (normally "")])dnl +-dnl +-m4_require([_LT_FILEUTILS_DEFAULTS])dnl +-m4_require([_LT_CHECK_SHELL_FEATURES])dnl +-m4_require([_LT_PATH_CONVERSION_FUNCTIONS])dnl +-m4_require([_LT_CMD_RELOAD])dnl +-m4_require([_LT_DECL_FILECMD])dnl +-m4_require([_LT_CHECK_MAGIC_METHOD])dnl +-m4_require([_LT_CHECK_SHAREDLIB_FROM_LINKLIB])dnl +-m4_require([_LT_CMD_OLD_ARCHIVE])dnl +-m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl +-m4_require([_LT_WITH_SYSROOT])dnl +-m4_require([_LT_CMD_TRUNCATE])dnl +- +-_LT_CONFIG_LIBTOOL_INIT([ +-# See if we are running on zsh, and set the options that allow our +-# commands through without removal of \ escapes INIT. 
+-if test -n "\${ZSH_VERSION+set}"; then +- setopt NO_GLOB_SUBST +-fi +-]) +-if test -n "${ZSH_VERSION+set}"; then +- setopt NO_GLOB_SUBST +-fi +- +-_LT_CHECK_OBJDIR +- +-m4_require([_LT_TAG_COMPILER])dnl +- +-case $host_os in +-aix3*) +- # AIX sometimes has problems with the GCC collect2 program. For some +- # reason, if we set the COLLECT_NAMES environment variable, the problems +- # vanish in a puff of smoke. +- if test set != "${COLLECT_NAMES+set}"; then +- COLLECT_NAMES= +- export COLLECT_NAMES +- fi +- ;; +-esac +- +-# Global variables: +-ofile=libtool +-can_build_shared=yes +- +-# All known linkers require a '.a' archive for static linking (except MSVC and +-# ICC, which need '.lib'). +-libext=a +- +-with_gnu_ld=$lt_cv_prog_gnu_ld +- +-old_CC=$CC +-old_CFLAGS=$CFLAGS +- +-# Set sane defaults for various variables +-test -z "$CC" && CC=cc +-test -z "$LTCC" && LTCC=$CC +-test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS +-test -z "$LD" && LD=ld +-test -z "$ac_objext" && ac_objext=o +- +-_LT_CC_BASENAME([$compiler]) +- +-# Only perform the check for file, if the check method requires it +-test -z "$MAGIC_CMD" && MAGIC_CMD=file +-case $deplibs_check_method in +-file_magic*) +- if test "$file_magic_cmd" = '$MAGIC_CMD'; then +- _LT_PATH_MAGIC +- fi +- ;; +-esac +- +-# Use C for the default configuration in the libtool script +-LT_SUPPORTED_TAG([CC]) +-_LT_LANG_C_CONFIG +-_LT_LANG_DEFAULT_CONFIG +-_LT_CONFIG_COMMANDS +-])# _LT_SETUP +- +- +-# _LT_PREPARE_SED_QUOTE_VARS +-# -------------------------- +-# Define a few sed substitution that help us do robust quoting. +-m4_defun([_LT_PREPARE_SED_QUOTE_VARS], +-[# Backslashify metacharacters that are still active within +-# double-quoted strings. +-sed_quote_subst='s/\([["`$\\]]\)/\\\1/g' +- +-# Same as above, but do not quote variable references. +-double_quote_subst='s/\([["`\\]]\)/\\\1/g' +- +-# Sed substitution to delay expansion of an escaped shell variable in a +-# double_quote_subst'ed string. 
+-delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' +- +-# Sed substitution to delay expansion of an escaped single quote. +-delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g' +- +-# Sed substitution to avoid accidental globbing in evaled expressions +-no_glob_subst='s/\*/\\\*/g' +-]) +- +-# _LT_PROG_LTMAIN +-# --------------- +-# Note that this code is called both from 'configure', and 'config.status' +-# now that we use AC_CONFIG_COMMANDS to generate libtool. Notably, +-# 'config.status' has no value for ac_aux_dir unless we are using Automake, +-# so we pass a copy along to make sure it has a sensible value anyway. +-m4_defun([_LT_PROG_LTMAIN], +-[m4_ifdef([AC_REQUIRE_AUX_FILE], [AC_REQUIRE_AUX_FILE([ltmain.sh])])dnl +-_LT_CONFIG_LIBTOOL_INIT([ac_aux_dir='$ac_aux_dir']) +-ltmain=$ac_aux_dir/ltmain.sh +-])# _LT_PROG_LTMAIN +- +- +- +-# So that we can recreate a full libtool script including additional +-# tags, we accumulate the chunks of code to send to AC_CONFIG_COMMANDS +-# in macros and then make a single call at the end using the 'libtool' +-# label. +- +- +-# _LT_CONFIG_LIBTOOL_INIT([INIT-COMMANDS]) +-# ---------------------------------------- +-# Register INIT-COMMANDS to be passed to AC_CONFIG_COMMANDS later. +-m4_define([_LT_CONFIG_LIBTOOL_INIT], +-[m4_ifval([$1], +- [m4_append([_LT_OUTPUT_LIBTOOL_INIT], +- [$1 +-])])]) +- +-# Initialize. +-m4_define([_LT_OUTPUT_LIBTOOL_INIT]) +- +- +-# _LT_CONFIG_LIBTOOL([COMMANDS]) +-# ------------------------------ +-# Register COMMANDS to be passed to AC_CONFIG_COMMANDS later. +-m4_define([_LT_CONFIG_LIBTOOL], +-[m4_ifval([$1], +- [m4_append([_LT_OUTPUT_LIBTOOL_COMMANDS], +- [$1 +-])])]) +- +-# Initialize. 
+-m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS]) +- +- +-# _LT_CONFIG_SAVE_COMMANDS([COMMANDS], [INIT_COMMANDS]) +-# ----------------------------------------------------- +-m4_defun([_LT_CONFIG_SAVE_COMMANDS], +-[_LT_CONFIG_LIBTOOL([$1]) +-_LT_CONFIG_LIBTOOL_INIT([$2]) +-]) +- +- +-# _LT_FORMAT_COMMENT([COMMENT]) +-# ----------------------------- +-# Add leading comment marks to the start of each line, and a trailing +-# full-stop to the whole comment if one is not present already. +-m4_define([_LT_FORMAT_COMMENT], +-[m4_ifval([$1], [ +-m4_bpatsubst([m4_bpatsubst([$1], [^ *], [# ])], +- [['`$\]], [\\\&])]m4_bmatch([$1], [[!?.]$], [], [.]) +-)]) +- +- +- +- +- +-# _LT_DECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION], [IS-TAGGED?]) +-# ------------------------------------------------------------------- +-# CONFIGNAME is the name given to the value in the libtool script. +-# VARNAME is the (base) name used in the configure script. +-# VALUE may be 0, 1 or 2 for a computed quote escaped value based on +-# VARNAME. Any other value will be used directly. 
+-m4_define([_LT_DECL], +-[lt_if_append_uniq([lt_decl_varnames], [$2], [, ], +- [lt_dict_add_subkey([lt_decl_dict], [$2], [libtool_name], +- [m4_ifval([$1], [$1], [$2])]) +- lt_dict_add_subkey([lt_decl_dict], [$2], [value], [$3]) +- m4_ifval([$4], +- [lt_dict_add_subkey([lt_decl_dict], [$2], [description], [$4])]) +- lt_dict_add_subkey([lt_decl_dict], [$2], +- [tagged?], [m4_ifval([$5], [yes], [no])])]) +-]) +- +- +-# _LT_TAGDECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION]) +-# -------------------------------------------------------- +-m4_define([_LT_TAGDECL], [_LT_DECL([$1], [$2], [$3], [$4], [yes])]) +- +- +-# lt_decl_tag_varnames([SEPARATOR], [VARNAME1...]) +-# ------------------------------------------------ +-m4_define([lt_decl_tag_varnames], +-[_lt_decl_filter([tagged?], [yes], $@)]) +- +- +-# _lt_decl_filter(SUBKEY, VALUE, [SEPARATOR], [VARNAME1..]) +-# --------------------------------------------------------- +-m4_define([_lt_decl_filter], +-[m4_case([$#], +- [0], [m4_fatal([$0: too few arguments: $#])], +- [1], [m4_fatal([$0: too few arguments: $#: $1])], +- [2], [lt_dict_filter([lt_decl_dict], [$1], [$2], [], lt_decl_varnames)], +- [3], [lt_dict_filter([lt_decl_dict], [$1], [$2], [$3], lt_decl_varnames)], +- [lt_dict_filter([lt_decl_dict], $@)])[]dnl +-]) +- +- +-# lt_decl_quote_varnames([SEPARATOR], [VARNAME1...]) +-# -------------------------------------------------- +-m4_define([lt_decl_quote_varnames], +-[_lt_decl_filter([value], [1], $@)]) +- +- +-# lt_decl_dquote_varnames([SEPARATOR], [VARNAME1...]) +-# --------------------------------------------------- +-m4_define([lt_decl_dquote_varnames], +-[_lt_decl_filter([value], [2], $@)]) +- +- +-# lt_decl_varnames_tagged([SEPARATOR], [VARNAME1...]) +-# --------------------------------------------------- +-m4_define([lt_decl_varnames_tagged], +-[m4_assert([$# <= 2])dnl +-_$0(m4_quote(m4_default([$1], [[, ]])), +- m4_ifval([$2], [[$2]], [m4_dquote(lt_decl_tag_varnames)]), +- 
m4_split(m4_normalize(m4_quote(_LT_TAGS)), [ ]))]) +-m4_define([_lt_decl_varnames_tagged], +-[m4_ifval([$3], [lt_combine([$1], [$2], [_], $3)])]) +- +- +-# lt_decl_all_varnames([SEPARATOR], [VARNAME1...]) +-# ------------------------------------------------ +-m4_define([lt_decl_all_varnames], +-[_$0(m4_quote(m4_default([$1], [[, ]])), +- m4_if([$2], [], +- m4_quote(lt_decl_varnames), +- m4_quote(m4_shift($@))))[]dnl +-]) +-m4_define([_lt_decl_all_varnames], +-[lt_join($@, lt_decl_varnames_tagged([$1], +- lt_decl_tag_varnames([[, ]], m4_shift($@))))dnl +-]) +- +- +-# _LT_CONFIG_STATUS_DECLARE([VARNAME]) +-# ------------------------------------ +-# Quote a variable value, and forward it to 'config.status' so that its +-# declaration there will have the same value as in 'configure'. VARNAME +-# must have a single quote delimited value for this to work. +-m4_define([_LT_CONFIG_STATUS_DECLARE], +-[$1='`$ECHO "$][$1" | $SED "$delay_single_quote_subst"`']) +- +- +-# _LT_CONFIG_STATUS_DECLARATIONS +-# ------------------------------ +-# We delimit libtool config variables with single quotes, so when +-# we write them to config.status, we have to be sure to quote all +-# embedded single quotes properly. 
In configure, this macro expands +-# each variable declared with _LT_DECL (and _LT_TAGDECL) into: +-# +-# ='`$ECHO "$" | $SED "$delay_single_quote_subst"`' +-m4_defun([_LT_CONFIG_STATUS_DECLARATIONS], +-[m4_foreach([_lt_var], m4_quote(lt_decl_all_varnames), +- [m4_n([_LT_CONFIG_STATUS_DECLARE(_lt_var)])])]) +- +- +-# _LT_LIBTOOL_TAGS +-# ---------------- +-# Output comment and list of tags supported by the script +-m4_defun([_LT_LIBTOOL_TAGS], +-[_LT_FORMAT_COMMENT([The names of the tagged configurations supported by this script])dnl +-available_tags='_LT_TAGS'dnl +-]) +- +- +-# _LT_LIBTOOL_DECLARE(VARNAME, [TAG]) +-# ----------------------------------- +-# Extract the dictionary values for VARNAME (optionally with TAG) and +-# expand to a commented shell variable setting: +-# +-# # Some comment about what VAR is for. +-# visible_name=$lt_internal_name +-m4_define([_LT_LIBTOOL_DECLARE], +-[_LT_FORMAT_COMMENT(m4_quote(lt_dict_fetch([lt_decl_dict], [$1], +- [description])))[]dnl +-m4_pushdef([_libtool_name], +- m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [libtool_name])))[]dnl +-m4_case(m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [value])), +- [0], [_libtool_name=[$]$1], +- [1], [_libtool_name=$lt_[]$1], +- [2], [_libtool_name=$lt_[]$1], +- [_libtool_name=lt_dict_fetch([lt_decl_dict], [$1], [value])])[]dnl +-m4_ifval([$2], [_$2])[]m4_popdef([_libtool_name])[]dnl +-]) +- +- +-# _LT_LIBTOOL_CONFIG_VARS +-# ----------------------- +-# Produce commented declarations of non-tagged libtool config variables +-# suitable for insertion in the LIBTOOL CONFIG section of the 'libtool' +-# script. Tagged libtool config variables (even for the LIBTOOL CONFIG +-# section) are produced by _LT_LIBTOOL_TAG_VARS. 
+-m4_defun([_LT_LIBTOOL_CONFIG_VARS], +-[m4_foreach([_lt_var], +- m4_quote(_lt_decl_filter([tagged?], [no], [], lt_decl_varnames)), +- [m4_n([_LT_LIBTOOL_DECLARE(_lt_var)])])]) +- +- +-# _LT_LIBTOOL_TAG_VARS(TAG) +-# ------------------------- +-m4_define([_LT_LIBTOOL_TAG_VARS], +-[m4_foreach([_lt_var], m4_quote(lt_decl_tag_varnames), +- [m4_n([_LT_LIBTOOL_DECLARE(_lt_var, [$1])])])]) +- +- +-# _LT_TAGVAR(VARNAME, [TAGNAME]) +-# ------------------------------ +-m4_define([_LT_TAGVAR], [m4_ifval([$2], [$1_$2], [$1])]) +- +- +-# _LT_CONFIG_COMMANDS +-# ------------------- +-# Send accumulated output to $CONFIG_STATUS. Thanks to the lists of +-# variables for single and double quote escaping we saved from calls +-# to _LT_DECL, we can put quote escaped variables declarations +-# into 'config.status', and then the shell code to quote escape them in +-# for loops in 'config.status'. Finally, any additional code accumulated +-# from calls to _LT_CONFIG_LIBTOOL_INIT is expanded. +-m4_defun([_LT_CONFIG_COMMANDS], +-[AC_PROVIDE_IFELSE([LT_OUTPUT], +- dnl If the libtool generation code has been placed in $CONFIG_LT, +- dnl instead of duplicating it all over again into config.status, +- dnl then we will have config.status run $CONFIG_LT later, so it +- dnl needs to know what name is stored there: +- [AC_CONFIG_COMMANDS([libtool], +- [$SHELL $CONFIG_LT || AS_EXIT(1)], [CONFIG_LT='$CONFIG_LT'])], +- dnl If the libtool generation code is destined for config.status, +- dnl expand the accumulated commands and init code now: +- [AC_CONFIG_COMMANDS([libtool], +- [_LT_OUTPUT_LIBTOOL_COMMANDS], [_LT_OUTPUT_LIBTOOL_COMMANDS_INIT])]) +-])#_LT_CONFIG_COMMANDS +- +- +-# Initialize. +-m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS_INIT], +-[ +- +-# The HP-UX ksh and POSIX shell print the target directory to stdout +-# if CDPATH is set. 
+-(unset CDPATH) >/dev/null 2>&1 && unset CDPATH +- +-sed_quote_subst='$sed_quote_subst' +-double_quote_subst='$double_quote_subst' +-delay_variable_subst='$delay_variable_subst' +-_LT_CONFIG_STATUS_DECLARATIONS +-LTCC='$LTCC' +-LTCFLAGS='$LTCFLAGS' +-compiler='$compiler_DEFAULT' +- +-# A function that is used when there is no print builtin or printf. +-func_fallback_echo () +-{ +- eval 'cat <<_LTECHO_EOF +-\$[]1 +-_LTECHO_EOF' +-} +- +-# Quote evaled strings. +-for var in lt_decl_all_varnames([[ \ +-]], lt_decl_quote_varnames); do +- case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in +- *[[\\\\\\\`\\"\\\$]]*) +- eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED \\"\\\$sed_quote_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes +- ;; +- *) +- eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" +- ;; +- esac +-done +- +-# Double-quote double-evaled strings. +-for var in lt_decl_all_varnames([[ \ +-]], lt_decl_dquote_varnames); do +- case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in +- *[[\\\\\\\`\\"\\\$]]*) +- eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes +- ;; +- *) +- eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" +- ;; +- esac +-done +- +-_LT_OUTPUT_LIBTOOL_INIT +-]) +- +-# _LT_GENERATED_FILE_INIT(FILE, [COMMENT]) +-# ------------------------------------ +-# Generate a child script FILE with all initialization necessary to +-# reuse the environment learned by the parent script, and make the +-# file executable. If COMMENT is supplied, it is inserted after the +-# '#!' sequence but before initialization text begins. After this +-# macro, additional text can be appended to FILE to form the body of +-# the child script. The macro ends with non-zero status if the +-# file could not be fully written (such as if the disk is full). 
+-m4_ifdef([AS_INIT_GENERATED], +-[m4_defun([_LT_GENERATED_FILE_INIT],[AS_INIT_GENERATED($@)])], +-[m4_defun([_LT_GENERATED_FILE_INIT], +-[m4_require([AS_PREPARE])]dnl +-[m4_pushdef([AS_MESSAGE_LOG_FD])]dnl +-[lt_write_fail=0 +-cat >$1 <<_ASEOF || lt_write_fail=1 +-#! $SHELL +-# Generated by $as_me. +-$2 +-SHELL=\${CONFIG_SHELL-$SHELL} +-export SHELL +-_ASEOF +-cat >>$1 <<\_ASEOF || lt_write_fail=1 +-AS_SHELL_SANITIZE +-_AS_PREPARE +-exec AS_MESSAGE_FD>&1 +-_ASEOF +-test 0 = "$lt_write_fail" && chmod +x $1[]dnl +-m4_popdef([AS_MESSAGE_LOG_FD])])])# _LT_GENERATED_FILE_INIT +- +-# LT_OUTPUT +-# --------- +-# This macro allows early generation of the libtool script (before +-# AC_OUTPUT is called), incase it is used in configure for compilation +-# tests. +-AC_DEFUN([LT_OUTPUT], +-[: ${CONFIG_LT=./config.lt} +-AC_MSG_NOTICE([creating $CONFIG_LT]) +-_LT_GENERATED_FILE_INIT(["$CONFIG_LT"], +-[# Run this file to recreate a libtool stub with the current configuration.]) +- +-cat >>"$CONFIG_LT" <<\_LTEOF +-lt_cl_silent=false +-exec AS_MESSAGE_LOG_FD>>config.log +-{ +- echo +- AS_BOX([Running $as_me.]) +-} >&AS_MESSAGE_LOG_FD +- +-lt_cl_help="\ +-'$as_me' creates a local libtool stub from the current configuration, +-for use in further configure time tests before the real libtool is +-generated. +- +-Usage: $[0] [[OPTIONS]] +- +- -h, --help print this help, then exit +- -V, --version print version number, then exit +- -q, --quiet do not print progress messages +- -d, --debug don't remove temporary files +- +-Report bugs to ." +- +-lt_cl_version="\ +-m4_ifset([AC_PACKAGE_NAME], [AC_PACKAGE_NAME ])config.lt[]dnl +-m4_ifset([AC_PACKAGE_VERSION], [ AC_PACKAGE_VERSION]) +-configured by $[0], generated by m4_PACKAGE_STRING. +- +-Copyright (C) 2011 Free Software Foundation, Inc. +-This config.lt script is free software; the Free Software Foundation +-gives unlimited permision to copy, distribute and modify it." 
+- +-while test 0 != $[#] +-do +- case $[1] in +- --version | --v* | -V ) +- echo "$lt_cl_version"; exit 0 ;; +- --help | --h* | -h ) +- echo "$lt_cl_help"; exit 0 ;; +- --debug | --d* | -d ) +- debug=: ;; +- --quiet | --q* | --silent | --s* | -q ) +- lt_cl_silent=: ;; +- +- -*) AC_MSG_ERROR([unrecognized option: $[1] +-Try '$[0] --help' for more information.]) ;; +- +- *) AC_MSG_ERROR([unrecognized argument: $[1] +-Try '$[0] --help' for more information.]) ;; +- esac +- shift +-done +- +-if $lt_cl_silent; then +- exec AS_MESSAGE_FD>/dev/null +-fi +-_LTEOF +- +-cat >>"$CONFIG_LT" <<_LTEOF +-_LT_OUTPUT_LIBTOOL_COMMANDS_INIT +-_LTEOF +- +-cat >>"$CONFIG_LT" <<\_LTEOF +-AC_MSG_NOTICE([creating $ofile]) +-_LT_OUTPUT_LIBTOOL_COMMANDS +-AS_EXIT(0) +-_LTEOF +-chmod +x "$CONFIG_LT" +- +-# configure is writing to config.log, but config.lt does its own redirection, +-# appending to config.log, which fails on DOS, as config.log is still kept +-# open by configure. Here we exec the FD to /dev/null, effectively closing +-# config.log, so it can be properly (re)opened and appended to by config.lt. +-lt_cl_success=: +-test yes = "$silent" && +- lt_config_lt_args="$lt_config_lt_args --quiet" +-exec AS_MESSAGE_LOG_FD>/dev/null +-$SHELL "$CONFIG_LT" $lt_config_lt_args || lt_cl_success=false +-exec AS_MESSAGE_LOG_FD>>config.log +-$lt_cl_success || AS_EXIT(1) +-])# LT_OUTPUT +- +- +-# _LT_CONFIG(TAG) +-# --------------- +-# If TAG is the built-in tag, create an initial libtool script with a +-# default configuration from the untagged config vars. Otherwise add code +-# to config.status for appending the configuration named by TAG from the +-# matching tagged config vars. +-m4_defun([_LT_CONFIG], +-[m4_require([_LT_FILEUTILS_DEFAULTS])dnl +-_LT_CONFIG_SAVE_COMMANDS([ +- m4_define([_LT_TAG], m4_if([$1], [], [C], [$1]))dnl +- m4_if(_LT_TAG, [C], [ +- # See if we are running on zsh, and set the options that allow our +- # commands through without removal of \ escapes. 
+- if test -n "${ZSH_VERSION+set}"; then +- setopt NO_GLOB_SUBST +- fi +- +- cfgfile=${ofile}T +- trap "$RM \"$cfgfile\"; exit 1" 1 2 15 +- $RM "$cfgfile" +- +- cat <<_LT_EOF >> "$cfgfile" +-#! $SHELL +-# Generated automatically by $as_me ($PACKAGE) $VERSION +-# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: +-# NOTE: Changes made to this file will be lost: look at ltmain.sh. +- +-# Provide generalized library-building support services. +-# Written by Gordon Matzigkeit, 1996 +- +-_LT_COPYING +-_LT_LIBTOOL_TAGS +- +-# Configured defaults for sys_lib_dlsearch_path munging. +-: \${LT_SYS_LIBRARY_PATH="$configure_time_lt_sys_library_path"} +- +-# ### BEGIN LIBTOOL CONFIG +-_LT_LIBTOOL_CONFIG_VARS +-_LT_LIBTOOL_TAG_VARS +-# ### END LIBTOOL CONFIG +- +-_LT_EOF +- +- cat <<'_LT_EOF' >> "$cfgfile" +- +-# ### BEGIN FUNCTIONS SHARED WITH CONFIGURE +- +-_LT_PREPARE_MUNGE_PATH_LIST +-_LT_PREPARE_CC_BASENAME +- +-# ### END FUNCTIONS SHARED WITH CONFIGURE +- +-_LT_EOF +- +- case $host_os in +- aix3*) +- cat <<\_LT_EOF >> "$cfgfile" +-# AIX sometimes has problems with the GCC collect2 program. For some +-# reason, if we set the COLLECT_NAMES environment variable, the problems +-# vanish in a puff of smoke. +-if test set != "${COLLECT_NAMES+set}"; then +- COLLECT_NAMES= +- export COLLECT_NAMES +-fi +-_LT_EOF +- ;; +- esac +- +- _LT_PROG_LTMAIN +- +- # We use sed instead of cat because bash on DJGPP gets confused if +- # if finds mixed CR/LF and LF-only lines. Since sed operates in +- # text mode, it properly converts lines to CR/LF. This bash problem +- # is reportedly fixed, but why not run on old versions too? +- $SED '$q' "$ltmain" >> "$cfgfile" \ +- || (rm -f "$cfgfile"; exit 1) +- +- mv -f "$cfgfile" "$ofile" || +- (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") +- chmod +x "$ofile" +-], +-[cat <<_LT_EOF >> "$ofile" +- +-dnl Unfortunately we have to use $1 here, since _LT_TAG is not expanded +-dnl in a comment (ie after a #). 
+-# ### BEGIN LIBTOOL TAG CONFIG: $1 +-_LT_LIBTOOL_TAG_VARS(_LT_TAG) +-# ### END LIBTOOL TAG CONFIG: $1 +-_LT_EOF +-])dnl /m4_if +-], +-[m4_if([$1], [], [ +- PACKAGE='$PACKAGE' +- VERSION='$VERSION' +- RM='$RM' +- ofile='$ofile'], []) +-])dnl /_LT_CONFIG_SAVE_COMMANDS +-])# _LT_CONFIG +- +- +-# LT_SUPPORTED_TAG(TAG) +-# --------------------- +-# Trace this macro to discover what tags are supported by the libtool +-# --tag option, using: +-# autoconf --trace 'LT_SUPPORTED_TAG:$1' +-AC_DEFUN([LT_SUPPORTED_TAG], []) +- +- +-# C support is built-in for now +-m4_define([_LT_LANG_C_enabled], []) +-m4_define([_LT_TAGS], []) +- +- +-# LT_LANG(LANG) +-# ------------- +-# Enable libtool support for the given language if not already enabled. +-AC_DEFUN([LT_LANG], +-[AC_BEFORE([$0], [LT_OUTPUT])dnl +-m4_case([$1], +- [C], [_LT_LANG(C)], +- [C++], [_LT_LANG(CXX)], +- [Go], [_LT_LANG(GO)], +- [Java], [_LT_LANG(GCJ)], +- [Fortran 77], [_LT_LANG(F77)], +- [Fortran], [_LT_LANG(FC)], +- [Windows Resource], [_LT_LANG(RC)], +- [m4_ifdef([_LT_LANG_]$1[_CONFIG], +- [_LT_LANG($1)], +- [m4_fatal([$0: unsupported language: "$1"])])])dnl +-])# LT_LANG +- +- +-# _LT_LANG(LANGNAME) +-# ------------------ +-m4_defun([_LT_LANG], +-[m4_ifdef([_LT_LANG_]$1[_enabled], [], +- [LT_SUPPORTED_TAG([$1])dnl +- m4_append([_LT_TAGS], [$1 ])dnl +- m4_define([_LT_LANG_]$1[_enabled], [])dnl +- _LT_LANG_$1_CONFIG($1)])dnl +-])# _LT_LANG +- +- +-m4_ifndef([AC_PROG_GO], [ +-# NOTE: This macro has been submitted for inclusion into # +-# GNU Autoconf as AC_PROG_GO. When it is available in # +-# a released version of Autoconf we should remove this # +-# macro and use it instead. 
# +-m4_defun([AC_PROG_GO], +-[AC_LANG_PUSH(Go)dnl +-AC_ARG_VAR([GOC], [Go compiler command])dnl +-AC_ARG_VAR([GOFLAGS], [Go compiler flags])dnl +-_AC_ARG_VAR_LDFLAGS()dnl +-AC_CHECK_TOOL(GOC, gccgo) +-if test -z "$GOC"; then +- if test -n "$ac_tool_prefix"; then +- AC_CHECK_PROG(GOC, [${ac_tool_prefix}gccgo], [${ac_tool_prefix}gccgo]) +- fi +-fi +-if test -z "$GOC"; then +- AC_CHECK_PROG(GOC, gccgo, gccgo, false) +-fi +-])#m4_defun +-])#m4_ifndef +- +- +-# _LT_LANG_DEFAULT_CONFIG +-# ----------------------- +-m4_defun([_LT_LANG_DEFAULT_CONFIG], +-[AC_PROVIDE_IFELSE([AC_PROG_CXX], +- [LT_LANG(CXX)], +- [m4_define([AC_PROG_CXX], defn([AC_PROG_CXX])[LT_LANG(CXX)])]) +- +-AC_PROVIDE_IFELSE([AC_PROG_F77], +- [LT_LANG(F77)], +- [m4_define([AC_PROG_F77], defn([AC_PROG_F77])[LT_LANG(F77)])]) +- +-AC_PROVIDE_IFELSE([AC_PROG_FC], +- [LT_LANG(FC)], +- [m4_define([AC_PROG_FC], defn([AC_PROG_FC])[LT_LANG(FC)])]) +- +-dnl The call to [A][M_PROG_GCJ] is quoted like that to stop aclocal +-dnl pulling things in needlessly. 
+-AC_PROVIDE_IFELSE([AC_PROG_GCJ], +- [LT_LANG(GCJ)], +- [AC_PROVIDE_IFELSE([A][M_PROG_GCJ], +- [LT_LANG(GCJ)], +- [AC_PROVIDE_IFELSE([LT_PROG_GCJ], +- [LT_LANG(GCJ)], +- [m4_ifdef([AC_PROG_GCJ], +- [m4_define([AC_PROG_GCJ], defn([AC_PROG_GCJ])[LT_LANG(GCJ)])]) +- m4_ifdef([A][M_PROG_GCJ], +- [m4_define([A][M_PROG_GCJ], defn([A][M_PROG_GCJ])[LT_LANG(GCJ)])]) +- m4_ifdef([LT_PROG_GCJ], +- [m4_define([LT_PROG_GCJ], defn([LT_PROG_GCJ])[LT_LANG(GCJ)])])])])]) +- +-AC_PROVIDE_IFELSE([AC_PROG_GO], +- [LT_LANG(GO)], +- [m4_define([AC_PROG_GO], defn([AC_PROG_GO])[LT_LANG(GO)])]) +- +-AC_PROVIDE_IFELSE([LT_PROG_RC], +- [LT_LANG(RC)], +- [m4_define([LT_PROG_RC], defn([LT_PROG_RC])[LT_LANG(RC)])]) +-])# _LT_LANG_DEFAULT_CONFIG +- +-# Obsolete macros: +-AU_DEFUN([AC_LIBTOOL_CXX], [LT_LANG(C++)]) +-AU_DEFUN([AC_LIBTOOL_F77], [LT_LANG(Fortran 77)]) +-AU_DEFUN([AC_LIBTOOL_FC], [LT_LANG(Fortran)]) +-AU_DEFUN([AC_LIBTOOL_GCJ], [LT_LANG(Java)]) +-AU_DEFUN([AC_LIBTOOL_RC], [LT_LANG(Windows Resource)]) +-dnl aclocal-1.4 backwards compatibility: +-dnl AC_DEFUN([AC_LIBTOOL_CXX], []) +-dnl AC_DEFUN([AC_LIBTOOL_F77], []) +-dnl AC_DEFUN([AC_LIBTOOL_FC], []) +-dnl AC_DEFUN([AC_LIBTOOL_GCJ], []) +-dnl AC_DEFUN([AC_LIBTOOL_RC], []) +- +- +-# _LT_TAG_COMPILER +-# ---------------- +-m4_defun([_LT_TAG_COMPILER], +-[AC_REQUIRE([AC_PROG_CC])dnl +- +-_LT_DECL([LTCC], [CC], [1], [A C compiler])dnl +-_LT_DECL([LTCFLAGS], [CFLAGS], [1], [LTCC compiler flags])dnl +-_LT_TAGDECL([CC], [compiler], [1], [A language specific compiler])dnl +-_LT_TAGDECL([with_gcc], [GCC], [0], [Is the compiler the GNU compiler?])dnl +- +-# If no C compiler was specified, use CC. +-LTCC=${LTCC-"$CC"} +- +-# If no C compiler flags were specified, use CFLAGS. +-LTCFLAGS=${LTCFLAGS-"$CFLAGS"} +- +-# Allow CC to be a program name with arguments. 
+-compiler=$CC +-])# _LT_TAG_COMPILER +- +- +-# _LT_COMPILER_BOILERPLATE +-# ------------------------ +-# Check for compiler boilerplate output or warnings with +-# the simple compiler test code. +-m4_defun([_LT_COMPILER_BOILERPLATE], +-[m4_require([_LT_DECL_SED])dnl +-ac_outfile=conftest.$ac_objext +-echo "$lt_simple_compile_test_code" >conftest.$ac_ext +-eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +-_lt_compiler_boilerplate=`cat conftest.err` +-$RM conftest* +-])# _LT_COMPILER_BOILERPLATE +- +- +-# _LT_LINKER_BOILERPLATE +-# ---------------------- +-# Check for linker boilerplate output or warnings with +-# the simple link test code. +-m4_defun([_LT_LINKER_BOILERPLATE], +-[m4_require([_LT_DECL_SED])dnl +-ac_outfile=conftest.$ac_objext +-echo "$lt_simple_link_test_code" >conftest.$ac_ext +-eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +-_lt_linker_boilerplate=`cat conftest.err` +-$RM -r conftest* +-])# _LT_LINKER_BOILERPLATE +- +-# _LT_REQUIRED_DARWIN_CHECKS +-# ------------------------- +-m4_defun_once([_LT_REQUIRED_DARWIN_CHECKS],[ +- case $host_os in +- rhapsody* | darwin*) +- AC_CHECK_TOOL([DSYMUTIL], [dsymutil], [:]) +- AC_CHECK_TOOL([NMEDIT], [nmedit], [:]) +- AC_CHECK_TOOL([LIPO], [lipo], [:]) +- AC_CHECK_TOOL([OTOOL], [otool], [:]) +- AC_CHECK_TOOL([OTOOL64], [otool64], [:]) +- _LT_DECL([], [DSYMUTIL], [1], +- [Tool to manipulate archived DWARF debug symbol files on Mac OS X]) +- _LT_DECL([], [NMEDIT], [1], +- [Tool to change global to local symbols on Mac OS X]) +- _LT_DECL([], [LIPO], [1], +- [Tool to manipulate fat objects and archives on Mac OS X]) +- _LT_DECL([], [OTOOL], [1], +- [ldd/readelf like tool for Mach-O binaries on Mac OS X]) +- _LT_DECL([], [OTOOL64], [1], +- [ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4]) +- +- AC_CACHE_CHECK([for -single_module linker flag],[lt_cv_apple_cc_single_mod], +- [lt_cv_apple_cc_single_mod=no +- if test -z "$LT_MULTI_MODULE"; then +- # By 
default we will add the -single_module flag. You can override +- # by either setting the environment variable LT_MULTI_MODULE +- # non-empty at configure time, or by adding -multi_module to the +- # link flags. +- rm -rf libconftest.dylib* +- echo "int foo(void){return 1;}" > conftest.c +- echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ +--dynamiclib -Wl,-single_module conftest.c" >&AS_MESSAGE_LOG_FD +- $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ +- -dynamiclib -Wl,-single_module conftest.c 2>conftest.err +- _lt_result=$? +- # If there is a non-empty error log, and "single_module" +- # appears in it, assume the flag caused a linker warning +- if test -s conftest.err && $GREP single_module conftest.err; then +- cat conftest.err >&AS_MESSAGE_LOG_FD +- # Otherwise, if the output was created with a 0 exit code from +- # the compiler, it worked. +- elif test -f libconftest.dylib && test 0 = "$_lt_result"; then +- lt_cv_apple_cc_single_mod=yes +- else +- cat conftest.err >&AS_MESSAGE_LOG_FD +- fi +- rm -rf libconftest.dylib* +- rm -f conftest.* +- fi]) +- +- AC_CACHE_CHECK([for -exported_symbols_list linker flag], +- [lt_cv_ld_exported_symbols_list], +- [lt_cv_ld_exported_symbols_list=no +- save_LDFLAGS=$LDFLAGS +- echo "_main" > conftest.sym +- LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym" +- AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])], +- [lt_cv_ld_exported_symbols_list=yes], +- [lt_cv_ld_exported_symbols_list=no]) +- LDFLAGS=$save_LDFLAGS +- ]) +- +- AC_CACHE_CHECK([for -force_load linker flag],[lt_cv_ld_force_load], +- [lt_cv_ld_force_load=no +- cat > conftest.c << _LT_EOF +-int forced_loaded() { return 2;} +-_LT_EOF +- echo "$LTCC $LTCFLAGS -c -o conftest.o conftest.c" >&AS_MESSAGE_LOG_FD +- $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&AS_MESSAGE_LOG_FD +- echo "$AR $AR_FLAGS libconftest.a conftest.o" >&AS_MESSAGE_LOG_FD +- $AR $AR_FLAGS libconftest.a conftest.o 2>&AS_MESSAGE_LOG_FD +- echo "$RANLIB libconftest.a" >&AS_MESSAGE_LOG_FD +- $RANLIB 
libconftest.a 2>&AS_MESSAGE_LOG_FD +- cat > conftest.c << _LT_EOF +-int main() { return 0;} +-_LT_EOF +- echo "$LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a" >&AS_MESSAGE_LOG_FD +- $LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a 2>conftest.err +- _lt_result=$? +- if test -s conftest.err && $GREP force_load conftest.err; then +- cat conftest.err >&AS_MESSAGE_LOG_FD +- elif test -f conftest && test 0 = "$_lt_result" && $GREP forced_load conftest >/dev/null 2>&1; then +- lt_cv_ld_force_load=yes +- else +- cat conftest.err >&AS_MESSAGE_LOG_FD +- fi +- rm -f conftest.err libconftest.a conftest conftest.c +- rm -rf conftest.dSYM +- ]) +- case $host_os in +- rhapsody* | darwin1.[[012]]) +- _lt_dar_allow_undefined='$wl-undefined ${wl}suppress' ;; +- darwin1.*) +- _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;; +- darwin*) +- case $MACOSX_DEPLOYMENT_TARGET,$host in +- 10.[[012]],*|,*powerpc*-darwin[[5-8]]*) +- _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;; +- *) +- _lt_dar_allow_undefined='$wl-undefined ${wl}dynamic_lookup' ;; +- esac +- ;; +- esac +- if test yes = "$lt_cv_apple_cc_single_mod"; then +- _lt_dar_single_mod='$single_module' +- fi +- if test yes = "$lt_cv_ld_exported_symbols_list"; then +- _lt_dar_export_syms=' $wl-exported_symbols_list,$output_objdir/$libname-symbols.expsym' +- else +- _lt_dar_export_syms='~$NMEDIT -s $output_objdir/$libname-symbols.expsym $lib' +- fi +- if test : != "$DSYMUTIL" && test no = "$lt_cv_ld_force_load"; then +- _lt_dsymutil='~$DSYMUTIL $lib || :' +- else +- _lt_dsymutil= +- fi +- ;; +- esac +-]) +- +- +-# _LT_DARWIN_LINKER_FEATURES([TAG]) +-# --------------------------------- +-# Checks for linker and compiler features on darwin +-m4_defun([_LT_DARWIN_LINKER_FEATURES], +-[ +- m4_require([_LT_REQUIRED_DARWIN_CHECKS]) +- _LT_TAGVAR(archive_cmds_need_lc, $1)=no +- _LT_TAGVAR(hardcode_direct, $1)=no +- 
_LT_TAGVAR(hardcode_automatic, $1)=yes +- _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported +- if test yes = "$lt_cv_ld_force_load"; then +- _LT_TAGVAR(whole_archive_flag_spec, $1)='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience $wl-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' +- m4_case([$1], [F77], [_LT_TAGVAR(compiler_needs_object, $1)=yes], +- [FC], [_LT_TAGVAR(compiler_needs_object, $1)=yes]) +- else +- _LT_TAGVAR(whole_archive_flag_spec, $1)='' +- fi +- _LT_TAGVAR(link_all_deplibs, $1)=yes +- _LT_TAGVAR(allow_undefined_flag, $1)=$_lt_dar_allow_undefined +- case $cc_basename in +- ifort*|nagfor*) _lt_dar_can_shared=yes ;; +- *) _lt_dar_can_shared=$GCC ;; +- esac +- if test yes = "$_lt_dar_can_shared"; then +- output_verbose_link_cmd=func_echo_all +- _LT_TAGVAR(archive_cmds, $1)="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dsymutil" +- _LT_TAGVAR(module_cmds, $1)="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dsymutil" +- _LT_TAGVAR(archive_expsym_cmds, $1)="$SED 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dar_export_syms$_lt_dsymutil" +- _LT_TAGVAR(module_expsym_cmds, $1)="$SED -e 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dar_export_syms$_lt_dsymutil" +- m4_if([$1], [CXX], +-[ if test yes != "$lt_cv_apple_cc_single_mod"; then +- _LT_TAGVAR(archive_cmds, $1)="\$CC -r -keep_private_externs -nostdlib -o \$lib-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$lib-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname 
\$verstring$_lt_dsymutil" +- _LT_TAGVAR(archive_expsym_cmds, $1)="$SED 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \$lib-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$lib-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring$_lt_dar_export_syms$_lt_dsymutil" +- fi +-],[]) +- else +- _LT_TAGVAR(ld_shlibs, $1)=no +- fi +-]) +- +-# _LT_SYS_MODULE_PATH_AIX([TAGNAME]) +-# ---------------------------------- +-# Links a minimal program and checks the executable +-# for the system default hardcoded library path. In most cases, +-# this is /usr/lib:/lib, but when the MPI compilers are used +-# the location of the communication and MPI libs are included too. +-# If we don't find anything, use the default library path according +-# to the aix ld manual. +-# Store the results from the different compilers for each TAGNAME. +-# Allow to override them for all tags through lt_cv_aix_libpath. +-m4_defun([_LT_SYS_MODULE_PATH_AIX], +-[m4_require([_LT_DECL_SED])dnl +-if test set = "${lt_cv_aix_libpath+set}"; then +- aix_libpath=$lt_cv_aix_libpath +-else +- AC_CACHE_VAL([_LT_TAGVAR([lt_cv_aix_libpath_], [$1])], +- [AC_LINK_IFELSE([AC_LANG_PROGRAM],[ +- lt_aix_libpath_sed='[ +- /Import File Strings/,/^$/ { +- /^0/ { +- s/^0 *\([^ ]*\) *$/\1/ +- p +- } +- }]' +- _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +- # Check for a 64-bit object if we didn't find anything. 
+- if test -z "$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])"; then +- _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +- fi],[]) +- if test -z "$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])"; then +- _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=/usr/lib:/lib +- fi +- ]) +- aix_libpath=$_LT_TAGVAR([lt_cv_aix_libpath_], [$1]) +-fi +-])# _LT_SYS_MODULE_PATH_AIX +- +- +-# _LT_SHELL_INIT(ARG) +-# ------------------- +-m4_define([_LT_SHELL_INIT], +-[m4_divert_text([M4SH-INIT], [$1 +-])])# _LT_SHELL_INIT +- +- +- +-# _LT_PROG_ECHO_BACKSLASH +-# ----------------------- +-# Find how we can fake an echo command that does not interpret backslash. +-# In particular, with Autoconf 2.60 or later we add some code to the start +-# of the generated configure script that will find a shell with a builtin +-# printf (that we can use as an echo command). +-m4_defun([_LT_PROG_ECHO_BACKSLASH], +-[ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +-ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO +-ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO +- +-AC_MSG_CHECKING([how to print strings]) +-# Test print first, because it will be a builtin if present. +-if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ +- test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then +- ECHO='print -r --' +-elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then +- ECHO='printf %s\n' +-else +- # Use this function as a fallback that always works. +- func_fallback_echo () +- { +- eval 'cat <<_LTECHO_EOF +-$[]1 +-_LTECHO_EOF' +- } +- ECHO='func_fallback_echo' +-fi +- +-# func_echo_all arg... +-# Invoke $ECHO with all args, space-separated. 
+-func_echo_all () +-{ +- $ECHO "$*" +-} +- +-case $ECHO in +- printf*) AC_MSG_RESULT([printf]) ;; +- print*) AC_MSG_RESULT([print -r]) ;; +- *) AC_MSG_RESULT([cat]) ;; +-esac +- +-m4_ifdef([_AS_DETECT_SUGGESTED], +-[_AS_DETECT_SUGGESTED([ +- test -n "${ZSH_VERSION+set}${BASH_VERSION+set}" || ( +- ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +- ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO +- ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO +- PATH=/empty FPATH=/empty; export PATH FPATH +- test "X`printf %s $ECHO`" = "X$ECHO" \ +- || test "X`print -r -- $ECHO`" = "X$ECHO" )])]) +- +-_LT_DECL([], [SHELL], [1], [Shell to use when invoking shell scripts]) +-_LT_DECL([], [ECHO], [1], [An echo program that protects backslashes]) +-])# _LT_PROG_ECHO_BACKSLASH +- +- +-# _LT_WITH_SYSROOT +-# ---------------- +-AC_DEFUN([_LT_WITH_SYSROOT], +-[m4_require([_LT_DECL_SED])dnl +-AC_MSG_CHECKING([for sysroot]) +-AC_ARG_WITH([sysroot], +-[AS_HELP_STRING([--with-sysroot@<:@=DIR@:>@], +- [Search for dependent libraries within DIR (or the compiler's sysroot +- if not specified).])], +-[], [with_sysroot=no]) +- +-dnl lt_sysroot will always be passed unquoted. We quote it here +-dnl in case the user passed a directory name. 
+-lt_sysroot= +-case $with_sysroot in #( +- yes) +- if test yes = "$GCC"; then +- lt_sysroot=`$CC --print-sysroot 2>/dev/null` +- fi +- ;; #( +- /*) +- lt_sysroot=`echo "$with_sysroot" | $SED -e "$sed_quote_subst"` +- ;; #( +- no|'') +- ;; #( +- *) +- AC_MSG_RESULT([$with_sysroot]) +- AC_MSG_ERROR([The sysroot must be an absolute path.]) +- ;; +-esac +- +- AC_MSG_RESULT([${lt_sysroot:-no}]) +-_LT_DECL([], [lt_sysroot], [0], [The root where to search for ]dnl +-[dependent libraries, and where our libraries should be installed.])]) +- +-# _LT_ENABLE_LOCK +-# --------------- +-m4_defun([_LT_ENABLE_LOCK], +-[AC_ARG_ENABLE([libtool-lock], +- [AS_HELP_STRING([--disable-libtool-lock], +- [avoid locking (might break parallel builds)])]) +-test no = "$enable_libtool_lock" || enable_libtool_lock=yes +- +-# Some flags need to be propagated to the compiler or linker for good +-# libtool support. +-case $host in +-ia64-*-hpux*) +- # Find out what ABI is being produced by ac_compile, and set mode +- # options accordingly. +- echo 'int i;' > conftest.$ac_ext +- if AC_TRY_EVAL(ac_compile); then +- case `$FILECMD conftest.$ac_objext` in +- *ELF-32*) +- HPUX_IA64_MODE=32 +- ;; +- *ELF-64*) +- HPUX_IA64_MODE=64 +- ;; +- esac +- fi +- rm -rf conftest* +- ;; +-*-*-irix6*) +- # Find out what ABI is being produced by ac_compile, and set linker +- # options accordingly. 
+- echo '[#]line '$LINENO' "configure"' > conftest.$ac_ext +- if AC_TRY_EVAL(ac_compile); then +- if test yes = "$lt_cv_prog_gnu_ld"; then +- case `$FILECMD conftest.$ac_objext` in +- *32-bit*) +- LD="${LD-ld} -melf32bsmip" +- ;; +- *N32*) +- LD="${LD-ld} -melf32bmipn32" +- ;; +- *64-bit*) +- LD="${LD-ld} -melf64bmip" +- ;; +- esac +- else +- case `$FILECMD conftest.$ac_objext` in +- *32-bit*) +- LD="${LD-ld} -32" +- ;; +- *N32*) +- LD="${LD-ld} -n32" +- ;; +- *64-bit*) +- LD="${LD-ld} -64" +- ;; +- esac +- fi +- fi +- rm -rf conftest* +- ;; +- +-mips64*-*linux*) +- # Find out what ABI is being produced by ac_compile, and set linker +- # options accordingly. +- echo '[#]line '$LINENO' "configure"' > conftest.$ac_ext +- if AC_TRY_EVAL(ac_compile); then +- emul=elf +- case `$FILECMD conftest.$ac_objext` in +- *32-bit*) +- emul="${emul}32" +- ;; +- *64-bit*) +- emul="${emul}64" +- ;; +- esac +- case `$FILECMD conftest.$ac_objext` in +- *MSB*) +- emul="${emul}btsmip" +- ;; +- *LSB*) +- emul="${emul}ltsmip" +- ;; +- esac +- case `$FILECMD conftest.$ac_objext` in +- *N32*) +- emul="${emul}n32" +- ;; +- esac +- LD="${LD-ld} -m $emul" +- fi +- rm -rf conftest* +- ;; +- +-x86_64-*kfreebsd*-gnu|x86_64-*linux*|powerpc*-*linux*| \ +-s390*-*linux*|s390*-*tpf*|sparc*-*linux*) +- # Find out what ABI is being produced by ac_compile, and set linker +- # options accordingly. Note that the listed cases only cover the +- # situations where additional linker options are needed (such as when +- # doing 32-bit compilation for a host where ld defaults to 64-bit, or +- # vice versa); the common cases where no linker options are needed do +- # not appear in the list. 
+- echo 'int i;' > conftest.$ac_ext +- if AC_TRY_EVAL(ac_compile); then +- case `$FILECMD conftest.o` in +- *32-bit*) +- case $host in +- x86_64-*kfreebsd*-gnu) +- LD="${LD-ld} -m elf_i386_fbsd" +- ;; +- x86_64-*linux*) +- case `$FILECMD conftest.o` in +- *x86-64*) +- LD="${LD-ld} -m elf32_x86_64" +- ;; +- *) +- LD="${LD-ld} -m elf_i386" +- ;; +- esac +- ;; +- powerpc64le-*linux*) +- LD="${LD-ld} -m elf32lppclinux" +- ;; +- powerpc64-*linux*) +- LD="${LD-ld} -m elf32ppclinux" +- ;; +- s390x-*linux*) +- LD="${LD-ld} -m elf_s390" +- ;; +- sparc64-*linux*) +- LD="${LD-ld} -m elf32_sparc" +- ;; +- esac +- ;; +- *64-bit*) +- case $host in +- x86_64-*kfreebsd*-gnu) +- LD="${LD-ld} -m elf_x86_64_fbsd" +- ;; +- x86_64-*linux*) +- LD="${LD-ld} -m elf_x86_64" +- ;; +- powerpcle-*linux*) +- LD="${LD-ld} -m elf64lppc" +- ;; +- powerpc-*linux*) +- LD="${LD-ld} -m elf64ppc" +- ;; +- s390*-*linux*|s390*-*tpf*) +- LD="${LD-ld} -m elf64_s390" +- ;; +- sparc*-*linux*) +- LD="${LD-ld} -m elf64_sparc" +- ;; +- esac +- ;; +- esac +- fi +- rm -rf conftest* +- ;; +- +-*-*-sco3.2v5*) +- # On SCO OpenServer 5, we need -belf to get full-featured binaries. +- SAVE_CFLAGS=$CFLAGS +- CFLAGS="$CFLAGS -belf" +- AC_CACHE_CHECK([whether the C compiler needs -belf], lt_cv_cc_needs_belf, +- [AC_LANG_PUSH(C) +- AC_LINK_IFELSE([AC_LANG_PROGRAM([[]],[[]])],[lt_cv_cc_needs_belf=yes],[lt_cv_cc_needs_belf=no]) +- AC_LANG_POP]) +- if test yes != "$lt_cv_cc_needs_belf"; then +- # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf +- CFLAGS=$SAVE_CFLAGS +- fi +- ;; +-*-*solaris*) +- # Find out what ABI is being produced by ac_compile, and set linker +- # options accordingly. 
+- echo 'int i;' > conftest.$ac_ext +- if AC_TRY_EVAL(ac_compile); then +- case `$FILECMD conftest.o` in +- *64-bit*) +- case $lt_cv_prog_gnu_ld in +- yes*) +- case $host in +- i?86-*-solaris*|x86_64-*-solaris*) +- LD="${LD-ld} -m elf_x86_64" +- ;; +- sparc*-*-solaris*) +- LD="${LD-ld} -m elf64_sparc" +- ;; +- esac +- # GNU ld 2.21 introduced _sol2 emulations. Use them if available. +- if ${LD-ld} -V | grep _sol2 >/dev/null 2>&1; then +- LD=${LD-ld}_sol2 +- fi +- ;; +- *) +- if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then +- LD="${LD-ld} -64" +- fi +- ;; +- esac +- ;; +- esac +- fi +- rm -rf conftest* +- ;; +-esac +- +-need_locks=$enable_libtool_lock +-])# _LT_ENABLE_LOCK +- +- +-# _LT_PROG_AR +-# ----------- +-m4_defun([_LT_PROG_AR], +-[AC_CHECK_TOOLS(AR, [ar], false) +-: ${AR=ar} +-_LT_DECL([], [AR], [1], [The archiver]) +- +-# Use ARFLAGS variable as AR's operation code to sync the variable naming with +-# Automake. If both AR_FLAGS and ARFLAGS are specified, AR_FLAGS should have +-# higher priority because thats what people were doing historically (setting +-# ARFLAGS for automake and AR_FLAGS for libtool). FIXME: Make the AR_FLAGS +-# variable obsoleted/removed. +- +-test ${AR_FLAGS+y} || AR_FLAGS=${ARFLAGS-cr} +-lt_ar_flags=$AR_FLAGS +-_LT_DECL([], [lt_ar_flags], [0], [Flags to create an archive (by configure)]) +- +-# Make AR_FLAGS overridable by 'make ARFLAGS='. Don't try to run-time override +-# by AR_FLAGS because that was never working and AR_FLAGS is about to die. +-_LT_DECL([], [AR_FLAGS], [\@S|@{ARFLAGS-"\@S|@lt_ar_flags"}], +- [Flags to create an archive]) +- +-AC_CACHE_CHECK([for archiver @FILE support], [lt_cv_ar_at_file], +- [lt_cv_ar_at_file=no +- AC_COMPILE_IFELSE([AC_LANG_PROGRAM], +- [echo conftest.$ac_objext > conftest.lst +- lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&AS_MESSAGE_LOG_FD' +- AC_TRY_EVAL([lt_ar_try]) +- if test 0 -eq "$ac_status"; then +- # Ensure the archiver fails upon bogus file names. 
+- rm -f conftest.$ac_objext libconftest.a +- AC_TRY_EVAL([lt_ar_try]) +- if test 0 -ne "$ac_status"; then +- lt_cv_ar_at_file=@ +- fi +- fi +- rm -f conftest.* libconftest.a +- ]) +- ]) +- +-if test no = "$lt_cv_ar_at_file"; then +- archiver_list_spec= +-else +- archiver_list_spec=$lt_cv_ar_at_file +-fi +-_LT_DECL([], [archiver_list_spec], [1], +- [How to feed a file listing to the archiver]) +-])# _LT_PROG_AR +- +- +-# _LT_CMD_OLD_ARCHIVE +-# ------------------- +-m4_defun([_LT_CMD_OLD_ARCHIVE], +-[_LT_PROG_AR +- +-AC_CHECK_TOOL(STRIP, strip, :) +-test -z "$STRIP" && STRIP=: +-_LT_DECL([], [STRIP], [1], [A symbol stripping program]) +- +-AC_CHECK_TOOL(RANLIB, ranlib, :) +-test -z "$RANLIB" && RANLIB=: +-_LT_DECL([], [RANLIB], [1], +- [Commands used to install an old-style archive]) +- +-# Determine commands to create old-style static archives. +-old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs' +-old_postinstall_cmds='chmod 644 $oldlib' +-old_postuninstall_cmds= +- +-if test -n "$RANLIB"; then +- case $host_os in +- bitrig* | openbsd*) +- old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$tool_oldlib" +- ;; +- *) +- old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$tool_oldlib" +- ;; +- esac +- old_archive_cmds="$old_archive_cmds~\$RANLIB \$tool_oldlib" +-fi +- +-case $host_os in +- darwin*) +- lock_old_archive_extraction=yes ;; +- *) +- lock_old_archive_extraction=no ;; +-esac +-_LT_DECL([], [old_postinstall_cmds], [2]) +-_LT_DECL([], [old_postuninstall_cmds], [2]) +-_LT_TAGDECL([], [old_archive_cmds], [2], +- [Commands used to build an old-style archive]) +-_LT_DECL([], [lock_old_archive_extraction], [0], +- [Whether to use a lock for old archive extraction]) +-])# _LT_CMD_OLD_ARCHIVE +- +- +-# _LT_COMPILER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS, +-# [OUTPUT-FILE], [ACTION-SUCCESS], [ACTION-FAILURE]) +-# ---------------------------------------------------------------- +-# Check whether the given compiler option works +-AC_DEFUN([_LT_COMPILER_OPTION], 
+-[m4_require([_LT_FILEUTILS_DEFAULTS])dnl +-m4_require([_LT_DECL_SED])dnl +-AC_CACHE_CHECK([$1], [$2], +- [$2=no +- m4_if([$4], , [ac_outfile=conftest.$ac_objext], [ac_outfile=$4]) +- echo "$lt_simple_compile_test_code" > conftest.$ac_ext +- lt_compiler_flag="$3" ## exclude from sc_useless_quotes_in_assignment +- # Insert the option either (1) after the last *FLAGS variable, or +- # (2) before a word containing "conftest.", or (3) at the end. +- # Note that $ac_compile itself does not contain backslashes and begins +- # with a dollar sign (not a hyphen), so the echo should work correctly. +- # The option is referenced via a variable to avoid confusing sed. +- lt_compile=`echo "$ac_compile" | $SED \ +- -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ +- -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \ +- -e 's:$: $lt_compiler_flag:'` +- (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&AS_MESSAGE_LOG_FD) +- (eval "$lt_compile" 2>conftest.err) +- ac_status=$? +- cat conftest.err >&AS_MESSAGE_LOG_FD +- echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD +- if (exit $ac_status) && test -s "$ac_outfile"; then +- # The compiler can only warn and ignore the option if not recognized +- # So say no if there are warnings other than the usual output. +- $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp +- $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 +- if test ! 
-s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then +- $2=yes +- fi +- fi +- $RM conftest* +-]) +- +-if test yes = "[$]$2"; then +- m4_if([$5], , :, [$5]) +-else +- m4_if([$6], , :, [$6]) +-fi +-])# _LT_COMPILER_OPTION +- +-# Old name: +-AU_ALIAS([AC_LIBTOOL_COMPILER_OPTION], [_LT_COMPILER_OPTION]) +-dnl aclocal-1.4 backwards compatibility: +-dnl AC_DEFUN([AC_LIBTOOL_COMPILER_OPTION], []) +- +- +-# _LT_LINKER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS, +-# [ACTION-SUCCESS], [ACTION-FAILURE]) +-# ---------------------------------------------------- +-# Check whether the given linker option works +-AC_DEFUN([_LT_LINKER_OPTION], +-[m4_require([_LT_FILEUTILS_DEFAULTS])dnl +-m4_require([_LT_DECL_SED])dnl +-AC_CACHE_CHECK([$1], [$2], +- [$2=no +- save_LDFLAGS=$LDFLAGS +- LDFLAGS="$LDFLAGS $3" +- echo "$lt_simple_link_test_code" > conftest.$ac_ext +- if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then +- # The linker can only warn and ignore the option if not recognized +- # So say no if there are warnings +- if test -s conftest.err; then +- # Append any errors to the config.log. 
+- cat conftest.err 1>&AS_MESSAGE_LOG_FD +- $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp +- $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 +- if diff conftest.exp conftest.er2 >/dev/null; then +- $2=yes +- fi +- else +- $2=yes +- fi +- fi +- $RM -r conftest* +- LDFLAGS=$save_LDFLAGS +-]) +- +-if test yes = "[$]$2"; then +- m4_if([$4], , :, [$4]) +-else +- m4_if([$5], , :, [$5]) +-fi +-])# _LT_LINKER_OPTION +- +-# Old name: +-AU_ALIAS([AC_LIBTOOL_LINKER_OPTION], [_LT_LINKER_OPTION]) +-dnl aclocal-1.4 backwards compatibility: +-dnl AC_DEFUN([AC_LIBTOOL_LINKER_OPTION], []) +- +- +-# LT_CMD_MAX_LEN +-#--------------- +-AC_DEFUN([LT_CMD_MAX_LEN], +-[AC_REQUIRE([AC_CANONICAL_HOST])dnl +-# find the maximum length of command line arguments +-AC_MSG_CHECKING([the maximum length of command line arguments]) +-AC_CACHE_VAL([lt_cv_sys_max_cmd_len], [dnl +- i=0 +- teststring=ABCD +- +- case $build_os in +- msdosdjgpp*) +- # On DJGPP, this test can blow up pretty badly due to problems in libc +- # (any single argument exceeding 2000 bytes causes a buffer overrun +- # during glob expansion). Even if it were fixed, the result of this +- # check would be larger than it should be. +- lt_cv_sys_max_cmd_len=12288; # 12K is about right +- ;; +- +- gnu*) +- # Under GNU Hurd, this test is not required because there is +- # no limit to the length of command line arguments. +- # Libtool will interpret -1 as no limit whatsoever +- lt_cv_sys_max_cmd_len=-1; +- ;; +- +- cygwin* | mingw* | cegcc*) +- # On Win9x/ME, this test blows up -- it succeeds, but takes +- # about 5 minutes as the teststring grows exponentially. +- # Worse, since 9x/ME are not pre-emptively multitasking, +- # you end up with a "frozen" computer, even though with patience +- # the test eventually succeeds (with a max line length of 256k). +- # Instead, let's just punt: use the minimum linelength reported by +- # all of the supported platforms: 8192 (on NT/2K/XP). 
+- lt_cv_sys_max_cmd_len=8192; +- ;; +- +- mint*) +- # On MiNT this can take a long time and run out of memory. +- lt_cv_sys_max_cmd_len=8192; +- ;; +- +- amigaos*) +- # On AmigaOS with pdksh, this test takes hours, literally. +- # So we just punt and use a minimum line length of 8192. +- lt_cv_sys_max_cmd_len=8192; +- ;; +- +- bitrig* | darwin* | dragonfly* | freebsd* | midnightbsd* | netbsd* | openbsd*) +- # This has been around since 386BSD, at least. Likely further. +- if test -x /sbin/sysctl; then +- lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` +- elif test -x /usr/sbin/sysctl; then +- lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` +- else +- lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs +- fi +- # And add a safety zone +- lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` +- lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` +- ;; +- +- interix*) +- # We know the value 262144 and hardcode it with a safety zone (like BSD) +- lt_cv_sys_max_cmd_len=196608 +- ;; +- +- os2*) +- # The test takes a long time on OS/2. +- lt_cv_sys_max_cmd_len=8192 +- ;; +- +- osf*) +- # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure +- # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not +- # nice to cause kernel panics so lets avoid the loop below. +- # First set a reasonable default. 
+- lt_cv_sys_max_cmd_len=16384 +- # +- if test -x /sbin/sysconfig; then +- case `/sbin/sysconfig -q proc exec_disable_arg_limit` in +- *1*) lt_cv_sys_max_cmd_len=-1 ;; +- esac +- fi +- ;; +- sco3.2v5*) +- lt_cv_sys_max_cmd_len=102400 +- ;; +- sysv5* | sco5v6* | sysv4.2uw2*) +- kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null` +- if test -n "$kargmax"; then +- lt_cv_sys_max_cmd_len=`echo $kargmax | $SED 's/.*[[ ]]//'` +- else +- lt_cv_sys_max_cmd_len=32768 +- fi +- ;; +- *) +- lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null` +- if test -n "$lt_cv_sys_max_cmd_len" && \ +- test undefined != "$lt_cv_sys_max_cmd_len"; then +- lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` +- lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` +- else +- # Make teststring a little bigger before we do anything with it. +- # a 1K string should be a reasonable start. +- for i in 1 2 3 4 5 6 7 8; do +- teststring=$teststring$teststring +- done +- SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} +- # If test is not a shell built-in, we'll probably end up computing a +- # maximum length that is only half of the actual maximum length, but +- # we can't tell. +- while { test X`env echo "$teststring$teststring" 2>/dev/null` \ +- = "X$teststring$teststring"; } >/dev/null 2>&1 && +- test 17 != "$i" # 1/2 MB should be enough +- do +- i=`expr $i + 1` +- teststring=$teststring$teststring +- done +- # Only check the string length outside the loop. +- lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1` +- teststring= +- # Add a significant safety factor because C++ compilers can tack on +- # massive amounts of additional arguments before passing them to the +- # linker. It appears as though 1/2 is a usable value. 
+- lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2` +- fi +- ;; +- esac +-]) +-if test -n "$lt_cv_sys_max_cmd_len"; then +- AC_MSG_RESULT($lt_cv_sys_max_cmd_len) +-else +- AC_MSG_RESULT(none) +-fi +-max_cmd_len=$lt_cv_sys_max_cmd_len +-_LT_DECL([], [max_cmd_len], [0], +- [What is the maximum length of a command?]) +-])# LT_CMD_MAX_LEN +- +-# Old name: +-AU_ALIAS([AC_LIBTOOL_SYS_MAX_CMD_LEN], [LT_CMD_MAX_LEN]) +-dnl aclocal-1.4 backwards compatibility: +-dnl AC_DEFUN([AC_LIBTOOL_SYS_MAX_CMD_LEN], []) +- +- +-# _LT_HEADER_DLFCN +-# ---------------- +-m4_defun([_LT_HEADER_DLFCN], +-[AC_CHECK_HEADERS([dlfcn.h], [], [], [AC_INCLUDES_DEFAULT])dnl +-])# _LT_HEADER_DLFCN +- +- +-# _LT_TRY_DLOPEN_SELF (ACTION-IF-TRUE, ACTION-IF-TRUE-W-USCORE, +-# ACTION-IF-FALSE, ACTION-IF-CROSS-COMPILING) +-# ---------------------------------------------------------------- +-m4_defun([_LT_TRY_DLOPEN_SELF], +-[m4_require([_LT_HEADER_DLFCN])dnl +-if test yes = "$cross_compiling"; then : +- [$4] +-else +- lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 +- lt_status=$lt_dlunknown +- cat > conftest.$ac_ext <<_LT_EOF +-[#line $LINENO "configure" +-#include "confdefs.h" +- +-#if HAVE_DLFCN_H +-#include +-#endif +- +-#include +- +-#ifdef RTLD_GLOBAL +-# define LT_DLGLOBAL RTLD_GLOBAL +-#else +-# ifdef DL_GLOBAL +-# define LT_DLGLOBAL DL_GLOBAL +-# else +-# define LT_DLGLOBAL 0 +-# endif +-#endif +- +-/* We may have to define LT_DLLAZY_OR_NOW in the command line if we +- find out it does not work in some platform. 
*/ +-#ifndef LT_DLLAZY_OR_NOW +-# ifdef RTLD_LAZY +-# define LT_DLLAZY_OR_NOW RTLD_LAZY +-# else +-# ifdef DL_LAZY +-# define LT_DLLAZY_OR_NOW DL_LAZY +-# else +-# ifdef RTLD_NOW +-# define LT_DLLAZY_OR_NOW RTLD_NOW +-# else +-# ifdef DL_NOW +-# define LT_DLLAZY_OR_NOW DL_NOW +-# else +-# define LT_DLLAZY_OR_NOW 0 +-# endif +-# endif +-# endif +-# endif +-#endif +- +-/* When -fvisibility=hidden is used, assume the code has been annotated +- correspondingly for the symbols needed. */ +-#if defined __GNUC__ && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) +-int fnord () __attribute__((visibility("default"))); +-#endif +- +-int fnord () { return 42; } +-int main () +-{ +- void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); +- int status = $lt_dlunknown; +- +- if (self) +- { +- if (dlsym (self,"fnord")) status = $lt_dlno_uscore; +- else +- { +- if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; +- else puts (dlerror ()); +- } +- /* dlclose (self); */ +- } +- else +- puts (dlerror ()); +- +- return status; +-}] +-_LT_EOF +- if AC_TRY_EVAL(ac_link) && test -s "conftest$ac_exeext" 2>/dev/null; then +- (./conftest; exit; ) >&AS_MESSAGE_LOG_FD 2>/dev/null +- lt_status=$? 
+- case x$lt_status in +- x$lt_dlno_uscore) $1 ;; +- x$lt_dlneed_uscore) $2 ;; +- x$lt_dlunknown|x*) $3 ;; +- esac +- else : +- # compilation failed +- $3 +- fi +-fi +-rm -fr conftest* +-])# _LT_TRY_DLOPEN_SELF +- +- +-# LT_SYS_DLOPEN_SELF +-# ------------------ +-AC_DEFUN([LT_SYS_DLOPEN_SELF], +-[m4_require([_LT_HEADER_DLFCN])dnl +-if test yes != "$enable_dlopen"; then +- enable_dlopen=unknown +- enable_dlopen_self=unknown +- enable_dlopen_self_static=unknown +-else +- lt_cv_dlopen=no +- lt_cv_dlopen_libs= +- +- case $host_os in +- beos*) +- lt_cv_dlopen=load_add_on +- lt_cv_dlopen_libs= +- lt_cv_dlopen_self=yes +- ;; +- +- mingw* | pw32* | cegcc*) +- lt_cv_dlopen=LoadLibrary +- lt_cv_dlopen_libs= +- ;; +- +- cygwin*) +- lt_cv_dlopen=dlopen +- lt_cv_dlopen_libs= +- ;; +- +- darwin*) +- # if libdl is installed we need to link against it +- AC_CHECK_LIB([dl], [dlopen], +- [lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl],[ +- lt_cv_dlopen=dyld +- lt_cv_dlopen_libs= +- lt_cv_dlopen_self=yes +- ]) +- ;; +- +- tpf*) +- # Don't try to run any link tests for TPF. We know it's impossible +- # because TPF is a cross-compiler, and we know how we open DSOs. 
+- lt_cv_dlopen=dlopen +- lt_cv_dlopen_libs= +- lt_cv_dlopen_self=no +- ;; +- +- *) +- AC_CHECK_FUNC([shl_load], +- [lt_cv_dlopen=shl_load], +- [AC_CHECK_LIB([dld], [shl_load], +- [lt_cv_dlopen=shl_load lt_cv_dlopen_libs=-ldld], +- [AC_CHECK_FUNC([dlopen], +- [lt_cv_dlopen=dlopen], +- [AC_CHECK_LIB([dl], [dlopen], +- [lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl], +- [AC_CHECK_LIB([svld], [dlopen], +- [lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-lsvld], +- [AC_CHECK_LIB([dld], [dld_link], +- [lt_cv_dlopen=dld_link lt_cv_dlopen_libs=-ldld]) +- ]) +- ]) +- ]) +- ]) +- ]) +- ;; +- esac +- +- if test no = "$lt_cv_dlopen"; then +- enable_dlopen=no +- else +- enable_dlopen=yes +- fi +- +- case $lt_cv_dlopen in +- dlopen) +- save_CPPFLAGS=$CPPFLAGS +- test yes = "$ac_cv_header_dlfcn_h" && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" +- +- save_LDFLAGS=$LDFLAGS +- wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" +- +- save_LIBS=$LIBS +- LIBS="$lt_cv_dlopen_libs $LIBS" +- +- AC_CACHE_CHECK([whether a program can dlopen itself], +- lt_cv_dlopen_self, [dnl +- _LT_TRY_DLOPEN_SELF( +- lt_cv_dlopen_self=yes, lt_cv_dlopen_self=yes, +- lt_cv_dlopen_self=no, lt_cv_dlopen_self=cross) +- ]) +- +- if test yes = "$lt_cv_dlopen_self"; then +- wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\" +- AC_CACHE_CHECK([whether a statically linked program can dlopen itself], +- lt_cv_dlopen_self_static, [dnl +- _LT_TRY_DLOPEN_SELF( +- lt_cv_dlopen_self_static=yes, lt_cv_dlopen_self_static=yes, +- lt_cv_dlopen_self_static=no, lt_cv_dlopen_self_static=cross) +- ]) +- fi +- +- CPPFLAGS=$save_CPPFLAGS +- LDFLAGS=$save_LDFLAGS +- LIBS=$save_LIBS +- ;; +- esac +- +- case $lt_cv_dlopen_self in +- yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; +- *) enable_dlopen_self=unknown ;; +- esac +- +- case $lt_cv_dlopen_self_static in +- yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; +- *) enable_dlopen_self_static=unknown ;; +- esac +-fi 
+-_LT_DECL([dlopen_support], [enable_dlopen], [0], +- [Whether dlopen is supported]) +-_LT_DECL([dlopen_self], [enable_dlopen_self], [0], +- [Whether dlopen of programs is supported]) +-_LT_DECL([dlopen_self_static], [enable_dlopen_self_static], [0], +- [Whether dlopen of statically linked programs is supported]) +-])# LT_SYS_DLOPEN_SELF +- +-# Old name: +-AU_ALIAS([AC_LIBTOOL_DLOPEN_SELF], [LT_SYS_DLOPEN_SELF]) +-dnl aclocal-1.4 backwards compatibility: +-dnl AC_DEFUN([AC_LIBTOOL_DLOPEN_SELF], []) +- +- +-# _LT_COMPILER_C_O([TAGNAME]) +-# --------------------------- +-# Check to see if options -c and -o are simultaneously supported by compiler. +-# This macro does not hard code the compiler like AC_PROG_CC_C_O. +-m4_defun([_LT_COMPILER_C_O], +-[m4_require([_LT_DECL_SED])dnl +-m4_require([_LT_FILEUTILS_DEFAULTS])dnl +-m4_require([_LT_TAG_COMPILER])dnl +-AC_CACHE_CHECK([if $compiler supports -c -o file.$ac_objext], +- [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)], +- [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=no +- $RM -r conftest 2>/dev/null +- mkdir conftest +- cd conftest +- mkdir out +- echo "$lt_simple_compile_test_code" > conftest.$ac_ext +- +- lt_compiler_flag="-o out/conftest2.$ac_objext" +- # Insert the option either (1) after the last *FLAGS variable, or +- # (2) before a word containing "conftest.", or (3) at the end. +- # Note that $ac_compile itself does not contain backslashes and begins +- # with a dollar sign (not a hyphen), so the echo should work correctly. +- lt_compile=`echo "$ac_compile" | $SED \ +- -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ +- -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \ +- -e 's:$: $lt_compiler_flag:'` +- (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&AS_MESSAGE_LOG_FD) +- (eval "$lt_compile" 2>out/conftest.err) +- ac_status=$? +- cat out/conftest.err >&AS_MESSAGE_LOG_FD +- echo "$as_me:$LINENO: \$? 
= $ac_status" >&AS_MESSAGE_LOG_FD +- if (exit $ac_status) && test -s out/conftest2.$ac_objext +- then +- # The compiler can only warn and ignore the option if not recognized +- # So say no if there are warnings +- $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp +- $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 +- if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then +- _LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes +- fi +- fi +- chmod u+w . 2>&AS_MESSAGE_LOG_FD +- $RM conftest* +- # SGI C++ compiler will create directory out/ii_files/ for +- # template instantiation +- test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files +- $RM out/* && rmdir out +- cd .. +- $RM -r conftest +- $RM conftest* +-]) +-_LT_TAGDECL([compiler_c_o], [lt_cv_prog_compiler_c_o], [1], +- [Does compiler simultaneously support -c and -o options?]) +-])# _LT_COMPILER_C_O +- +- +-# _LT_COMPILER_FILE_LOCKS([TAGNAME]) +-# ---------------------------------- +-# Check to see if we can do hard links to lock some files if needed +-m4_defun([_LT_COMPILER_FILE_LOCKS], +-[m4_require([_LT_ENABLE_LOCK])dnl +-m4_require([_LT_FILEUTILS_DEFAULTS])dnl +-_LT_COMPILER_C_O([$1]) +- +-hard_links=nottested +-if test no = "$_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)" && test no != "$need_locks"; then +- # do not overwrite the value of need_locks provided by the user +- AC_MSG_CHECKING([if we can lock with hard links]) +- hard_links=yes +- $RM conftest* +- ln conftest.a conftest.b 2>/dev/null && hard_links=no +- touch conftest.a +- ln conftest.a conftest.b 2>&5 || hard_links=no +- ln conftest.a conftest.b 2>/dev/null && hard_links=no +- AC_MSG_RESULT([$hard_links]) +- if test no = "$hard_links"; then +- AC_MSG_WARN(['$CC' does not support '-c -o', so 'make -j' may be unsafe]) +- need_locks=warn +- fi +-else +- need_locks=no +-fi +-_LT_DECL([], [need_locks], [1], [Must we lock files when doing compilation?]) +-])# _LT_COMPILER_FILE_LOCKS +- +- +-# 
_LT_CHECK_OBJDIR +-# ---------------- +-m4_defun([_LT_CHECK_OBJDIR], +-[AC_CACHE_CHECK([for objdir], [lt_cv_objdir], +-[rm -f .libs 2>/dev/null +-mkdir .libs 2>/dev/null +-if test -d .libs; then +- lt_cv_objdir=.libs +-else +- # MS-DOS does not allow filenames that begin with a dot. +- lt_cv_objdir=_libs +-fi +-rmdir .libs 2>/dev/null]) +-objdir=$lt_cv_objdir +-_LT_DECL([], [objdir], [0], +- [The name of the directory that contains temporary libtool files])dnl +-m4_pattern_allow([LT_OBJDIR])dnl +-AC_DEFINE_UNQUOTED([LT_OBJDIR], "$lt_cv_objdir/", +- [Define to the sub-directory where libtool stores uninstalled libraries.]) +-])# _LT_CHECK_OBJDIR +- +- +-# _LT_LINKER_HARDCODE_LIBPATH([TAGNAME]) +-# -------------------------------------- +-# Check hardcoding attributes. +-m4_defun([_LT_LINKER_HARDCODE_LIBPATH], +-[AC_MSG_CHECKING([how to hardcode library paths into programs]) +-_LT_TAGVAR(hardcode_action, $1)= +-if test -n "$_LT_TAGVAR(hardcode_libdir_flag_spec, $1)" || +- test -n "$_LT_TAGVAR(runpath_var, $1)" || +- test yes = "$_LT_TAGVAR(hardcode_automatic, $1)"; then +- +- # We can hardcode non-existent directories. +- if test no != "$_LT_TAGVAR(hardcode_direct, $1)" && +- # If the only mechanism to avoid hardcoding is shlibpath_var, we +- # have to relink, otherwise we might link with an installed library +- # when we should be linking with a yet-to-be-installed one +- ## test no != "$_LT_TAGVAR(hardcode_shlibpath_var, $1)" && +- test no != "$_LT_TAGVAR(hardcode_minus_L, $1)"; then +- # Linking always hardcodes the temporary library directory. +- _LT_TAGVAR(hardcode_action, $1)=relink +- else +- # We can link without hardcoding, and we can hardcode nonexisting dirs. +- _LT_TAGVAR(hardcode_action, $1)=immediate +- fi +-else +- # We cannot hardcode anything, or else we can only hardcode existing +- # directories. 
+- _LT_TAGVAR(hardcode_action, $1)=unsupported +-fi +-AC_MSG_RESULT([$_LT_TAGVAR(hardcode_action, $1)]) +- +-if test relink = "$_LT_TAGVAR(hardcode_action, $1)" || +- test yes = "$_LT_TAGVAR(inherit_rpath, $1)"; then +- # Fast installation is not supported +- enable_fast_install=no +-elif test yes = "$shlibpath_overrides_runpath" || +- test no = "$enable_shared"; then +- # Fast installation is not necessary +- enable_fast_install=needless +-fi +-_LT_TAGDECL([], [hardcode_action], [0], +- [How to hardcode a shared library path into an executable]) +-])# _LT_LINKER_HARDCODE_LIBPATH +- +- +-# _LT_CMD_STRIPLIB +-# ---------------- +-m4_defun([_LT_CMD_STRIPLIB], +-[m4_require([_LT_DECL_EGREP]) +-striplib= +-old_striplib= +-AC_MSG_CHECKING([whether stripping libraries is possible]) +-if test -z "$STRIP"; then +- AC_MSG_RESULT([no]) +-else +- if $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then +- old_striplib="$STRIP --strip-debug" +- striplib="$STRIP --strip-unneeded" +- AC_MSG_RESULT([yes]) +- else +- case $host_os in +- darwin*) +- # FIXME - insert some real tests, host_os isn't really good enough +- striplib="$STRIP -x" +- old_striplib="$STRIP -S" +- AC_MSG_RESULT([yes]) +- ;; +- freebsd*) +- if $STRIP -V 2>&1 | $GREP "elftoolchain" >/dev/null; then +- old_striplib="$STRIP --strip-debug" +- striplib="$STRIP --strip-unneeded" +- AC_MSG_RESULT([yes]) +- else +- AC_MSG_RESULT([no]) +- fi +- ;; +- *) +- AC_MSG_RESULT([no]) +- ;; +- esac +- fi +-fi +-_LT_DECL([], [old_striplib], [1], [Commands to strip libraries]) +-_LT_DECL([], [striplib], [1]) +-])# _LT_CMD_STRIPLIB +- +- +-# _LT_PREPARE_MUNGE_PATH_LIST +-# --------------------------- +-# Make sure func_munge_path_list() is defined correctly. 
+-m4_defun([_LT_PREPARE_MUNGE_PATH_LIST], +-[[# func_munge_path_list VARIABLE PATH +-# ----------------------------------- +-# VARIABLE is name of variable containing _space_ separated list of +-# directories to be munged by the contents of PATH, which is string +-# having a format: +-# "DIR[:DIR]:" +-# string "DIR[ DIR]" will be prepended to VARIABLE +-# ":DIR[:DIR]" +-# string "DIR[ DIR]" will be appended to VARIABLE +-# "DIRP[:DIRP]::[DIRA:]DIRA" +-# string "DIRP[ DIRP]" will be prepended to VARIABLE and string +-# "DIRA[ DIRA]" will be appended to VARIABLE +-# "DIR[:DIR]" +-# VARIABLE will be replaced by "DIR[ DIR]" +-func_munge_path_list () +-{ +- case x@S|@2 in +- x) +- ;; +- *:) +- eval @S|@1=\"`$ECHO @S|@2 | $SED 's/:/ /g'` \@S|@@S|@1\" +- ;; +- x:*) +- eval @S|@1=\"\@S|@@S|@1 `$ECHO @S|@2 | $SED 's/:/ /g'`\" +- ;; +- *::*) +- eval @S|@1=\"\@S|@@S|@1\ `$ECHO @S|@2 | $SED -e 's/.*:://' -e 's/:/ /g'`\" +- eval @S|@1=\"`$ECHO @S|@2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \@S|@@S|@1\" +- ;; +- *) +- eval @S|@1=\"`$ECHO @S|@2 | $SED 's/:/ /g'`\" +- ;; +- esac +-} +-]])# _LT_PREPARE_PATH_LIST +- +- +-# _LT_SYS_DYNAMIC_LINKER([TAG]) +-# ----------------------------- +-# PORTME Fill in your ld.so characteristics +-m4_defun([_LT_SYS_DYNAMIC_LINKER], +-[AC_REQUIRE([AC_CANONICAL_HOST])dnl +-m4_require([_LT_DECL_EGREP])dnl +-m4_require([_LT_FILEUTILS_DEFAULTS])dnl +-m4_require([_LT_DECL_OBJDUMP])dnl +-m4_require([_LT_DECL_SED])dnl +-m4_require([_LT_CHECK_SHELL_FEATURES])dnl +-m4_require([_LT_PREPARE_MUNGE_PATH_LIST])dnl +-AC_MSG_CHECKING([dynamic linker characteristics]) +-m4_if([$1], +- [], [ +-if test yes = "$GCC"; then +- case $host_os in +- darwin*) lt_awk_arg='/^libraries:/,/LR/' ;; +- *) lt_awk_arg='/^libraries:/' ;; +- esac +- case $host_os in +- mingw* | cegcc*) lt_sed_strip_eq='s|=\([[A-Za-z]]:\)|\1|g' ;; +- *) lt_sed_strip_eq='s|=/|/|g' ;; +- esac +- lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e $lt_sed_strip_eq` +- 
case $lt_search_path_spec in +- *\;*) +- # if the path contains ";" then we assume it to be the separator +- # otherwise default to the standard path separator (i.e. ":") - it is +- # assumed that no part of a normal pathname contains ";" but that should +- # okay in the real world where ";" in dirpaths is itself problematic. +- lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED 's/;/ /g'` +- ;; +- *) +- lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED "s/$PATH_SEPARATOR/ /g"` +- ;; +- esac +- # Ok, now we have the path, separated by spaces, we can step through it +- # and add multilib dir if necessary... +- lt_tmp_lt_search_path_spec= +- lt_multi_os_dir=/`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null` +- # ...but if some path component already ends with the multilib dir we assume +- # that all is fine and trust -print-search-dirs as is (GCC 4.2? or newer). +- case "$lt_multi_os_dir; $lt_search_path_spec " in +- "/; "* | "/.; "* | "/./; "* | *"$lt_multi_os_dir "* | *"$lt_multi_os_dir/ "*) +- lt_multi_os_dir= +- ;; +- esac +- for lt_sys_path in $lt_search_path_spec; do +- if test -d "$lt_sys_path$lt_multi_os_dir"; then +- lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path$lt_multi_os_dir" +- elif test -n "$lt_multi_os_dir"; then +- test -d "$lt_sys_path" && \ +- lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path" +- fi +- done +- lt_search_path_spec=`$ECHO "$lt_tmp_lt_search_path_spec" | awk ' +-BEGIN {RS = " "; FS = "/|\n";} { +- lt_foo = ""; +- lt_count = 0; +- for (lt_i = NF; lt_i > 0; lt_i--) { +- if ($lt_i != "" && $lt_i != ".") { +- if ($lt_i == "..") { +- lt_count++; +- } else { +- if (lt_count == 0) { +- lt_foo = "/" $lt_i lt_foo; +- } else { +- lt_count--; +- } +- } +- } +- } +- if (lt_foo != "") { lt_freq[[lt_foo]]++; } +- if (lt_freq[[lt_foo]] == 1) { print lt_foo; } +-}'` +- # AWK program above erroneously prepends '/' to C:/dos/paths +- # for these hosts. 
+- case $host_os in +- mingw* | cegcc*) lt_search_path_spec=`$ECHO "$lt_search_path_spec" |\ +- $SED 's|/\([[A-Za-z]]:\)|\1|g'` ;; +- esac +- sys_lib_search_path_spec=`$ECHO "$lt_search_path_spec" | $lt_NL2SP` +-else +- sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" +-fi]) +-library_names_spec= +-libname_spec='lib$name' +-soname_spec= +-shrext_cmds=.so +-postinstall_cmds= +-postuninstall_cmds= +-finish_cmds= +-finish_eval= +-shlibpath_var= +-shlibpath_overrides_runpath=unknown +-version_type=none +-dynamic_linker="$host_os ld.so" +-sys_lib_dlsearch_path_spec="/lib /usr/lib" +-need_lib_prefix=unknown +-hardcode_into_libs=no +- +-# when you set need_version to no, make sure it does not cause -set_version +-# flags to be left without arguments +-need_version=unknown +- +-AC_ARG_VAR([LT_SYS_LIBRARY_PATH], +-[User-defined run-time library search path.]) +- +-case $host_os in +-aix3*) +- version_type=linux # correct to gnu/linux during the next big refactor +- library_names_spec='$libname$release$shared_ext$versuffix $libname.a' +- shlibpath_var=LIBPATH +- +- # AIX 3 has no versioning support, so we append a major version to the name. +- soname_spec='$libname$release$shared_ext$major' +- ;; +- +-aix[[4-9]]*) +- version_type=linux # correct to gnu/linux during the next big refactor +- need_lib_prefix=no +- need_version=no +- hardcode_into_libs=yes +- if test ia64 = "$host_cpu"; then +- # AIX 5 supports IA64 +- library_names_spec='$libname$release$shared_ext$major $libname$release$shared_ext$versuffix $libname$shared_ext' +- shlibpath_var=LD_LIBRARY_PATH +- else +- # With GCC up to 2.95.x, collect2 would create an import file +- # for dependence libraries. The import file would start with +- # the line '#! .'. This would cause the generated library to +- # depend on '.', always an invalid library. This was fixed in +- # development snapshots of GCC prior to 3.0. 
+- case $host_os in +- aix4 | aix4.[[01]] | aix4.[[01]].*) +- if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' +- echo ' yes ' +- echo '#endif'; } | $CC -E - | $GREP yes > /dev/null; then +- : +- else +- can_build_shared=no +- fi +- ;; +- esac +- # Using Import Files as archive members, it is possible to support +- # filename-based versioning of shared library archives on AIX. While +- # this would work for both with and without runtime linking, it will +- # prevent static linking of such archives. So we do filename-based +- # shared library versioning with .so extension only, which is used +- # when both runtime linking and shared linking is enabled. +- # Unfortunately, runtime linking may impact performance, so we do +- # not want this to be the default eventually. Also, we use the +- # versioned .so libs for executables only if there is the -brtl +- # linker flag in LDFLAGS as well, or --with-aix-soname=svr4 only. +- # To allow for filename-based versioning support, we need to create +- # libNAME.so.V as an archive file, containing: +- # *) an Import File, referring to the versioned filename of the +- # archive as well as the shared archive member, telling the +- # bitwidth (32 or 64) of that shared object, and providing the +- # list of exported symbols of that shared object, eventually +- # decorated with the 'weak' keyword +- # *) the shared object with the F_LOADONLY flag set, to really avoid +- # it being seen by the linker. +- # At run time we better use the real file rather than another symlink, +- # but for link time we create the symlink libNAME.so -> libNAME.so.V +- +- case $with_aix_soname,$aix_use_runtimelinking in +- # AIX (on Power*) has no versioning support, so currently we cannot hardcode correct +- # soname into executable. Probably we can add versioning support to +- # collect2, so additional links can be useful in future. 
+- aix,yes) # traditional libtool +- dynamic_linker='AIX unversionable lib.so' +- # If using run time linking (on AIX 4.2 or later) use lib.so +- # instead of lib.a to let people know that these are not +- # typical AIX shared libraries. +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- ;; +- aix,no) # traditional AIX only +- dynamic_linker='AIX lib.a[(]lib.so.V[)]' +- # We preserve .a as extension for shared libraries through AIX4.2 +- # and later when we are not doing run time linking. +- library_names_spec='$libname$release.a $libname.a' +- soname_spec='$libname$release$shared_ext$major' +- ;; +- svr4,*) # full svr4 only +- dynamic_linker="AIX lib.so.V[(]$shared_archive_member_spec.o[)]" +- library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' +- # We do not specify a path in Import Files, so LIBPATH fires. +- shlibpath_overrides_runpath=yes +- ;; +- *,yes) # both, prefer svr4 +- dynamic_linker="AIX lib.so.V[(]$shared_archive_member_spec.o[)], lib.a[(]lib.so.V[)]" +- library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' +- # unpreferred sharedlib libNAME.a needs extra handling +- postinstall_cmds='test -n "$linkname" || linkname="$realname"~func_stripname "" ".so" "$linkname"~$install_shared_prog "$dir/$func_stripname_result.$libext" "$destdir/$func_stripname_result.$libext"~test -z "$tstripme" || test -z "$striplib" || $striplib "$destdir/$func_stripname_result.$libext"' +- postuninstall_cmds='for n in $library_names $old_library; do :; done~func_stripname "" ".so" "$n"~test "$func_stripname_result" = "$n" || func_append rmfiles " $odir/$func_stripname_result.$libext"' +- # We do not specify a path in Import Files, so LIBPATH fires. 
+- shlibpath_overrides_runpath=yes +- ;; +- *,no) # both, prefer aix +- dynamic_linker="AIX lib.a[(]lib.so.V[)], lib.so.V[(]$shared_archive_member_spec.o[)]" +- library_names_spec='$libname$release.a $libname.a' +- soname_spec='$libname$release$shared_ext$major' +- # unpreferred sharedlib libNAME.so.V and symlink libNAME.so need extra handling +- postinstall_cmds='test -z "$dlname" || $install_shared_prog $dir/$dlname $destdir/$dlname~test -z "$tstripme" || test -z "$striplib" || $striplib $destdir/$dlname~test -n "$linkname" || linkname=$realname~func_stripname "" ".a" "$linkname"~(cd "$destdir" && $LN_S -f $dlname $func_stripname_result.so)' +- postuninstall_cmds='test -z "$dlname" || func_append rmfiles " $odir/$dlname"~for n in $old_library $library_names; do :; done~func_stripname "" ".a" "$n"~func_append rmfiles " $odir/$func_stripname_result.so"' +- ;; +- esac +- shlibpath_var=LIBPATH +- fi +- ;; +- +-amigaos*) +- case $host_cpu in +- powerpc) +- # Since July 2007 AmigaOS4 officially supports .so libraries. +- # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- ;; +- m68k) +- library_names_spec='$libname.ixlibrary $libname.a' +- # Create ${libname}_ixlibrary.a entries in /sys/libs. 
+- finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([[^/]]*\)\.ixlibrary$%\1%'\''`; $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' +- ;; +- esac +- ;; +- +-beos*) +- library_names_spec='$libname$shared_ext' +- dynamic_linker="$host_os ld.so" +- shlibpath_var=LIBRARY_PATH +- ;; +- +-bsdi[[45]]*) +- version_type=linux # correct to gnu/linux during the next big refactor +- need_version=no +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' +- finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' +- shlibpath_var=LD_LIBRARY_PATH +- sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" +- sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" +- # the default ld.so.conf also contains /usr/contrib/lib and +- # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow +- # libtool to hard-code these into programs +- ;; +- +-cygwin* | mingw* | pw32* | cegcc*) +- version_type=windows +- shrext_cmds=.dll +- need_version=no +- need_lib_prefix=no +- +- case $GCC,$cc_basename in +- yes,*) +- # gcc +- library_names_spec='$libname.dll.a' +- # DLL is installed to $(libdir)/../bin by postinstall_cmds +- postinstall_cmds='base_file=`basename \$file`~ +- dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ +- dldir=$destdir/`dirname \$dlpath`~ +- test -d \$dldir || mkdir -p \$dldir~ +- $install_prog $dir/$dlname \$dldir/$dlname~ +- chmod a+x \$dldir/$dlname~ +- if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then +- eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; +- fi' +- postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; echo \$dlname'\''`~ +- dlpath=$dir/\$dldll~ +- $RM \$dlpath' +- shlibpath_overrides_runpath=yes +- +- case $host_os in +- cygwin*) +- # Cygwin DLLs use 'cyg' prefix rather than 'lib' +- soname_spec='`echo $libname | $SED -e 's/^lib/cyg/'``echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext' +-m4_if([$1], [],[ +- sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/lib/w32api"]) +- ;; +- mingw* | cegcc*) +- # MinGW DLLs use traditional 'lib' prefix +- soname_spec='$libname`echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext' +- ;; +- pw32*) +- # pw32 DLLs use 'pw' prefix rather than 'lib' +- library_names_spec='`echo $libname | $SED -e 's/^lib/pw/'``echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext' +- ;; +- esac +- dynamic_linker='Win32 ld.exe' +- ;; +- +- *,cl* | *,icl*) +- # Native MSVC or ICC +- libname_spec='$name' +- soname_spec='$libname`echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext' +- library_names_spec='$libname.dll.lib' +- +- case $build_os in +- mingw*) +- sys_lib_search_path_spec= +- lt_save_ifs=$IFS +- IFS=';' +- for lt_path in $LIB +- do +- IFS=$lt_save_ifs +- # Let DOS variable expansion print the short 8.3 style file name. +- lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` +- sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" +- done +- IFS=$lt_save_ifs +- # Convert to MSYS style. +- sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's|\\\\|/|g' -e 's| \\([[a-zA-Z]]\\):| /\\1|g' -e 's|^ ||'` +- ;; +- cygwin*) +- # Convert to unix form, then to dos form, then back to unix form +- # but this time dos style (no spaces!) so that the unix form looks +- # like /cygdrive/c/PROGRA~1:/cygdr... 
+- sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` +- sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` +- sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` +- ;; +- *) +- sys_lib_search_path_spec=$LIB +- if $ECHO "$sys_lib_search_path_spec" | [$GREP ';[c-zC-Z]:/' >/dev/null]; then +- # It is most probably a Windows format PATH. +- sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` +- else +- sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` +- fi +- # FIXME: find the short name or the path components, as spaces are +- # common. (e.g. "Program Files" -> "PROGRA~1") +- ;; +- esac +- +- # DLL is installed to $(libdir)/../bin by postinstall_cmds +- postinstall_cmds='base_file=`basename \$file`~ +- dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ +- dldir=$destdir/`dirname \$dlpath`~ +- test -d \$dldir || mkdir -p \$dldir~ +- $install_prog $dir/$dlname \$dldir/$dlname' +- postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ +- dlpath=$dir/\$dldll~ +- $RM \$dlpath' +- shlibpath_overrides_runpath=yes +- dynamic_linker='Win32 link.exe' +- ;; +- +- *) +- # Assume MSVC and ICC wrapper +- library_names_spec='$libname`echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext $libname.lib' +- dynamic_linker='Win32 ld.exe' +- ;; +- esac +- # FIXME: first we should search . 
and the directory the executable is in +- shlibpath_var=PATH +- ;; +- +-darwin* | rhapsody*) +- dynamic_linker="$host_os dyld" +- version_type=darwin +- need_lib_prefix=no +- need_version=no +- library_names_spec='$libname$release$major$shared_ext $libname$shared_ext' +- soname_spec='$libname$release$major$shared_ext' +- shlibpath_overrides_runpath=yes +- shlibpath_var=DYLD_LIBRARY_PATH +- shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' +-m4_if([$1], [],[ +- sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib"]) +- sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' +- ;; +- +-dgux*) +- version_type=linux # correct to gnu/linux during the next big refactor +- need_lib_prefix=no +- need_version=no +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' +- shlibpath_var=LD_LIBRARY_PATH +- ;; +- +-freebsd* | dragonfly* | midnightbsd*) +- # DragonFly does not have aout. When/if they implement a new +- # versioning mechanism, adjust this. 
+- if test -x /usr/bin/objformat; then +- objformat=`/usr/bin/objformat` +- else +- case $host_os in +- freebsd[[23]].*) objformat=aout ;; +- *) objformat=elf ;; +- esac +- fi +- version_type=freebsd-$objformat +- case $version_type in +- freebsd-elf*) +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' +- need_version=no +- need_lib_prefix=no +- ;; +- freebsd-*) +- library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' +- need_version=yes +- ;; +- esac +- shlibpath_var=LD_LIBRARY_PATH +- case $host_os in +- freebsd2.*) +- shlibpath_overrides_runpath=yes +- ;; +- freebsd3.[[01]]* | freebsdelf3.[[01]]*) +- shlibpath_overrides_runpath=yes +- hardcode_into_libs=yes +- ;; +- freebsd3.[[2-9]]* | freebsdelf3.[[2-9]]* | \ +- freebsd4.[[0-5]] | freebsdelf4.[[0-5]] | freebsd4.1.1 | freebsdelf4.1.1) +- shlibpath_overrides_runpath=no +- hardcode_into_libs=yes +- ;; +- *) # from 4.6 on, and DragonFly +- shlibpath_overrides_runpath=yes +- hardcode_into_libs=yes +- ;; +- esac +- ;; +- +-haiku*) +- version_type=linux # correct to gnu/linux during the next big refactor +- need_lib_prefix=no +- need_version=no +- dynamic_linker="$host_os runtime_loader" +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' +- shlibpath_var=LIBRARY_PATH +- shlibpath_overrides_runpath=no +- sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' +- hardcode_into_libs=yes +- ;; +- +-hpux9* | hpux10* | hpux11*) +- # Give a soname corresponding to the major version so that dld.sl refuses to +- # link against other versions. 
+- version_type=sunos +- need_lib_prefix=no +- need_version=no +- case $host_cpu in +- ia64*) +- shrext_cmds='.so' +- hardcode_into_libs=yes +- dynamic_linker="$host_os dld.so" +- shlibpath_var=LD_LIBRARY_PATH +- shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' +- if test 32 = "$HPUX_IA64_MODE"; then +- sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" +- sys_lib_dlsearch_path_spec=/usr/lib/hpux32 +- else +- sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" +- sys_lib_dlsearch_path_spec=/usr/lib/hpux64 +- fi +- ;; +- hppa*64*) +- shrext_cmds='.sl' +- hardcode_into_libs=yes +- dynamic_linker="$host_os dld.sl" +- shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH +- shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' +- sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" +- sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec +- ;; +- *) +- shrext_cmds='.sl' +- dynamic_linker="$host_os dld.sl" +- shlibpath_var=SHLIB_PATH +- shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' +- ;; +- esac +- # HP-UX runs *really* slowly unless shared libraries are mode 555, ... 
+- postinstall_cmds='chmod 555 $lib' +- # or fails outright, so override atomically: +- install_override_mode=555 +- ;; +- +-interix[[3-9]]*) +- version_type=linux # correct to gnu/linux during the next big refactor +- need_lib_prefix=no +- need_version=no +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' +- dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' +- shlibpath_var=LD_LIBRARY_PATH +- shlibpath_overrides_runpath=no +- hardcode_into_libs=yes +- ;; +- +-irix5* | irix6* | nonstopux*) +- case $host_os in +- nonstopux*) version_type=nonstopux ;; +- *) +- if test yes = "$lt_cv_prog_gnu_ld"; then +- version_type=linux # correct to gnu/linux during the next big refactor +- else +- version_type=irix +- fi ;; +- esac +- need_lib_prefix=no +- need_version=no +- soname_spec='$libname$release$shared_ext$major' +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$release$shared_ext $libname$shared_ext' +- case $host_os in +- irix5* | nonstopux*) +- libsuff= shlibsuff= +- ;; +- *) +- case $LD in # libtool.m4 will add one of these switches to LD +- *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") +- libsuff= shlibsuff= libmagic=32-bit;; +- *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") +- libsuff=32 shlibsuff=N32 libmagic=N32;; +- *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") +- libsuff=64 shlibsuff=64 libmagic=64-bit;; +- *) libsuff= shlibsuff= libmagic=never-match;; +- esac +- ;; +- esac +- shlibpath_var=LD_LIBRARY${shlibsuff}_PATH +- shlibpath_overrides_runpath=no +- sys_lib_search_path_spec="/usr/lib$libsuff /lib$libsuff /usr/local/lib$libsuff" +- sys_lib_dlsearch_path_spec="/usr/lib$libsuff /lib$libsuff" +- hardcode_into_libs=yes +- ;; +- +-# No shared lib support for Linux oldld, aout, or coff. 
+-linux*oldld* | linux*aout* | linux*coff*) +- dynamic_linker=no +- ;; +- +-linux*android*) +- version_type=none # Android doesn't support versioned libraries. +- need_lib_prefix=no +- need_version=no +- library_names_spec='$libname$release$shared_ext' +- soname_spec='$libname$release$shared_ext' +- finish_cmds= +- shlibpath_var=LD_LIBRARY_PATH +- shlibpath_overrides_runpath=yes +- +- # This implies no fast_install, which is unacceptable. +- # Some rework will be needed to allow for fast_install +- # before this can be enabled. +- hardcode_into_libs=yes +- +- dynamic_linker='Android linker' +- # Don't embed -rpath directories since the linker doesn't support them. +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' +- ;; +- +-# This must be glibc/ELF. +-linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) +- version_type=linux # correct to gnu/linux during the next big refactor +- need_lib_prefix=no +- need_version=no +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' +- finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' +- shlibpath_var=LD_LIBRARY_PATH +- shlibpath_overrides_runpath=no +- +- # Some binutils ld are patched to set DT_RUNPATH +- AC_CACHE_VAL([lt_cv_shlibpath_overrides_runpath], +- [lt_cv_shlibpath_overrides_runpath=no +- save_LDFLAGS=$LDFLAGS +- save_libdir=$libdir +- eval "libdir=/foo; wl=\"$_LT_TAGVAR(lt_prog_compiler_wl, $1)\"; \ +- LDFLAGS=\"\$LDFLAGS $_LT_TAGVAR(hardcode_libdir_flag_spec, $1)\"" +- AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])], +- [AS_IF([ ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null], +- [lt_cv_shlibpath_overrides_runpath=yes])]) +- LDFLAGS=$save_LDFLAGS +- libdir=$save_libdir +- ]) +- shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath +- +- # This implies no fast_install, which is unacceptable. 
+- # Some rework will be needed to allow for fast_install +- # before this can be enabled. +- hardcode_into_libs=yes +- +- # Add ABI-specific directories to the system library path. +- sys_lib_dlsearch_path_spec="/lib64 /usr/lib64 /lib /usr/lib" +- +- # Ideally, we could use ldconfig to report *all* directores which are +- # searched for libraries, however this is still not possible. Aside from not +- # being certain /sbin/ldconfig is available, command +- # 'ldconfig -N -X -v | grep ^/' on 64bit Fedora does not report /usr/lib64, +- # even though it is searched at run-time. Try to do the best guess by +- # appending ld.so.conf contents (and includes) to the search path. +- if test -f /etc/ld.so.conf; then +- lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \[$]2)); skip = 1; } { if (!skip) print \[$]0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '` +- sys_lib_dlsearch_path_spec="$sys_lib_dlsearch_path_spec $lt_ld_extra" +- fi +- +- # We used to test for /lib/ld.so.1 and disable shared libraries on +- # powerpc, because MkLinux only supported shared libraries with the +- # GNU dynamic linker. Since this was broken with cross compilers, +- # most powerpc-linux boxes support dynamic linking these days and +- # people can always --disable-shared, the test was removed, and we +- # assume the GNU/Linux dynamic linker is in use. 
+- dynamic_linker='GNU/Linux ld.so' +- ;; +- +-netbsd*) +- version_type=sunos +- need_lib_prefix=no +- need_version=no +- if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then +- library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' +- finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' +- dynamic_linker='NetBSD (a.out) ld.so' +- else +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' +- dynamic_linker='NetBSD ld.elf_so' +- fi +- shlibpath_var=LD_LIBRARY_PATH +- shlibpath_overrides_runpath=yes +- hardcode_into_libs=yes +- ;; +- +-newsos6) +- version_type=linux # correct to gnu/linux during the next big refactor +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- shlibpath_var=LD_LIBRARY_PATH +- shlibpath_overrides_runpath=yes +- ;; +- +-*nto* | *qnx*) +- version_type=qnx +- need_lib_prefix=no +- need_version=no +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' +- shlibpath_var=LD_LIBRARY_PATH +- shlibpath_overrides_runpath=no +- hardcode_into_libs=yes +- dynamic_linker='ldqnx.so' +- ;; +- +-openbsd* | bitrig*) +- version_type=sunos +- sys_lib_dlsearch_path_spec=/usr/lib +- need_lib_prefix=no +- if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then +- need_version=no +- else +- need_version=yes +- fi +- library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' +- finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' +- shlibpath_var=LD_LIBRARY_PATH +- shlibpath_overrides_runpath=yes +- ;; +- +-os2*) +- libname_spec='$name' +- version_type=windows +- shrext_cmds=.dll +- need_version=no +- need_lib_prefix=no +- # OS/2 can only load a DLL with a base name of 8 characters or less. 
+- soname_spec='`test -n "$os2dllname" && libname="$os2dllname"; +- v=$($ECHO $release$versuffix | tr -d .-); +- n=$($ECHO $libname | cut -b -$((8 - ${#v})) | tr . _); +- $ECHO $n$v`$shared_ext' +- library_names_spec='${libname}_dll.$libext' +- dynamic_linker='OS/2 ld.exe' +- shlibpath_var=BEGINLIBPATH +- sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" +- sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec +- postinstall_cmds='base_file=`basename \$file`~ +- dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; $ECHO \$dlname'\''`~ +- dldir=$destdir/`dirname \$dlpath`~ +- test -d \$dldir || mkdir -p \$dldir~ +- $install_prog $dir/$dlname \$dldir/$dlname~ +- chmod a+x \$dldir/$dlname~ +- if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then +- eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; +- fi' +- postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; $ECHO \$dlname'\''`~ +- dlpath=$dir/\$dldll~ +- $RM \$dlpath' +- ;; +- +-osf3* | osf4* | osf5*) +- version_type=osf +- need_lib_prefix=no +- need_version=no +- soname_spec='$libname$release$shared_ext$major' +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- shlibpath_var=LD_LIBRARY_PATH +- sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" +- sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec +- ;; +- +-rdos*) +- dynamic_linker=no +- ;; +- +-solaris*) +- version_type=linux # correct to gnu/linux during the next big refactor +- need_lib_prefix=no +- need_version=no +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' +- shlibpath_var=LD_LIBRARY_PATH +- shlibpath_overrides_runpath=yes +- hardcode_into_libs=yes +- # ldd complains unless libraries are executable +- postinstall_cmds='chmod +x $lib' +- ;; +- +-sunos4*) +- version_type=sunos +- 
library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' +- finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' +- shlibpath_var=LD_LIBRARY_PATH +- shlibpath_overrides_runpath=yes +- if test yes = "$with_gnu_ld"; then +- need_lib_prefix=no +- fi +- need_version=yes +- ;; +- +-sysv4 | sysv4.3*) +- version_type=linux # correct to gnu/linux during the next big refactor +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' +- shlibpath_var=LD_LIBRARY_PATH +- case $host_vendor in +- sni) +- shlibpath_overrides_runpath=no +- need_lib_prefix=no +- runpath_var=LD_RUN_PATH +- ;; +- siemens) +- need_lib_prefix=no +- ;; +- motorola) +- need_lib_prefix=no +- need_version=no +- shlibpath_overrides_runpath=no +- sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' +- ;; +- esac +- ;; +- +-sysv4*MP*) +- if test -d /usr/nec; then +- version_type=linux # correct to gnu/linux during the next big refactor +- library_names_spec='$libname$shared_ext.$versuffix $libname$shared_ext.$major $libname$shared_ext' +- soname_spec='$libname$shared_ext.$major' +- shlibpath_var=LD_LIBRARY_PATH +- fi +- ;; +- +-sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) +- version_type=sco +- need_lib_prefix=no +- need_version=no +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' +- shlibpath_var=LD_LIBRARY_PATH +- shlibpath_overrides_runpath=yes +- hardcode_into_libs=yes +- if test yes = "$with_gnu_ld"; then +- sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' +- else +- sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' +- case $host_os in +- sco3.2v5*) +- sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" +- ;; +- esac +- fi +- sys_lib_dlsearch_path_spec='/usr/lib' +- ;; +- +-tpf*) +- # TPF is a 
cross-target only. Preferred cross-host = GNU/Linux. +- version_type=linux # correct to gnu/linux during the next big refactor +- need_lib_prefix=no +- need_version=no +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- shlibpath_var=LD_LIBRARY_PATH +- shlibpath_overrides_runpath=no +- hardcode_into_libs=yes +- ;; +- +-uts4*) +- version_type=linux # correct to gnu/linux during the next big refactor +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' +- shlibpath_var=LD_LIBRARY_PATH +- ;; +- +-*) +- dynamic_linker=no +- ;; +-esac +-AC_MSG_RESULT([$dynamic_linker]) +-test no = "$dynamic_linker" && can_build_shared=no +- +-variables_saved_for_relink="PATH $shlibpath_var $runpath_var" +-if test yes = "$GCC"; then +- variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" +-fi +- +-if test set = "${lt_cv_sys_lib_search_path_spec+set}"; then +- sys_lib_search_path_spec=$lt_cv_sys_lib_search_path_spec +-fi +- +-if test set = "${lt_cv_sys_lib_dlsearch_path_spec+set}"; then +- sys_lib_dlsearch_path_spec=$lt_cv_sys_lib_dlsearch_path_spec +-fi +- +-# remember unaugmented sys_lib_dlsearch_path content for libtool script decls... +-configure_time_dlsearch_path=$sys_lib_dlsearch_path_spec +- +-# ... 
but it needs LT_SYS_LIBRARY_PATH munging for other configure-time code +-func_munge_path_list sys_lib_dlsearch_path_spec "$LT_SYS_LIBRARY_PATH" +- +-# to be used as default LT_SYS_LIBRARY_PATH value in generated libtool +-configure_time_lt_sys_library_path=$LT_SYS_LIBRARY_PATH +- +-_LT_DECL([], [variables_saved_for_relink], [1], +- [Variables whose values should be saved in libtool wrapper scripts and +- restored at link time]) +-_LT_DECL([], [need_lib_prefix], [0], +- [Do we need the "lib" prefix for modules?]) +-_LT_DECL([], [need_version], [0], [Do we need a version for libraries?]) +-_LT_DECL([], [version_type], [0], [Library versioning type]) +-_LT_DECL([], [runpath_var], [0], [Shared library runtime path variable]) +-_LT_DECL([], [shlibpath_var], [0],[Shared library path variable]) +-_LT_DECL([], [shlibpath_overrides_runpath], [0], +- [Is shlibpath searched before the hard-coded library search path?]) +-_LT_DECL([], [libname_spec], [1], [Format of library name prefix]) +-_LT_DECL([], [library_names_spec], [1], +- [[List of archive names. First name is the real one, the rest are links. 
+- The last name is the one that the linker finds with -lNAME]]) +-_LT_DECL([], [soname_spec], [1], +- [[The coded name of the library, if different from the real name]]) +-_LT_DECL([], [install_override_mode], [1], +- [Permission mode override for installation of shared libraries]) +-_LT_DECL([], [postinstall_cmds], [2], +- [Command to use after installation of a shared archive]) +-_LT_DECL([], [postuninstall_cmds], [2], +- [Command to use after uninstallation of a shared archive]) +-_LT_DECL([], [finish_cmds], [2], +- [Commands used to finish a libtool library installation in a directory]) +-_LT_DECL([], [finish_eval], [1], +- [[As "finish_cmds", except a single script fragment to be evaled but +- not shown]]) +-_LT_DECL([], [hardcode_into_libs], [0], +- [Whether we should hardcode library paths into libraries]) +-_LT_DECL([], [sys_lib_search_path_spec], [2], +- [Compile-time system search path for libraries]) +-_LT_DECL([sys_lib_dlsearch_path_spec], [configure_time_dlsearch_path], [2], +- [Detected run-time system search path for libraries]) +-_LT_DECL([], [configure_time_lt_sys_library_path], [2], +- [Explicit LT_SYS_LIBRARY_PATH set during ./configure time]) +-])# _LT_SYS_DYNAMIC_LINKER +- +- +-# _LT_PATH_TOOL_PREFIX(TOOL) +-# -------------------------- +-# find a file program that can recognize shared library +-AC_DEFUN([_LT_PATH_TOOL_PREFIX], +-[m4_require([_LT_DECL_EGREP])dnl +-AC_MSG_CHECKING([for $1]) +-AC_CACHE_VAL(lt_cv_path_MAGIC_CMD, +-[case $MAGIC_CMD in +-[[\\/*] | ?:[\\/]*]) +- lt_cv_path_MAGIC_CMD=$MAGIC_CMD # Let the user override the test with a path. +- ;; +-*) +- lt_save_MAGIC_CMD=$MAGIC_CMD +- lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR +-dnl $ac_dummy forces splitting on constant user-supplied paths. +-dnl POSIX.2 word splitting is done only on the output of word expansions, +-dnl not every word. This closes a longstanding sh security hole. 
+- ac_dummy="m4_if([$2], , $PATH, [$2])" +- for ac_dir in $ac_dummy; do +- IFS=$lt_save_ifs +- test -z "$ac_dir" && ac_dir=. +- if test -f "$ac_dir/$1"; then +- lt_cv_path_MAGIC_CMD=$ac_dir/"$1" +- if test -n "$file_magic_test_file"; then +- case $deplibs_check_method in +- "file_magic "*) +- file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` +- MAGIC_CMD=$lt_cv_path_MAGIC_CMD +- if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | +- $EGREP "$file_magic_regex" > /dev/null; then +- : +- else +- cat <<_LT_EOF 1>&2 +- +-*** Warning: the command libtool uses to detect shared libraries, +-*** $file_magic_cmd, produces output that libtool cannot recognize. +-*** The result is that libtool may fail to recognize shared libraries +-*** as such. This will affect the creation of libtool libraries that +-*** depend on shared libraries, but programs linked with such libtool +-*** libraries will work regardless of this problem. Nevertheless, you +-*** may want to report the problem to your system manager and/or to +-*** bug-libtool@gnu.org +- +-_LT_EOF +- fi ;; +- esac +- fi +- break +- fi +- done +- IFS=$lt_save_ifs +- MAGIC_CMD=$lt_save_MAGIC_CMD +- ;; +-esac]) +-MAGIC_CMD=$lt_cv_path_MAGIC_CMD +-if test -n "$MAGIC_CMD"; then +- AC_MSG_RESULT($MAGIC_CMD) +-else +- AC_MSG_RESULT(no) +-fi +-_LT_DECL([], [MAGIC_CMD], [0], +- [Used to examine libraries when file_magic_cmd begins with "file"])dnl +-])# _LT_PATH_TOOL_PREFIX +- +-# Old name: +-AU_ALIAS([AC_PATH_TOOL_PREFIX], [_LT_PATH_TOOL_PREFIX]) +-dnl aclocal-1.4 backwards compatibility: +-dnl AC_DEFUN([AC_PATH_TOOL_PREFIX], []) +- +- +-# _LT_PATH_MAGIC +-# -------------- +-# find a file program that can recognize a shared library +-m4_defun([_LT_PATH_MAGIC], +-[_LT_PATH_TOOL_PREFIX(${ac_tool_prefix}file, /usr/bin$PATH_SEPARATOR$PATH) +-if test -z "$lt_cv_path_MAGIC_CMD"; then +- if test -n "$ac_tool_prefix"; then +- _LT_PATH_TOOL_PREFIX(file, /usr/bin$PATH_SEPARATOR$PATH) +- else +- MAGIC_CMD=: +- fi 
+-fi +-])# _LT_PATH_MAGIC +- +- +-# LT_PATH_LD +-# ---------- +-# find the pathname to the GNU or non-GNU linker +-AC_DEFUN([LT_PATH_LD], +-[AC_REQUIRE([AC_PROG_CC])dnl +-AC_REQUIRE([AC_CANONICAL_HOST])dnl +-AC_REQUIRE([AC_CANONICAL_BUILD])dnl +-m4_require([_LT_DECL_SED])dnl +-m4_require([_LT_DECL_EGREP])dnl +-m4_require([_LT_PROG_ECHO_BACKSLASH])dnl +- +-AC_ARG_WITH([gnu-ld], +- [AS_HELP_STRING([--with-gnu-ld], +- [assume the C compiler uses GNU ld @<:@default=no@:>@])], +- [test no = "$withval" || with_gnu_ld=yes], +- [with_gnu_ld=no])dnl +- +-ac_prog=ld +-if test yes = "$GCC"; then +- # Check if gcc -print-prog-name=ld gives a path. +- AC_MSG_CHECKING([for ld used by $CC]) +- case $host in +- *-*-mingw*) +- # gcc leaves a trailing carriage return, which upsets mingw +- ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; +- *) +- ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; +- esac +- case $ac_prog in +- # Accept absolute paths. +- [[\\/]]* | ?:[[\\/]]*) +- re_direlt='/[[^/]][[^/]]*/\.\./' +- # Canonicalize the pathname of ld +- ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` +- while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do +- ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` +- done +- test -z "$LD" && LD=$ac_prog +- ;; +- "") +- # If it fails, then pretend we aren't using GCC. +- ac_prog=ld +- ;; +- *) +- # If it is relative, then search for the first ld in PATH. +- with_gnu_ld=unknown +- ;; +- esac +-elif test yes = "$with_gnu_ld"; then +- AC_MSG_CHECKING([for GNU ld]) +-else +- AC_MSG_CHECKING([for non-GNU ld]) +-fi +-AC_CACHE_VAL(lt_cv_path_LD, +-[if test -z "$LD"; then +- lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR +- for ac_dir in $PATH; do +- IFS=$lt_save_ifs +- test -z "$ac_dir" && ac_dir=. +- if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then +- lt_cv_path_LD=$ac_dir/$ac_prog +- # Check to see if the program is GNU ld. I'd rather use --version, +- # but apparently some variants of GNU ld only accept -v. 
+- # Break only if it was the GNU/non-GNU ld that we prefer. +- case `"$lt_cv_path_LD" -v 2>&1 &1 conftest.i +-cat conftest.i conftest.i >conftest2.i +-: ${lt_DD:=$DD} +-AC_PATH_PROGS_FEATURE_CHECK([lt_DD], [dd], +-[if "$ac_path_lt_DD" bs=32 count=1 conftest.out 2>/dev/null; then +- cmp -s conftest.i conftest.out \ +- && ac_cv_path_lt_DD="$ac_path_lt_DD" ac_path_lt_DD_found=: +-fi]) +-rm -f conftest.i conftest2.i conftest.out]) +-])# _LT_PATH_DD +- +- +-# _LT_CMD_TRUNCATE +-# ---------------- +-# find command to truncate a binary pipe +-m4_defun([_LT_CMD_TRUNCATE], +-[m4_require([_LT_PATH_DD]) +-AC_CACHE_CHECK([how to truncate binary pipes], [lt_cv_truncate_bin], +-[printf 0123456789abcdef0123456789abcdef >conftest.i +-cat conftest.i conftest.i >conftest2.i +-lt_cv_truncate_bin= +-if "$ac_cv_path_lt_DD" bs=32 count=1 conftest.out 2>/dev/null; then +- cmp -s conftest.i conftest.out \ +- && lt_cv_truncate_bin="$ac_cv_path_lt_DD bs=4096 count=1" +-fi +-rm -f conftest.i conftest2.i conftest.out +-test -z "$lt_cv_truncate_bin" && lt_cv_truncate_bin="$SED -e 4q"]) +-_LT_DECL([lt_truncate_bin], [lt_cv_truncate_bin], [1], +- [Command to truncate a binary pipe]) +-])# _LT_CMD_TRUNCATE +- +- +-# _LT_CHECK_MAGIC_METHOD +-# ---------------------- +-# how to check for library dependencies +-# -- PORTME fill in with the dynamic library characteristics +-m4_defun([_LT_CHECK_MAGIC_METHOD], +-[m4_require([_LT_DECL_EGREP]) +-m4_require([_LT_DECL_OBJDUMP]) +-AC_CACHE_CHECK([how to recognize dependent libraries], +-lt_cv_deplibs_check_method, +-[lt_cv_file_magic_cmd='$MAGIC_CMD' +-lt_cv_file_magic_test_file= +-lt_cv_deplibs_check_method='unknown' +-# Need to set the preceding variable on all platforms that support +-# interlibrary dependencies. +-# 'none' -- dependencies not supported. +-# 'unknown' -- same as none, but documents that we really don't know. +-# 'pass_all' -- all dependencies passed with no checks. +-# 'test_compile' -- check by making test program. 
+-# 'file_magic [[regex]]' -- check by looking for files in library path +-# that responds to the $file_magic_cmd with a given extended regex. +-# If you have 'file' or equivalent on your system and you're not sure +-# whether 'pass_all' will *always* work, you probably want this one. +- +-case $host_os in +-aix[[4-9]]*) +- lt_cv_deplibs_check_method=pass_all +- ;; +- +-beos*) +- lt_cv_deplibs_check_method=pass_all +- ;; +- +-bsdi[[45]]*) +- lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib)' +- lt_cv_file_magic_cmd='$FILECMD -L' +- lt_cv_file_magic_test_file=/shlib/libc.so +- ;; +- +-cygwin*) +- # func_win32_libid is a shell function defined in ltmain.sh +- lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' +- lt_cv_file_magic_cmd='func_win32_libid' +- ;; +- +-mingw* | pw32*) +- # Base MSYS/MinGW do not provide the 'file' command needed by +- # func_win32_libid shell function, so use a weaker test based on 'objdump', +- # unless we find 'file', for example because we are cross-compiling. +- if ( file / ) >/dev/null 2>&1; then +- lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' +- lt_cv_file_magic_cmd='func_win32_libid' +- else +- # Keep this pattern in sync with the one in func_win32_libid. +- lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' +- lt_cv_file_magic_cmd='$OBJDUMP -f' +- fi +- ;; +- +-cegcc*) +- # use the weaker test based on 'objdump'. See mingw*. +- lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?' +- lt_cv_file_magic_cmd='$OBJDUMP -f' +- ;; +- +-darwin* | rhapsody*) +- lt_cv_deplibs_check_method=pass_all +- ;; +- +-freebsd* | dragonfly* | midnightbsd*) +- if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then +- case $host_cpu in +- i*86 ) +- # Not sure whether the presence of OpenBSD here was a mistake. +- # Let's accept both of them until this is cleared up. 
+- lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[[3-9]]86 (compact )?demand paged shared library' +- lt_cv_file_magic_cmd=$FILECMD +- lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` +- ;; +- esac +- else +- lt_cv_deplibs_check_method=pass_all +- fi +- ;; +- +-haiku*) +- lt_cv_deplibs_check_method=pass_all +- ;; +- +-hpux10.20* | hpux11*) +- lt_cv_file_magic_cmd=$FILECMD +- case $host_cpu in +- ia64*) +- lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|ELF-[[0-9]][[0-9]]) shared object file - IA64' +- lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so +- ;; +- hppa*64*) +- [lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF[ -][0-9][0-9])(-bit)?( [LM]SB)? shared object( file)?[, -]* PA-RISC [0-9]\.[0-9]'] +- lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl +- ;; +- *) +- lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|PA-RISC[[0-9]]\.[[0-9]]) shared library' +- lt_cv_file_magic_test_file=/usr/lib/libc.sl +- ;; +- esac +- ;; +- +-interix[[3-9]]*) +- # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here +- lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|\.a)$' +- ;; +- +-irix5* | irix6* | nonstopux*) +- case $LD in +- *-32|*"-32 ") libmagic=32-bit;; +- *-n32|*"-n32 ") libmagic=N32;; +- *-64|*"-64 ") libmagic=64-bit;; +- *) libmagic=never-match;; +- esac +- lt_cv_deplibs_check_method=pass_all +- ;; +- +-# This must be glibc/ELF. 
+-linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) +- lt_cv_deplibs_check_method=pass_all +- ;; +- +-netbsd*) +- if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then +- lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$' +- else +- lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|_pic\.a)$' +- fi +- ;; +- +-newos6*) +- lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (executable|dynamic lib)' +- lt_cv_file_magic_cmd=$FILECMD +- lt_cv_file_magic_test_file=/usr/lib/libnls.so +- ;; +- +-*nto* | *qnx*) +- lt_cv_deplibs_check_method=pass_all +- ;; +- +-openbsd* | bitrig*) +- if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then +- lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|\.so|_pic\.a)$' +- else +- lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$' +- fi +- ;; +- +-osf3* | osf4* | osf5*) +- lt_cv_deplibs_check_method=pass_all +- ;; +- +-rdos*) +- lt_cv_deplibs_check_method=pass_all +- ;; +- +-solaris*) +- lt_cv_deplibs_check_method=pass_all +- ;; +- +-sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) +- lt_cv_deplibs_check_method=pass_all +- ;; +- +-sysv4 | sysv4.3*) +- case $host_vendor in +- motorola) +- lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib) M[[0-9]][[0-9]]* Version [[0-9]]' +- lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` +- ;; +- ncr) +- lt_cv_deplibs_check_method=pass_all +- ;; +- sequent) +- lt_cv_file_magic_cmd='/bin/file' +- lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB (shared object|dynamic lib )' +- ;; +- sni) +- lt_cv_file_magic_cmd='/bin/file' +- lt_cv_deplibs_check_method="file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB dynamic lib" +- lt_cv_file_magic_test_file=/lib/libc.so +- ;; +- siemens) +- lt_cv_deplibs_check_method=pass_all +- ;; +- pc) +- lt_cv_deplibs_check_method=pass_all +- ;; +- 
esac +- ;; +- +-tpf*) +- lt_cv_deplibs_check_method=pass_all +- ;; +-os2*) +- lt_cv_deplibs_check_method=pass_all +- ;; +-esac +-]) +- +-file_magic_glob= +-want_nocaseglob=no +-if test "$build" = "$host"; then +- case $host_os in +- mingw* | pw32*) +- if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then +- want_nocaseglob=yes +- else +- file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[[\1]]\/[[\1]]\/g;/g"` +- fi +- ;; +- esac +-fi +- +-file_magic_cmd=$lt_cv_file_magic_cmd +-deplibs_check_method=$lt_cv_deplibs_check_method +-test -z "$deplibs_check_method" && deplibs_check_method=unknown +- +-_LT_DECL([], [deplibs_check_method], [1], +- [Method to check whether dependent libraries are shared objects]) +-_LT_DECL([], [file_magic_cmd], [1], +- [Command to use when deplibs_check_method = "file_magic"]) +-_LT_DECL([], [file_magic_glob], [1], +- [How to find potential files when deplibs_check_method = "file_magic"]) +-_LT_DECL([], [want_nocaseglob], [1], +- [Find potential files using nocaseglob when deplibs_check_method = "file_magic"]) +-])# _LT_CHECK_MAGIC_METHOD +- +- +-# LT_PATH_NM +-# ---------- +-# find the pathname to a BSD- or MS-compatible name lister +-AC_DEFUN([LT_PATH_NM], +-[AC_REQUIRE([AC_PROG_CC])dnl +-AC_CACHE_CHECK([for BSD- or MS-compatible name lister (nm)], lt_cv_path_NM, +-[if test -n "$NM"; then +- # Let the user override the test. +- lt_cv_path_NM=$NM +-else +- lt_nm_to_check=${ac_tool_prefix}nm +- if test -n "$ac_tool_prefix" && test "$build" = "$host"; then +- lt_nm_to_check="$lt_nm_to_check nm" +- fi +- for lt_tmp_nm in $lt_nm_to_check; do +- lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR +- for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do +- IFS=$lt_save_ifs +- test -z "$ac_dir" && ac_dir=. +- tmp_nm=$ac_dir/$lt_tmp_nm +- if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext"; then +- # Check to see if the nm accepts a BSD-compat flag. 
+- # Adding the 'sed 1q' prevents false positives on HP-UX, which says: +- # nm: unknown option "B" ignored +- # Tru64's nm complains that /dev/null is an invalid object file +- # MSYS converts /dev/null to NUL, MinGW nm treats NUL as empty +- case $build_os in +- mingw*) lt_bad_file=conftest.nm/nofile ;; +- *) lt_bad_file=/dev/null ;; +- esac +- case `"$tmp_nm" -B $lt_bad_file 2>&1 | $SED '1q'` in +- *$lt_bad_file* | *'Invalid file or object type'*) +- lt_cv_path_NM="$tmp_nm -B" +- break 2 +- ;; +- *) +- case `"$tmp_nm" -p /dev/null 2>&1 | $SED '1q'` in +- */dev/null*) +- lt_cv_path_NM="$tmp_nm -p" +- break 2 +- ;; +- *) +- lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but +- continue # so that we can try to find one that supports BSD flags +- ;; +- esac +- ;; +- esac +- fi +- done +- IFS=$lt_save_ifs +- done +- : ${lt_cv_path_NM=no} +-fi]) +-if test no != "$lt_cv_path_NM"; then +- NM=$lt_cv_path_NM +-else +- # Didn't find any BSD compatible name lister, look for dumpbin. +- if test -n "$DUMPBIN"; then : +- # Let the user override the test. 
+- else +- AC_CHECK_TOOLS(DUMPBIN, [dumpbin "link -dump"], :) +- case `$DUMPBIN -symbols -headers /dev/null 2>&1 | $SED '1q'` in +- *COFF*) +- DUMPBIN="$DUMPBIN -symbols -headers" +- ;; +- *) +- DUMPBIN=: +- ;; +- esac +- fi +- AC_SUBST([DUMPBIN]) +- if test : != "$DUMPBIN"; then +- NM=$DUMPBIN +- fi +-fi +-test -z "$NM" && NM=nm +-AC_SUBST([NM]) +-_LT_DECL([], [NM], [1], [A BSD- or MS-compatible name lister])dnl +- +-AC_CACHE_CHECK([the name lister ($NM) interface], [lt_cv_nm_interface], +- [lt_cv_nm_interface="BSD nm" +- echo "int some_variable = 0;" > conftest.$ac_ext +- (eval echo "\"\$as_me:$LINENO: $ac_compile\"" >&AS_MESSAGE_LOG_FD) +- (eval "$ac_compile" 2>conftest.err) +- cat conftest.err >&AS_MESSAGE_LOG_FD +- (eval echo "\"\$as_me:$LINENO: $NM \\\"conftest.$ac_objext\\\"\"" >&AS_MESSAGE_LOG_FD) +- (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out) +- cat conftest.err >&AS_MESSAGE_LOG_FD +- (eval echo "\"\$as_me:$LINENO: output\"" >&AS_MESSAGE_LOG_FD) +- cat conftest.out >&AS_MESSAGE_LOG_FD +- if $GREP 'External.*some_variable' conftest.out > /dev/null; then +- lt_cv_nm_interface="MS dumpbin" +- fi +- rm -f conftest*]) +-])# LT_PATH_NM +- +-# Old names: +-AU_ALIAS([AM_PROG_NM], [LT_PATH_NM]) +-AU_ALIAS([AC_PROG_NM], [LT_PATH_NM]) +-dnl aclocal-1.4 backwards compatibility: +-dnl AC_DEFUN([AM_PROG_NM], []) +-dnl AC_DEFUN([AC_PROG_NM], []) +- +-# _LT_CHECK_SHAREDLIB_FROM_LINKLIB +-# -------------------------------- +-# how to determine the name of the shared library +-# associated with a specific link library. 
+-# -- PORTME fill in with the dynamic library characteristics +-m4_defun([_LT_CHECK_SHAREDLIB_FROM_LINKLIB], +-[m4_require([_LT_DECL_EGREP]) +-m4_require([_LT_DECL_OBJDUMP]) +-m4_require([_LT_DECL_DLLTOOL]) +-AC_CACHE_CHECK([how to associate runtime and link libraries], +-lt_cv_sharedlib_from_linklib_cmd, +-[lt_cv_sharedlib_from_linklib_cmd='unknown' +- +-case $host_os in +-cygwin* | mingw* | pw32* | cegcc*) +- # two different shell functions defined in ltmain.sh; +- # decide which one to use based on capabilities of $DLLTOOL +- case `$DLLTOOL --help 2>&1` in +- *--identify-strict*) +- lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib +- ;; +- *) +- lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback +- ;; +- esac +- ;; +-*) +- # fallback: assume linklib IS sharedlib +- lt_cv_sharedlib_from_linklib_cmd=$ECHO +- ;; +-esac +-]) +-sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd +-test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO +- +-_LT_DECL([], [sharedlib_from_linklib_cmd], [1], +- [Command to associate shared and link libraries]) +-])# _LT_CHECK_SHAREDLIB_FROM_LINKLIB +- +- +-# _LT_PATH_MANIFEST_TOOL +-# ---------------------- +-# locate the manifest tool +-m4_defun([_LT_PATH_MANIFEST_TOOL], +-[AC_CHECK_TOOL(MANIFEST_TOOL, mt, :) +-test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt +-AC_CACHE_CHECK([if $MANIFEST_TOOL is a manifest tool], [lt_cv_path_mainfest_tool], +- [lt_cv_path_mainfest_tool=no +- echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&AS_MESSAGE_LOG_FD +- $MANIFEST_TOOL '-?' 
2>conftest.err > conftest.out +- cat conftest.err >&AS_MESSAGE_LOG_FD +- if $GREP 'Manifest Tool' conftest.out > /dev/null; then +- lt_cv_path_mainfest_tool=yes +- fi +- rm -f conftest*]) +-if test yes != "$lt_cv_path_mainfest_tool"; then +- MANIFEST_TOOL=: +-fi +-_LT_DECL([], [MANIFEST_TOOL], [1], [Manifest tool])dnl +-])# _LT_PATH_MANIFEST_TOOL +- +- +-# _LT_DLL_DEF_P([FILE]) +-# --------------------- +-# True iff FILE is a Windows DLL '.def' file. +-# Keep in sync with func_dll_def_p in the libtool script +-AC_DEFUN([_LT_DLL_DEF_P], +-[dnl +- test DEF = "`$SED -n dnl +- -e '\''s/^[[ ]]*//'\'' dnl Strip leading whitespace +- -e '\''/^\(;.*\)*$/d'\'' dnl Delete empty lines and comments +- -e '\''s/^\(EXPORTS\|LIBRARY\)\([[ ]].*\)*$/DEF/p'\'' dnl +- -e q dnl Only consider the first "real" line +- $1`" dnl +-])# _LT_DLL_DEF_P +- +- +-# LT_LIB_M +-# -------- +-# check for math library +-AC_DEFUN([LT_LIB_M], +-[AC_REQUIRE([AC_CANONICAL_HOST])dnl +-LIBM= +-case $host in +-*-*-beos* | *-*-cegcc* | *-*-cygwin* | *-*-haiku* | *-*-pw32* | *-*-darwin*) +- # These system don't have libm, or don't need it +- ;; +-*-ncr-sysv4.3*) +- AC_CHECK_LIB(mw, _mwvalidcheckl, LIBM=-lmw) +- AC_CHECK_LIB(m, cos, LIBM="$LIBM -lm") +- ;; +-*) +- AC_CHECK_LIB(m, cos, LIBM=-lm) +- ;; +-esac +-AC_SUBST([LIBM]) +-])# LT_LIB_M +- +-# Old name: +-AU_ALIAS([AC_CHECK_LIBM], [LT_LIB_M]) +-dnl aclocal-1.4 backwards compatibility: +-dnl AC_DEFUN([AC_CHECK_LIBM], []) +- +- +-# _LT_COMPILER_NO_RTTI([TAGNAME]) +-# ------------------------------- +-m4_defun([_LT_COMPILER_NO_RTTI], +-[m4_require([_LT_TAG_COMPILER])dnl +- +-_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)= +- +-if test yes = "$GCC"; then +- case $cc_basename in +- nvcc*) +- _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -Xcompiler -fno-builtin' ;; +- *) +- _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' ;; +- esac +- +- _LT_COMPILER_OPTION([if $compiler supports -fno-rtti -fno-exceptions], +- 
lt_cv_prog_compiler_rtti_exceptions, +- [-fno-rtti -fno-exceptions], [], +- [_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)="$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1) -fno-rtti -fno-exceptions"]) +-fi +-_LT_TAGDECL([no_builtin_flag], [lt_prog_compiler_no_builtin_flag], [1], +- [Compiler flag to turn off builtin functions]) +-])# _LT_COMPILER_NO_RTTI +- +- +-# _LT_CMD_GLOBAL_SYMBOLS +-# ---------------------- +-m4_defun([_LT_CMD_GLOBAL_SYMBOLS], +-[AC_REQUIRE([AC_CANONICAL_HOST])dnl +-AC_REQUIRE([AC_PROG_CC])dnl +-AC_REQUIRE([AC_PROG_AWK])dnl +-AC_REQUIRE([LT_PATH_NM])dnl +-AC_REQUIRE([LT_PATH_LD])dnl +-m4_require([_LT_DECL_SED])dnl +-m4_require([_LT_DECL_EGREP])dnl +-m4_require([_LT_TAG_COMPILER])dnl +- +-# Check for command to grab the raw symbol name followed by C symbol from nm. +-AC_MSG_CHECKING([command to parse $NM output from $compiler object]) +-AC_CACHE_VAL([lt_cv_sys_global_symbol_pipe], +-[ +-# These are sane defaults that work on at least a few old systems. +-# [They come from Ultrix. What could be older than Ultrix?!! ;)] +- +-# Character class describing NM global symbol codes. +-symcode='[[BCDEGRST]]' +- +-# Regexp to match symbols that can be accessed directly from C. +-sympat='\([[_A-Za-z]][[_A-Za-z0-9]]*\)' +- +-# Define system-specific variables. +-case $host_os in +-aix*) +- symcode='[[BCDT]]' +- ;; +-cygwin* | mingw* | pw32* | cegcc*) +- symcode='[[ABCDGISTW]]' +- ;; +-hpux*) +- if test ia64 = "$host_cpu"; then +- symcode='[[ABCDEGRST]]' +- fi +- ;; +-irix* | nonstopux*) +- symcode='[[BCDEGRST]]' +- ;; +-osf*) +- symcode='[[BCDEGQRST]]' +- ;; +-solaris*) +- symcode='[[BDRT]]' +- ;; +-sco3.2v5*) +- symcode='[[DT]]' +- ;; +-sysv4.2uw2*) +- symcode='[[DT]]' +- ;; +-sysv5* | sco5v6* | unixware* | OpenUNIX*) +- symcode='[[ABDT]]' +- ;; +-sysv4) +- symcode='[[DFNSTU]]' +- ;; +-esac +- +-# If we're using GNU nm, then use its standard symbol codes. 
+-case `$NM -V 2>&1` in +-*GNU* | *'with BFD'*) +- symcode='[[ABCDGIRSTW]]' ;; +-esac +- +-if test "$lt_cv_nm_interface" = "MS dumpbin"; then +- # Gets list of data symbols to import. +- lt_cv_sys_global_symbol_to_import="$SED -n -e 's/^I .* \(.*\)$/\1/p'" +- # Adjust the below global symbol transforms to fixup imported variables. +- lt_cdecl_hook=" -e 's/^I .* \(.*\)$/extern __declspec(dllimport) char \1;/p'" +- lt_c_name_hook=" -e 's/^I .* \(.*\)$/ {\"\1\", (void *) 0},/p'" +- lt_c_name_lib_hook="\ +- -e 's/^I .* \(lib.*\)$/ {\"\1\", (void *) 0},/p'\ +- -e 's/^I .* \(.*\)$/ {\"lib\1\", (void *) 0},/p'" +-else +- # Disable hooks by default. +- lt_cv_sys_global_symbol_to_import= +- lt_cdecl_hook= +- lt_c_name_hook= +- lt_c_name_lib_hook= +-fi +- +-# Transform an extracted symbol line into a proper C declaration. +-# Some systems (esp. on ia64) link data and code symbols differently, +-# so use this general approach. +-lt_cv_sys_global_symbol_to_cdecl="$SED -n"\ +-$lt_cdecl_hook\ +-" -e 's/^T .* \(.*\)$/extern int \1();/p'"\ +-" -e 's/^$symcode$symcode* .* \(.*\)$/extern char \1;/p'" +- +-# Transform an extracted symbol line into symbol name and symbol address +-lt_cv_sys_global_symbol_to_c_name_address="$SED -n"\ +-$lt_c_name_hook\ +-" -e 's/^: \(.*\) .*$/ {\"\1\", (void *) 0},/p'"\ +-" -e 's/^$symcode$symcode* .* \(.*\)$/ {\"\1\", (void *) \&\1},/p'" +- +-# Transform an extracted symbol line into symbol name with lib prefix and +-# symbol address. +-lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="$SED -n"\ +-$lt_c_name_lib_hook\ +-" -e 's/^: \(.*\) .*$/ {\"\1\", (void *) 0},/p'"\ +-" -e 's/^$symcode$symcode* .* \(lib.*\)$/ {\"\1\", (void *) \&\1},/p'"\ +-" -e 's/^$symcode$symcode* .* \(.*\)$/ {\"lib\1\", (void *) \&\1},/p'" +- +-# Handle CRLF in mingw tool chain +-opt_cr= +-case $build_os in +-mingw*) +- opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp +- ;; +-esac +- +-# Try without a prefix underscore, then with it. 
+-for ac_symprfx in "" "_"; do +- +- # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. +- symxfrm="\\1 $ac_symprfx\\2 \\2" +- +- # Write the raw and C identifiers. +- if test "$lt_cv_nm_interface" = "MS dumpbin"; then +- # Fake it for dumpbin and say T for any non-static function, +- # D for any global variable and I for any imported variable. +- # Also find C++ and __fastcall symbols from MSVC++ or ICC, +- # which start with @ or ?. +- lt_cv_sys_global_symbol_pipe="$AWK ['"\ +-" {last_section=section; section=\$ 3};"\ +-" /^COFF SYMBOL TABLE/{for(i in hide) delete hide[i]};"\ +-" /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\ +-" /^ *Symbol name *: /{split(\$ 0,sn,\":\"); si=substr(sn[2],2)};"\ +-" /^ *Type *: code/{print \"T\",si,substr(si,length(prfx))};"\ +-" /^ *Type *: data/{print \"I\",si,substr(si,length(prfx))};"\ +-" \$ 0!~/External *\|/{next};"\ +-" / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\ +-" {if(hide[section]) next};"\ +-" {f=\"D\"}; \$ 0~/\(\).*\|/{f=\"T\"};"\ +-" {split(\$ 0,a,/\||\r/); split(a[2],s)};"\ +-" s[1]~/^[@?]/{print f,s[1],s[1]; next};"\ +-" s[1]~prfx {split(s[1],t,\"@\"); print f,t[1],substr(t[1],length(prfx))}"\ +-" ' prfx=^$ac_symprfx]" +- else +- lt_cv_sys_global_symbol_pipe="$SED -n -e 's/^.*[[ ]]\($symcode$symcode*\)[[ ]][[ ]]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" +- fi +- lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | $SED '/ __gnu_lto/d'" +- +- # Check to see that the pipe works correctly. +- pipe_works=no +- +- rm -f conftest* +- cat > conftest.$ac_ext <<_LT_EOF +-#ifdef __cplusplus +-extern "C" { +-#endif +-char nm_test_var; +-void nm_test_func(void); +-void nm_test_func(void){} +-#ifdef __cplusplus +-} +-#endif +-int main(){nm_test_var='a';nm_test_func();return(0);} +-_LT_EOF +- +- if AC_TRY_EVAL(ac_compile); then +- # Now try to grab the symbols. 
+- nlist=conftest.nm +- if AC_TRY_EVAL(NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist) && test -s "$nlist"; then +- # Try sorting and uniquifying the output. +- if sort "$nlist" | uniq > "$nlist"T; then +- mv -f "$nlist"T "$nlist" +- else +- rm -f "$nlist"T +- fi +- +- # Make sure that we snagged all the symbols we need. +- if $GREP ' nm_test_var$' "$nlist" >/dev/null; then +- if $GREP ' nm_test_func$' "$nlist" >/dev/null; then +- cat <<_LT_EOF > conftest.$ac_ext +-/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ +-#if defined _WIN32 || defined __CYGWIN__ || defined _WIN32_WCE +-/* DATA imports from DLLs on WIN32 can't be const, because runtime +- relocations are performed -- see ld's documentation on pseudo-relocs. */ +-# define LT@&t@_DLSYM_CONST +-#elif defined __osf__ +-/* This system does not cope well with relocations in const data. */ +-# define LT@&t@_DLSYM_CONST +-#else +-# define LT@&t@_DLSYM_CONST const +-#endif +- +-#ifdef __cplusplus +-extern "C" { +-#endif +- +-_LT_EOF +- # Now generate the symbol file. +- eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext' +- +- cat <<_LT_EOF >> conftest.$ac_ext +- +-/* The mapping between symbol names and symbols. */ +-LT@&t@_DLSYM_CONST struct { +- const char *name; +- void *address; +-} +-lt__PROGRAM__LTX_preloaded_symbols[[]] = +-{ +- { "@PROGRAM@", (void *) 0 }, +-_LT_EOF +- $SED "s/^$symcode$symcode* .* \(.*\)$/ {\"\1\", (void *) \&\1},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext +- cat <<\_LT_EOF >> conftest.$ac_ext +- {0, (void *) 0} +-}; +- +-/* This works around a problem in FreeBSD linker */ +-#ifdef FREEBSD_WORKAROUND +-static const void *lt_preloaded_setup() { +- return lt__PROGRAM__LTX_preloaded_symbols; +-} +-#endif +- +-#ifdef __cplusplus +-} +-#endif +-_LT_EOF +- # Now try linking the two files. 
+- mv conftest.$ac_objext conftstm.$ac_objext +- lt_globsym_save_LIBS=$LIBS +- lt_globsym_save_CFLAGS=$CFLAGS +- LIBS=conftstm.$ac_objext +- CFLAGS="$CFLAGS$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)" +- if AC_TRY_EVAL(ac_link) && test -s conftest$ac_exeext; then +- pipe_works=yes +- fi +- LIBS=$lt_globsym_save_LIBS +- CFLAGS=$lt_globsym_save_CFLAGS +- else +- echo "cannot find nm_test_func in $nlist" >&AS_MESSAGE_LOG_FD +- fi +- else +- echo "cannot find nm_test_var in $nlist" >&AS_MESSAGE_LOG_FD +- fi +- else +- echo "cannot run $lt_cv_sys_global_symbol_pipe" >&AS_MESSAGE_LOG_FD +- fi +- else +- echo "$progname: failed program was:" >&AS_MESSAGE_LOG_FD +- cat conftest.$ac_ext >&5 +- fi +- rm -rf conftest* conftst* +- +- # Do not use the global_symbol_pipe unless it works. +- if test yes = "$pipe_works"; then +- break +- else +- lt_cv_sys_global_symbol_pipe= +- fi +-done +-]) +-if test -z "$lt_cv_sys_global_symbol_pipe"; then +- lt_cv_sys_global_symbol_to_cdecl= +-fi +-if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then +- AC_MSG_RESULT(failed) +-else +- AC_MSG_RESULT(ok) +-fi +- +-# Response file support. 
+-if test "$lt_cv_nm_interface" = "MS dumpbin"; then +- nm_file_list_spec='@' +-elif $NM --help 2>/dev/null | grep '[[@]]FILE' >/dev/null; then +- nm_file_list_spec='@' +-fi +- +-_LT_DECL([global_symbol_pipe], [lt_cv_sys_global_symbol_pipe], [1], +- [Take the output of nm and produce a listing of raw symbols and C names]) +-_LT_DECL([global_symbol_to_cdecl], [lt_cv_sys_global_symbol_to_cdecl], [1], +- [Transform the output of nm in a proper C declaration]) +-_LT_DECL([global_symbol_to_import], [lt_cv_sys_global_symbol_to_import], [1], +- [Transform the output of nm into a list of symbols to manually relocate]) +-_LT_DECL([global_symbol_to_c_name_address], +- [lt_cv_sys_global_symbol_to_c_name_address], [1], +- [Transform the output of nm in a C name address pair]) +-_LT_DECL([global_symbol_to_c_name_address_lib_prefix], +- [lt_cv_sys_global_symbol_to_c_name_address_lib_prefix], [1], +- [Transform the output of nm in a C name address pair when lib prefix is needed]) +-_LT_DECL([nm_interface], [lt_cv_nm_interface], [1], +- [The name lister interface]) +-_LT_DECL([], [nm_file_list_spec], [1], +- [Specify filename containing input files for $NM]) +-]) # _LT_CMD_GLOBAL_SYMBOLS +- +- +-# _LT_COMPILER_PIC([TAGNAME]) +-# --------------------------- +-m4_defun([_LT_COMPILER_PIC], +-[m4_require([_LT_TAG_COMPILER])dnl +-_LT_TAGVAR(lt_prog_compiler_wl, $1)= +-_LT_TAGVAR(lt_prog_compiler_pic, $1)= +-_LT_TAGVAR(lt_prog_compiler_static, $1)= +- +-m4_if([$1], [CXX], [ +- # C++ specific cases for pic, static, wl, etc. +- if test yes = "$GXX"; then +- _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' +- _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' +- +- case $host_os in +- aix*) +- # All AIX code is PIC. 
+- if test ia64 = "$host_cpu"; then +- # AIX 5 now supports IA64 processor +- _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' +- fi +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' +- ;; +- +- amigaos*) +- case $host_cpu in +- powerpc) +- # see comment about AmigaOS4 .so support +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' +- ;; +- m68k) +- # FIXME: we need at least 68020 code to build shared libraries, but +- # adding the '-m68020' flag to GCC prevents building anything better, +- # like '-m68040'. +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' +- ;; +- esac +- ;; +- +- beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) +- # PIC is the default for these OSes. +- ;; +- mingw* | cygwin* | os2* | pw32* | cegcc*) +- # This hack is so that the source file can tell whether it is being +- # built for inclusion in a dll (and should export symbols for example). +- # Although the cygwin gcc ignores -fPIC, still need this for old-style +- # (--disable-auto-import) libraries +- m4_if([$1], [GCJ], [], +- [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) +- case $host_os in +- os2*) +- _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-static' +- ;; +- esac +- ;; +- darwin* | rhapsody*) +- # PIC is the default on this platform +- # Common symbols not allowed in MH_DYLIB files +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' +- ;; +- *djgpp*) +- # DJGPP does not support shared libraries at all +- _LT_TAGVAR(lt_prog_compiler_pic, $1)= +- ;; +- haiku*) +- # PIC is the default for Haiku. +- # The "-static" flag exists, but is broken. +- _LT_TAGVAR(lt_prog_compiler_static, $1)= +- ;; +- interix[[3-9]]*) +- # Interix 3.x gcc -fpic/-fPIC options generate broken code. +- # Instead, we relocate shared libraries at runtime. +- ;; +- sysv4*MP*) +- if test -d /usr/nec; then +- _LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic +- fi +- ;; +- hpux*) +- # PIC is the default for 64-bit PA HP-UX, but not for 32-bit +- # PA HP-UX. 
On IA64 HP-UX, PIC is the default but the pic flag +- # sets the default TLS model and affects inlining. +- case $host_cpu in +- hppa*64*) +- ;; +- *) +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' +- ;; +- esac +- ;; +- *qnx* | *nto*) +- # QNX uses GNU C++, but need to define -shared option too, otherwise +- # it will coredump. +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' +- ;; +- *) +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' +- ;; +- esac +- else +- case $host_os in +- aix[[4-9]]*) +- # All AIX code is PIC. +- if test ia64 = "$host_cpu"; then +- # AIX 5 now supports IA64 processor +- _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' +- else +- _LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' +- fi +- ;; +- chorus*) +- case $cc_basename in +- cxch68*) +- # Green Hills C++ Compiler +- # _LT_TAGVAR(lt_prog_compiler_static, $1)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a" +- ;; +- esac +- ;; +- mingw* | cygwin* | os2* | pw32* | cegcc*) +- # This hack is so that the source file can tell whether it is being +- # built for inclusion in a dll (and should export symbols for example). 
+- m4_if([$1], [GCJ], [], +- [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) +- ;; +- dgux*) +- case $cc_basename in +- ec++*) +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' +- ;; +- ghcx*) +- # Green Hills C++ Compiler +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' +- ;; +- *) +- ;; +- esac +- ;; +- freebsd* | dragonfly* | midnightbsd*) +- # FreeBSD uses GNU C++ +- ;; +- hpux9* | hpux10* | hpux11*) +- case $cc_basename in +- CC*) +- _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' +- _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-a ${wl}archive' +- if test ia64 != "$host_cpu"; then +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' +- fi +- ;; +- aCC*) +- _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' +- _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-a ${wl}archive' +- case $host_cpu in +- hppa*64*|ia64*) +- # +Z the default +- ;; +- *) +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' +- ;; +- esac +- ;; +- *) +- ;; +- esac +- ;; +- interix*) +- # This is c89, which is MS Visual C++ (no shared libs) +- # Anyone wants to do a port? +- ;; +- irix5* | irix6* | nonstopux*) +- case $cc_basename in +- CC*) +- _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' +- _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' +- # CC pic flag -KPIC is the default. +- ;; +- *) +- ;; +- esac +- ;; +- linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) +- case $cc_basename in +- KCC*) +- # KAI C++ Compiler +- _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' +- ;; +- ecpc* ) +- # old Intel C++ for x86_64, which still supported -KPIC. +- _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' +- _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' +- ;; +- icpc* ) +- # Intel C++, used to be incompatible with GCC. +- # ICC 10 doesn't accept -KPIC any more. 
+- _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' +- _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' +- ;; +- pgCC* | pgcpp*) +- # Portland Group C++ compiler +- _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' +- _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' +- ;; +- cxx*) +- # Compaq C++ +- # Make sure the PIC flag is empty. It appears that all Alpha +- # Linux and Compaq Tru64 Unix objects are PIC. +- _LT_TAGVAR(lt_prog_compiler_pic, $1)= +- _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' +- ;; +- xlc* | xlC* | bgxl[[cC]]* | mpixl[[cC]]*) +- # IBM XL 8.0, 9.0 on PPC and BlueGene +- _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic' +- _LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink' +- ;; +- *) +- case `$CC -V 2>&1 | $SED 5q` in +- *Sun\ C*) +- # Sun C++ 5.9 +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' +- _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' +- _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' +- ;; +- esac +- ;; +- esac +- ;; +- lynxos*) +- ;; +- m88k*) +- ;; +- mvs*) +- case $cc_basename in +- cxx*) +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-W c,exportall' +- ;; +- *) +- ;; +- esac +- ;; +- netbsd*) +- ;; +- *qnx* | *nto*) +- # QNX uses GNU C++, but need to define -shared option too, otherwise +- # it will coredump. +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' +- ;; +- osf3* | osf4* | osf5*) +- case $cc_basename in +- KCC*) +- _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' +- ;; +- RCC*) +- # Rational C++ 2.4.1 +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' +- ;; +- cxx*) +- # Digital/Compaq C++ +- _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' +- # Make sure the PIC flag is empty. It appears that all Alpha +- # Linux and Compaq Tru64 Unix objects are PIC. 
+- _LT_TAGVAR(lt_prog_compiler_pic, $1)= +- _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' +- ;; +- *) +- ;; +- esac +- ;; +- psos*) +- ;; +- solaris*) +- case $cc_basename in +- CC* | sunCC*) +- # Sun C++ 4.2, 5.x and Centerline C++ +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' +- _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' +- _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' +- ;; +- gcx*) +- # Green Hills C++ Compiler +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' +- ;; +- *) +- ;; +- esac +- ;; +- sunos4*) +- case $cc_basename in +- CC*) +- # Sun C++ 4.x +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' +- _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' +- ;; +- lcc*) +- # Lucid +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' +- ;; +- *) +- ;; +- esac +- ;; +- sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) +- case $cc_basename in +- CC*) +- _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' +- _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' +- ;; +- esac +- ;; +- tandem*) +- case $cc_basename in +- NCC*) +- # NonStop-UX NCC 3.20 +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' +- ;; +- *) +- ;; +- esac +- ;; +- vxworks*) +- ;; +- *) +- _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no +- ;; +- esac +- fi +-], +-[ +- if test yes = "$GCC"; then +- _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' +- _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' +- +- case $host_os in +- aix*) +- # All AIX code is PIC. 
+- if test ia64 = "$host_cpu"; then +- # AIX 5 now supports IA64 processor +- _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' +- fi +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' +- ;; +- +- amigaos*) +- case $host_cpu in +- powerpc) +- # see comment about AmigaOS4 .so support +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' +- ;; +- m68k) +- # FIXME: we need at least 68020 code to build shared libraries, but +- # adding the '-m68020' flag to GCC prevents building anything better, +- # like '-m68040'. +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' +- ;; +- esac +- ;; +- +- beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) +- # PIC is the default for these OSes. +- ;; +- +- mingw* | cygwin* | pw32* | os2* | cegcc*) +- # This hack is so that the source file can tell whether it is being +- # built for inclusion in a dll (and should export symbols for example). +- # Although the cygwin gcc ignores -fPIC, still need this for old-style +- # (--disable-auto-import) libraries +- m4_if([$1], [GCJ], [], +- [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) +- case $host_os in +- os2*) +- _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-static' +- ;; +- esac +- ;; +- +- darwin* | rhapsody*) +- # PIC is the default on this platform +- # Common symbols not allowed in MH_DYLIB files +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' +- ;; +- +- haiku*) +- # PIC is the default for Haiku. +- # The "-static" flag exists, but is broken. +- _LT_TAGVAR(lt_prog_compiler_static, $1)= +- ;; +- +- hpux*) +- # PIC is the default for 64-bit PA HP-UX, but not for 32-bit +- # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag +- # sets the default TLS model and affects inlining. +- case $host_cpu in +- hppa*64*) +- # +Z the default +- ;; +- *) +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' +- ;; +- esac +- ;; +- +- interix[[3-9]]*) +- # Interix 3.x gcc -fpic/-fPIC options generate broken code. 
+- # Instead, we relocate shared libraries at runtime. +- ;; +- +- msdosdjgpp*) +- # Just because we use GCC doesn't mean we suddenly get shared libraries +- # on systems that don't support them. +- _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no +- enable_shared=no +- ;; +- +- *nto* | *qnx*) +- # QNX uses GNU C++, but need to define -shared option too, otherwise +- # it will coredump. +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' +- ;; +- +- sysv4*MP*) +- if test -d /usr/nec; then +- _LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic +- fi +- ;; +- +- *) +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' +- ;; +- esac +- +- case $cc_basename in +- nvcc*) # Cuda Compiler Driver 2.2 +- _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Xlinker ' +- if test -n "$_LT_TAGVAR(lt_prog_compiler_pic, $1)"; then +- _LT_TAGVAR(lt_prog_compiler_pic, $1)="-Xcompiler $_LT_TAGVAR(lt_prog_compiler_pic, $1)" +- fi +- ;; +- esac +- else +- # PORTME Check for flag to pass linker flags through the system compiler. +- case $host_os in +- aix*) +- _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' +- if test ia64 = "$host_cpu"; then +- # AIX 5 now supports IA64 processor +- _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' +- else +- _LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' +- fi +- ;; +- +- darwin* | rhapsody*) +- # PIC is the default on this platform +- # Common symbols not allowed in MH_DYLIB files +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' +- case $cc_basename in +- nagfor*) +- # NAG Fortran compiler +- _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,-Wl,,' +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' +- _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' +- ;; +- esac +- ;; +- +- mingw* | cygwin* | pw32* | os2* | cegcc*) +- # This hack is so that the source file can tell whether it is being +- # built for inclusion in a dll (and should export symbols for example). 
+- m4_if([$1], [GCJ], [], +- [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) +- case $host_os in +- os2*) +- _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-static' +- ;; +- esac +- ;; +- +- hpux9* | hpux10* | hpux11*) +- _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' +- # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but +- # not for PA HP-UX. +- case $host_cpu in +- hppa*64*|ia64*) +- # +Z the default +- ;; +- *) +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' +- ;; +- esac +- # Is there a better lt_prog_compiler_static that works with the bundled CC? +- _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-a ${wl}archive' +- ;; +- +- irix5* | irix6* | nonstopux*) +- _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' +- # PIC (with -KPIC) is the default. +- _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' +- ;; +- +- linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) +- case $cc_basename in +- # old Intel for x86_64, which still supported -KPIC. +- ecc*) +- _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' +- _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' +- ;; +- # icc used to be incompatible with GCC. +- # ICC 10 doesn't accept -KPIC any more. +- icc* | ifort*) +- _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' +- _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' +- ;; +- # Lahey Fortran 8.1. 
+- lf95*) +- _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='--shared' +- _LT_TAGVAR(lt_prog_compiler_static, $1)='--static' +- ;; +- nagfor*) +- # NAG Fortran compiler +- _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,-Wl,,' +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' +- _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' +- ;; +- tcc*) +- # Fabrice Bellard et al's Tiny C Compiler +- _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' +- _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' +- ;; +- pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) +- # Portland Group compilers (*not* the Pentium gcc compiler, +- # which looks to be a dead project) +- _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' +- _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' +- ;; +- ccc*) +- _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' +- # All Alpha code is PIC. +- _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' +- ;; +- xl* | bgxl* | bgf* | mpixl*) +- # IBM XL C 8.0/Fortran 10.1, 11.1 on PPC and BlueGene +- _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic' +- _LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink' +- ;; +- *) +- case `$CC -V 2>&1 | $SED 5q` in +- *Sun\ Ceres\ Fortran* | *Sun*Fortran*\ [[1-7]].* | *Sun*Fortran*\ 8.[[0-3]]*) +- # Sun Fortran 8.3 passes all unrecognized flags to the linker +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' +- _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' +- _LT_TAGVAR(lt_prog_compiler_wl, $1)='' +- ;; +- *Sun\ F* | *Sun*Fortran*) +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' +- _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' +- _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' +- ;; +- *Sun\ C*) +- # Sun C 5.9 +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' +- _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' +- _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' +- ;; +- *Intel*\ 
[[CF]]*Compiler*) +- _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' +- _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' +- ;; +- *Portland\ Group*) +- _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' +- _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' +- ;; +- esac +- ;; +- esac +- ;; +- +- newsos6) +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' +- _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' +- ;; +- +- *nto* | *qnx*) +- # QNX uses GNU C++, but need to define -shared option too, otherwise +- # it will coredump. +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' +- ;; +- +- osf3* | osf4* | osf5*) +- _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' +- # All OSF/1 code is PIC. +- _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' +- ;; +- +- rdos*) +- _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' +- ;; +- +- solaris*) +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' +- _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' +- case $cc_basename in +- f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) +- _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ';; +- *) +- _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,';; +- esac +- ;; +- +- sunos4*) +- _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' +- _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' +- ;; +- +- sysv4 | sysv4.2uw2* | sysv4.3*) +- _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' +- _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' +- ;; +- +- sysv4*MP*) +- if test -d /usr/nec; then +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-Kconform_pic' +- _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' +- fi +- ;; +- +- sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) +- _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' +- _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' +- ;; +- +- 
unicos*) +- _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' +- _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no +- ;; +- +- uts4*) +- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' +- _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' +- ;; +- +- *) +- _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no +- ;; +- esac +- fi +-]) +-case $host_os in +- # For platforms that do not support PIC, -DPIC is meaningless: +- *djgpp*) +- _LT_TAGVAR(lt_prog_compiler_pic, $1)= +- ;; +- *) +- _LT_TAGVAR(lt_prog_compiler_pic, $1)="$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])" +- ;; +-esac +- +-AC_CACHE_CHECK([for $compiler option to produce PIC], +- [_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)], +- [_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)=$_LT_TAGVAR(lt_prog_compiler_pic, $1)]) +-_LT_TAGVAR(lt_prog_compiler_pic, $1)=$_LT_TAGVAR(lt_cv_prog_compiler_pic, $1) +- +-# +-# Check to make sure the PIC flag actually works. +-# +-if test -n "$_LT_TAGVAR(lt_prog_compiler_pic, $1)"; then +- _LT_COMPILER_OPTION([if $compiler PIC flag $_LT_TAGVAR(lt_prog_compiler_pic, $1) works], +- [_LT_TAGVAR(lt_cv_prog_compiler_pic_works, $1)], +- [$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])], [], +- [case $_LT_TAGVAR(lt_prog_compiler_pic, $1) in +- "" | " "*) ;; +- *) _LT_TAGVAR(lt_prog_compiler_pic, $1)=" $_LT_TAGVAR(lt_prog_compiler_pic, $1)" ;; +- esac], +- [_LT_TAGVAR(lt_prog_compiler_pic, $1)= +- _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no]) +-fi +-_LT_TAGDECL([pic_flag], [lt_prog_compiler_pic], [1], +- [Additional compiler flags for building library objects]) +- +-_LT_TAGDECL([wl], [lt_prog_compiler_wl], [1], +- [How to pass a linker flag through the compiler]) +-# +-# Check to make sure the static flag actually works. 
+-# +-wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1) eval lt_tmp_static_flag=\"$_LT_TAGVAR(lt_prog_compiler_static, $1)\" +-_LT_LINKER_OPTION([if $compiler static flag $lt_tmp_static_flag works], +- _LT_TAGVAR(lt_cv_prog_compiler_static_works, $1), +- $lt_tmp_static_flag, +- [], +- [_LT_TAGVAR(lt_prog_compiler_static, $1)=]) +-_LT_TAGDECL([link_static_flag], [lt_prog_compiler_static], [1], +- [Compiler flag to prevent dynamic linking]) +-])# _LT_COMPILER_PIC +- +- +-# _LT_LINKER_SHLIBS([TAGNAME]) +-# ---------------------------- +-# See if the linker supports building shared libraries. +-m4_defun([_LT_LINKER_SHLIBS], +-[AC_REQUIRE([LT_PATH_LD])dnl +-AC_REQUIRE([LT_PATH_NM])dnl +-m4_require([_LT_PATH_MANIFEST_TOOL])dnl +-m4_require([_LT_FILEUTILS_DEFAULTS])dnl +-m4_require([_LT_DECL_EGREP])dnl +-m4_require([_LT_DECL_SED])dnl +-m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl +-m4_require([_LT_TAG_COMPILER])dnl +-AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) +-m4_if([$1], [CXX], [ +- _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' +- _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'] +- case $host_os in +- aix[[4-9]]*) +- # If we're using GNU nm, then we don't want the "-C" option. +- # -C means demangle to GNU nm, but means don't demangle to AIX nm. +- # Without the "-l" option, or with the "-B" option, AIX nm treats +- # weak defined symbols like other global defined symbols, whereas +- # GNU nm marks them as "W". +- # While the 'weak' keyword is ignored in the Export File, we need +- # it in the Import File for the 'aix-soname' feature, so we have +- # to replace the "-B" option with "-P" for AIX nm. 
+- if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then +- _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && ([substr](\$ 3,1,1) != ".")) { if (\$ 2 == "W") { print \$ 3 " weak" } else { print \$ 3 } } }'\'' | sort -u > $export_symbols' +- else +- _LT_TAGVAR(export_symbols_cmds, $1)='`func_echo_all $NM | $SED -e '\''s/B\([[^B]]*\)$/P\1/'\''` -PCpgl $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "L") || (\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) && ([substr](\$ 1,1,1) != ".")) { if ((\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) { print \$ 1 " weak" } else { print \$ 1 } } }'\'' | sort -u > $export_symbols' +- fi +- ;; +- pw32*) +- _LT_TAGVAR(export_symbols_cmds, $1)=$ltdll_cmds +- ;; +- cygwin* | mingw* | cegcc*) +- case $cc_basename in +- cl* | icl*) +- _LT_TAGVAR(exclude_expsyms, $1)='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' +- ;; +- *) +- _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols' +- _LT_TAGVAR(exclude_expsyms, $1)=['[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname'] +- ;; +- esac +- ;; +- *) +- _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' +- ;; +- esac +-], [ +- runpath_var= +- _LT_TAGVAR(allow_undefined_flag, $1)= +- _LT_TAGVAR(always_export_symbols, $1)=no +- _LT_TAGVAR(archive_cmds, $1)= +- _LT_TAGVAR(archive_expsym_cmds, $1)= +- _LT_TAGVAR(compiler_needs_object, $1)=no +- _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no +- _LT_TAGVAR(export_dynamic_flag_spec, $1)= +- _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience 
| $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' +- _LT_TAGVAR(hardcode_automatic, $1)=no +- _LT_TAGVAR(hardcode_direct, $1)=no +- _LT_TAGVAR(hardcode_direct_absolute, $1)=no +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= +- _LT_TAGVAR(hardcode_libdir_separator, $1)= +- _LT_TAGVAR(hardcode_minus_L, $1)=no +- _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported +- _LT_TAGVAR(inherit_rpath, $1)=no +- _LT_TAGVAR(link_all_deplibs, $1)=unknown +- _LT_TAGVAR(module_cmds, $1)= +- _LT_TAGVAR(module_expsym_cmds, $1)= +- _LT_TAGVAR(old_archive_from_new_cmds, $1)= +- _LT_TAGVAR(old_archive_from_expsyms_cmds, $1)= +- _LT_TAGVAR(thread_safe_flag_spec, $1)= +- _LT_TAGVAR(whole_archive_flag_spec, $1)= +- # include_expsyms should be a list of space-separated symbols to be *always* +- # included in the symbol list +- _LT_TAGVAR(include_expsyms, $1)= +- # exclude_expsyms can be an extended regexp of symbols to exclude +- # it will be wrapped by ' (' and ')$', so one must not match beginning or +- # end of line. Example: 'a|bc|.*d.*' will exclude the symbols 'a' and 'bc', +- # as well as any symbol that contains 'd'. +- _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'] +- # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out +- # platforms (ab)use it in PIC code, but their linkers get confused if +- # the symbol is explicitly referenced. Since portable code cannot +- # rely on this symbol name, it's probably fine to never include it in +- # preloaded symbol tables. +- # Exclude shared library initialization/finalization symbols. +-dnl Note also adjust exclude_expsyms for C++ above. +- extract_expsyms_cmds= +- +- case $host_os in +- cygwin* | mingw* | pw32* | cegcc*) +- # FIXME: the MSVC++ and ICC port hasn't been tested in a loooong time +- # When not using gcc, we currently assume that we are using +- # Microsoft Visual C++ or Intel C++ Compiler. 
+- if test yes != "$GCC"; then +- with_gnu_ld=no +- fi +- ;; +- interix*) +- # we just hope/assume this is gcc and not c89 (= MSVC++ or ICC) +- with_gnu_ld=yes +- ;; +- openbsd* | bitrig*) +- with_gnu_ld=no +- ;; +- esac +- +- _LT_TAGVAR(ld_shlibs, $1)=yes +- +- # On some targets, GNU ld is compatible enough with the native linker +- # that we're better off using the native interface for both. +- lt_use_gnu_ld_interface=no +- if test yes = "$with_gnu_ld"; then +- case $host_os in +- aix*) +- # The AIX port of GNU ld has always aspired to compatibility +- # with the native linker. However, as the warning in the GNU ld +- # block says, versions before 2.19.5* couldn't really create working +- # shared libraries, regardless of the interface used. +- case `$LD -v 2>&1` in +- *\ \(GNU\ Binutils\)\ 2.19.5*) ;; +- *\ \(GNU\ Binutils\)\ 2.[[2-9]]*) ;; +- *\ \(GNU\ Binutils\)\ [[3-9]]*) ;; +- *) +- lt_use_gnu_ld_interface=yes +- ;; +- esac +- ;; +- *) +- lt_use_gnu_ld_interface=yes +- ;; +- esac +- fi +- +- if test yes = "$lt_use_gnu_ld_interface"; then +- # If archive_cmds runs LD, not CC, wlarc should be empty +- wlarc='$wl' +- +- # Set some defaults for GNU ld with shared library support. These +- # are reset later if shared libraries are not supported. Putting them +- # here allows them to be overridden if necessary. +- runpath_var=LD_RUN_PATH +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' +- _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic' +- # ancient GNU ld didn't support --whole-archive et. al. 
+- if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then +- _LT_TAGVAR(whole_archive_flag_spec, $1)=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive' +- else +- _LT_TAGVAR(whole_archive_flag_spec, $1)= +- fi +- supports_anon_versioning=no +- case `$LD -v | $SED -e 's/([[^)]]\+)\s\+//' 2>&1` in +- *GNU\ gold*) supports_anon_versioning=yes ;; +- *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.10.*) ;; # catch versions < 2.11 +- *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... +- *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... +- *\ 2.11.*) ;; # other 2.11 versions +- *) supports_anon_versioning=yes ;; +- esac +- +- # See if GNU ld supports shared libraries. +- case $host_os in +- aix[[3-9]]*) +- # On AIX/PPC, the GNU linker is very broken +- if test ia64 != "$host_cpu"; then +- _LT_TAGVAR(ld_shlibs, $1)=no +- cat <<_LT_EOF 1>&2 +- +-*** Warning: the GNU linker, at least up to release 2.19, is reported +-*** to be unable to reliably create shared libraries on AIX. +-*** Therefore, libtool is disabling shared libraries support. If you +-*** really care for shared libraries, you may want to install binutils +-*** 2.20 or above, or modify your PATH so that a non-GNU linker is found. +-*** You will then need to restart the configuration process. 
+- +-_LT_EOF +- fi +- ;; +- +- amigaos*) +- case $host_cpu in +- powerpc) +- # see comment about AmigaOS4 .so support +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' +- _LT_TAGVAR(archive_expsym_cmds, $1)='' +- ;; +- m68k) +- _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' +- _LT_TAGVAR(hardcode_minus_L, $1)=yes +- ;; +- esac +- ;; +- +- beos*) +- if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then +- _LT_TAGVAR(allow_undefined_flag, $1)=unsupported +- # Joseph Beckenbach says some releases of gcc +- # support --undefined. This deserves some investigation. FIXME +- _LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' +- else +- _LT_TAGVAR(ld_shlibs, $1)=no +- fi +- ;; +- +- cygwin* | mingw* | pw32* | cegcc*) +- # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, +- # as there is no search path for DLLs. 
+- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' +- _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-all-symbols' +- _LT_TAGVAR(allow_undefined_flag, $1)=unsupported +- _LT_TAGVAR(always_export_symbols, $1)=no +- _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes +- _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols' +- _LT_TAGVAR(exclude_expsyms, $1)=['[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname'] +- +- if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' +- # If the export-symbols file already is a .def file, use it as +- # is; otherwise, prepend EXPORTS... 
+- _LT_TAGVAR(archive_expsym_cmds, $1)='if _LT_DLL_DEF_P([$export_symbols]); then +- cp $export_symbols $output_objdir/$soname.def; +- else +- echo EXPORTS > $output_objdir/$soname.def; +- cat $export_symbols >> $output_objdir/$soname.def; +- fi~ +- $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' +- else +- _LT_TAGVAR(ld_shlibs, $1)=no +- fi +- ;; +- +- haiku*) +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' +- _LT_TAGVAR(link_all_deplibs, $1)=yes +- ;; +- +- os2*) +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' +- _LT_TAGVAR(hardcode_minus_L, $1)=yes +- _LT_TAGVAR(allow_undefined_flag, $1)=unsupported +- shrext_cmds=.dll +- _LT_TAGVAR(archive_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ +- $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ +- $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ +- $ECHO EXPORTS >> $output_objdir/$libname.def~ +- emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ +- $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ +- emximp -o $lib $output_objdir/$libname.def' +- _LT_TAGVAR(archive_expsym_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ +- $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ +- $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ +- $ECHO EXPORTS >> $output_objdir/$libname.def~ +- prefix_cmds="$SED"~ +- if test EXPORTS = "`$SED 1q $export_symbols`"; then +- prefix_cmds="$prefix_cmds -e 1d"; +- fi~ +- prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ +- cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ +- $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs 
$compiler_flags $output_objdir/$libname.def~ +- emximp -o $lib $output_objdir/$libname.def' +- _LT_TAGVAR(old_archive_From_new_cmds, $1)='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' +- _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes +- _LT_TAGVAR(file_list_spec, $1)='@' +- ;; +- +- interix[[3-9]]*) +- _LT_TAGVAR(hardcode_direct, $1)=no +- _LT_TAGVAR(hardcode_shlibpath_var, $1)=no +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' +- _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' +- # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. +- # Instead, shared libraries are loaded at an image base (0x10000000 by +- # default) and relocated if they conflict, which is a slow very memory +- # consuming and fragmenting process. To avoid this, we pick a random, +- # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link +- # time. Moving up from 0x10000000 also allows more sbrk(2) space. +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' +- _LT_TAGVAR(archive_expsym_cmds, $1)='$SED "s|^|_|" $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--retain-symbols-file,$output_objdir/$soname.expsym $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' +- ;; +- +- gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu) +- tmp_diet=no +- if test linux-dietlibc = "$host_os"; then +- case $cc_basename in +- diet\ *) tmp_diet=yes;; # linux-dietlibc with static linking (!diet-dyn) +- esac +- fi +- if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \ +- && test no = "$tmp_diet" +- then +- tmp_addflag=' $pic_flag' +- tmp_sharedflag='-shared' +- case $cc_basename,$host_cpu in +- pgcc*) # Portland Group C compiler +- _LT_TAGVAR(whole_archive_flag_spec, 
$1)='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' +- tmp_addflag=' $pic_flag' +- ;; +- pgf77* | pgf90* | pgf95* | pgfortran*) +- # Portland Group f77 and f90 compilers +- _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' +- tmp_addflag=' $pic_flag -Mnomain' ;; +- ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 +- tmp_addflag=' -i_dynamic' ;; +- efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 +- tmp_addflag=' -i_dynamic -nofor_main' ;; +- ifc* | ifort*) # Intel Fortran compiler +- tmp_addflag=' -nofor_main' ;; +- lf95*) # Lahey Fortran 8.1 +- _LT_TAGVAR(whole_archive_flag_spec, $1)= +- tmp_sharedflag='--shared' ;; +- nagfor*) # NAGFOR 5.3 +- tmp_sharedflag='-Wl,-shared' ;; +- xl[[cC]]* | bgxl[[cC]]* | mpixl[[cC]]*) # IBM XL C 8.0 on PPC (deal with xlf below) +- tmp_sharedflag='-qmkshrobj' +- tmp_addflag= ;; +- nvcc*) # Cuda Compiler Driver 2.2 +- _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' +- _LT_TAGVAR(compiler_needs_object, $1)=yes +- ;; +- esac +- case `$CC -V 2>&1 | $SED 5q` in +- *Sun\ C*) # Sun C 5.9 +- _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' +- _LT_TAGVAR(compiler_needs_object, $1)=yes +- tmp_sharedflag='-G' ;; +- *Sun\ F*) # Sun Fortran 8.3 +- tmp_sharedflag='-G' ;; +- esac +- _LT_TAGVAR(archive_cmds, $1)='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs 
$compiler_flags $wl-soname $wl$soname -o $lib' +- +- if test yes = "$supports_anon_versioning"; then +- _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ +- cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ +- echo "local: *; };" >> $output_objdir/$libname.ver~ +- $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-version-script $wl$output_objdir/$libname.ver -o $lib' +- fi +- +- case $cc_basename in +- tcc*) +- _LT_TAGVAR(export_dynamic_flag_spec, $1)='-rdynamic' +- ;; +- xlf* | bgf* | bgxlf* | mpixlf*) +- # IBM XL Fortran 10.1 on PPC cannot create shared libs itself +- _LT_TAGVAR(whole_archive_flag_spec, $1)='--whole-archive$convenience --no-whole-archive' +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' +- _LT_TAGVAR(archive_cmds, $1)='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib' +- if test yes = "$supports_anon_versioning"; then +- _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ +- cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ +- echo "local: *; };" >> $output_objdir/$libname.ver~ +- $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' +- fi +- ;; +- esac +- else +- _LT_TAGVAR(ld_shlibs, $1)=no +- fi +- ;; +- +- netbsd*) +- if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then +- _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' +- wlarc= +- else +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' +- _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' +- fi +- ;; +- +- solaris*) +- if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then +- _LT_TAGVAR(ld_shlibs, 
$1)=no +- cat <<_LT_EOF 1>&2 +- +-*** Warning: The releases 2.8.* of the GNU linker cannot reliably +-*** create shared libraries on Solaris systems. Therefore, libtool +-*** is disabling shared libraries support. We urge you to upgrade GNU +-*** binutils to release 2.9.1 or newer. Another option is to modify +-*** your PATH or compiler configuration so that the native linker is +-*** used, and then restart. +- +-_LT_EOF +- elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' +- _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' +- else +- _LT_TAGVAR(ld_shlibs, $1)=no +- fi +- ;; +- +- sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) +- case `$LD -v 2>&1` in +- *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.1[[0-5]].*) +- _LT_TAGVAR(ld_shlibs, $1)=no +- cat <<_LT_EOF 1>&2 +- +-*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 cannot +-*** reliably create shared libraries on SCO systems. Therefore, libtool +-*** is disabling shared libraries support. We urge you to upgrade GNU +-*** binutils to release 2.16.91.0.3 or newer. Another option is to modify +-*** your PATH or compiler configuration so that the native linker is +-*** used, and then restart. +- +-_LT_EOF +- ;; +- *) +- # For security reasons, it is highly recommended that you always +- # use absolute paths for naming shared libraries, and exclude the +- # DT_RUNPATH tag from executables and libraries. But doing so +- # requires that you compile everything twice, which is a pain. 
+- if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' +- _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' +- else +- _LT_TAGVAR(ld_shlibs, $1)=no +- fi +- ;; +- esac +- ;; +- +- sunos4*) +- _LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' +- wlarc= +- _LT_TAGVAR(hardcode_direct, $1)=yes +- _LT_TAGVAR(hardcode_shlibpath_var, $1)=no +- ;; +- +- *) +- if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' +- _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' +- else +- _LT_TAGVAR(ld_shlibs, $1)=no +- fi +- ;; +- esac +- +- if test no = "$_LT_TAGVAR(ld_shlibs, $1)"; then +- runpath_var= +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= +- _LT_TAGVAR(export_dynamic_flag_spec, $1)= +- _LT_TAGVAR(whole_archive_flag_spec, $1)= +- fi +- else +- # PORTME fill in a description of your system's linker (not GNU ld) +- case $host_os in +- aix3*) +- _LT_TAGVAR(allow_undefined_flag, $1)=unsupported +- _LT_TAGVAR(always_export_symbols, $1)=yes +- _LT_TAGVAR(archive_expsym_cmds, $1)='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' +- # Note: this linker hardcodes the directories in LIBPATH if there +- # are no directories specified by -L. 
+- _LT_TAGVAR(hardcode_minus_L, $1)=yes +- if test yes = "$GCC" && test -z "$lt_prog_compiler_static"; then +- # Neither direct hardcoding nor static linking is supported with a +- # broken collect2. +- _LT_TAGVAR(hardcode_direct, $1)=unsupported +- fi +- ;; +- +- aix[[4-9]]*) +- if test ia64 = "$host_cpu"; then +- # On IA64, the linker does run time linking by default, so we don't +- # have to do anything special. +- aix_use_runtimelinking=no +- exp_sym_flag='-Bexport' +- no_entry_flag= +- else +- # If we're using GNU nm, then we don't want the "-C" option. +- # -C means demangle to GNU nm, but means don't demangle to AIX nm. +- # Without the "-l" option, or with the "-B" option, AIX nm treats +- # weak defined symbols like other global defined symbols, whereas +- # GNU nm marks them as "W". +- # While the 'weak' keyword is ignored in the Export File, we need +- # it in the Import File for the 'aix-soname' feature, so we have +- # to replace the "-B" option with "-P" for AIX nm. +- if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then +- _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && ([substr](\$ 3,1,1) != ".")) { if (\$ 2 == "W") { print \$ 3 " weak" } else { print \$ 3 } } }'\'' | sort -u > $export_symbols' +- else +- _LT_TAGVAR(export_symbols_cmds, $1)='`func_echo_all $NM | $SED -e '\''s/B\([[^B]]*\)$/P\1/'\''` -PCpgl $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "L") || (\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) && ([substr](\$ 1,1,1) != ".")) { if ((\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) { print \$ 1 " weak" } else { print \$ 1 } } }'\'' | sort -u > $export_symbols' +- fi +- aix_use_runtimelinking=no +- +- # Test if we are trying to use run time linking or normal +- # AIX style linking. If -brtl is somewhere in LDFLAGS, we +- # have runtime linking enabled, and use it for executables. 
+- # For shared libraries, we enable/disable runtime linking +- # depending on the kind of the shared library created - +- # when "with_aix_soname,aix_use_runtimelinking" is: +- # "aix,no" lib.a(lib.so.V) shared, rtl:no, for executables +- # "aix,yes" lib.so shared, rtl:yes, for executables +- # lib.a static archive +- # "both,no" lib.so.V(shr.o) shared, rtl:yes +- # lib.a(lib.so.V) shared, rtl:no, for executables +- # "both,yes" lib.so.V(shr.o) shared, rtl:yes, for executables +- # lib.a(lib.so.V) shared, rtl:no +- # "svr4,*" lib.so.V(shr.o) shared, rtl:yes, for executables +- # lib.a static archive +- case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*) +- for ld_flag in $LDFLAGS; do +- if (test x-brtl = "x$ld_flag" || test x-Wl,-brtl = "x$ld_flag"); then +- aix_use_runtimelinking=yes +- break +- fi +- done +- if test svr4,no = "$with_aix_soname,$aix_use_runtimelinking"; then +- # With aix-soname=svr4, we create the lib.so.V shared archives only, +- # so we don't have lib.a shared libs to link our executables. +- # We have to force runtime linking in this case. +- aix_use_runtimelinking=yes +- LDFLAGS="$LDFLAGS -Wl,-brtl" +- fi +- ;; +- esac +- +- exp_sym_flag='-bexport' +- no_entry_flag='-bnoentry' +- fi +- +- # When large executables or shared objects are built, AIX ld can +- # have problems creating the table of contents. If linking a library +- # or program results in "error TOC overflow" add -mminimal-toc to +- # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not +- # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. 
+- +- _LT_TAGVAR(archive_cmds, $1)='' +- _LT_TAGVAR(hardcode_direct, $1)=yes +- _LT_TAGVAR(hardcode_direct_absolute, $1)=yes +- _LT_TAGVAR(hardcode_libdir_separator, $1)=':' +- _LT_TAGVAR(link_all_deplibs, $1)=yes +- _LT_TAGVAR(file_list_spec, $1)='$wl-f,' +- case $with_aix_soname,$aix_use_runtimelinking in +- aix,*) ;; # traditional, no import file +- svr4,* | *,yes) # use import file +- # The Import File defines what to hardcode. +- _LT_TAGVAR(hardcode_direct, $1)=no +- _LT_TAGVAR(hardcode_direct_absolute, $1)=no +- ;; +- esac +- +- if test yes = "$GCC"; then +- case $host_os in aix4.[[012]]|aix4.[[012]].*) +- # We only want to do this on AIX 4.2 and lower, the check +- # below for broken collect2 doesn't work under 4.3+ +- collect2name=`$CC -print-prog-name=collect2` +- if test -f "$collect2name" && +- strings "$collect2name" | $GREP resolve_lib_name >/dev/null +- then +- # We have reworked collect2 +- : +- else +- # We have old collect2 +- _LT_TAGVAR(hardcode_direct, $1)=unsupported +- # It fails to find uninstalled libraries when the uninstalled +- # path is not listed in the libpath. Setting hardcode_minus_L +- # to unsupported forces relinking +- _LT_TAGVAR(hardcode_minus_L, $1)=yes +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' +- _LT_TAGVAR(hardcode_libdir_separator, $1)= +- fi +- ;; +- esac +- shared_flag='-shared' +- if test yes = "$aix_use_runtimelinking"; then +- shared_flag="$shared_flag "'$wl-G' +- fi +- # Need to ensure runtime linking is disabled for the traditional +- # shared library, or the linker may eventually find shared libraries +- # /with/ Import File - we do not want to mix them. +- shared_flag_aix='-shared' +- shared_flag_svr4='-shared $wl-G' +- else +- # not using gcc +- if test ia64 = "$host_cpu"; then +- # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release +- # chokes on -Wl,-G. 
The following line is correct: +- shared_flag='-G' +- else +- if test yes = "$aix_use_runtimelinking"; then +- shared_flag='$wl-G' +- else +- shared_flag='$wl-bM:SRE' +- fi +- shared_flag_aix='$wl-bM:SRE' +- shared_flag_svr4='$wl-G' +- fi +- fi +- +- _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-bexpall' +- # It seems that -bexpall does not export symbols beginning with +- # underscore (_), so it is better to generate a list of symbols to export. +- _LT_TAGVAR(always_export_symbols, $1)=yes +- if test aix,yes = "$with_aix_soname,$aix_use_runtimelinking"; then +- # Warning - without using the other runtime loading flags (-brtl), +- # -berok will link without error, but may produce a broken library. +- _LT_TAGVAR(allow_undefined_flag, $1)='-berok' +- # Determine the default libpath from the value encoded in an +- # empty executable. +- _LT_SYS_MODULE_PATH_AIX([$1]) +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-blibpath:$libdir:'"$aix_libpath" +- _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs $wl'$no_entry_flag' $compiler_flags `if test -n "$allow_undefined_flag"; then func_echo_all "$wl$allow_undefined_flag"; else :; fi` $wl'$exp_sym_flag:\$export_symbols' '$shared_flag +- else +- if test ia64 = "$host_cpu"; then +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R $libdir:/usr/lib:/lib' +- _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs" +- _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\$wl$no_entry_flag"' $compiler_flags $wl$allow_undefined_flag '"\$wl$exp_sym_flag:\$export_symbols" +- else +- # Determine the default libpath from the value encoded in an +- # empty executable. +- _LT_SYS_MODULE_PATH_AIX([$1]) +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-blibpath:$libdir:'"$aix_libpath" +- # Warning - without using the other run time loading flags, +- # -berok will link without error, but may produce a broken library. 
+- _LT_TAGVAR(no_undefined_flag, $1)=' $wl-bernotok' +- _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-berok' +- if test yes = "$with_gnu_ld"; then +- # We only use this code for GNU lds that support --whole-archive. +- _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive$convenience $wl--no-whole-archive' +- else +- # Exported symbols can be pulled into shared objects from archives +- _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience' +- fi +- _LT_TAGVAR(archive_cmds_need_lc, $1)=yes +- _LT_TAGVAR(archive_expsym_cmds, $1)='$RM -r $output_objdir/$realname.d~$MKDIR $output_objdir/$realname.d' +- # -brtl affects multiple linker settings, -berok does not and is overridden later +- compiler_flags_filtered='`func_echo_all "$compiler_flags " | $SED -e "s%-brtl\\([[, ]]\\)%-berok\\1%g"`' +- if test svr4 != "$with_aix_soname"; then +- # This is similar to how AIX traditionally builds its shared libraries. +- _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$CC '$shared_flag_aix' -o $output_objdir/$realname.d/$soname $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$realname.d/$soname' +- fi +- if test aix != "$with_aix_soname"; then +- _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$CC '$shared_flag_svr4' -o $output_objdir/$realname.d/$shared_archive_member_spec.o $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$STRIP -e $output_objdir/$realname.d/$shared_archive_member_spec.o~( func_echo_all "#! 
$soname($shared_archive_member_spec.o)"; if test shr_64 = "$shared_archive_member_spec"; then func_echo_all "# 64"; else func_echo_all "# 32"; fi; cat $export_symbols ) > $output_objdir/$realname.d/$shared_archive_member_spec.imp~$AR $AR_FLAGS $output_objdir/$soname $output_objdir/$realname.d/$shared_archive_member_spec.o $output_objdir/$realname.d/$shared_archive_member_spec.imp' +- else +- # used by -dlpreopen to get the symbols +- _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$MV $output_objdir/$realname.d/$soname $output_objdir' +- fi +- _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$RM -r $output_objdir/$realname.d' +- fi +- fi +- ;; +- +- amigaos*) +- case $host_cpu in +- powerpc) +- # see comment about AmigaOS4 .so support +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' +- _LT_TAGVAR(archive_expsym_cmds, $1)='' +- ;; +- m68k) +- _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' +- _LT_TAGVAR(hardcode_minus_L, $1)=yes +- ;; +- esac +- ;; +- +- bsdi[[45]]*) +- _LT_TAGVAR(export_dynamic_flag_spec, $1)=-rdynamic +- ;; +- +- cygwin* | mingw* | pw32* | cegcc*) +- # When not using gcc, we currently assume that we are using +- # Microsoft Visual C++ or Intel C++ Compiler. +- # hardcode_libdir_flag_spec is actually meaningless, as there is +- # no search path for DLLs. 
+- case $cc_basename in +- cl* | icl*) +- # Native MSVC or ICC +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' +- _LT_TAGVAR(allow_undefined_flag, $1)=unsupported +- _LT_TAGVAR(always_export_symbols, $1)=yes +- _LT_TAGVAR(file_list_spec, $1)='@' +- # Tell ltmain to make .lib files, not .a files. +- libext=lib +- # Tell ltmain to make .dll files, not .so files. +- shrext_cmds=.dll +- # FIXME: Setting linknames here is a bad hack. +- _LT_TAGVAR(archive_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~linknames=' +- _LT_TAGVAR(archive_expsym_cmds, $1)='if _LT_DLL_DEF_P([$export_symbols]); then +- cp "$export_symbols" "$output_objdir/$soname.def"; +- echo "$tool_output_objdir$soname.def" > "$output_objdir/$soname.exp"; +- else +- $SED -e '\''s/^/-link -EXPORT:/'\'' < $export_symbols > $output_objdir/$soname.exp; +- fi~ +- $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ +- linknames=' +- # The linker will not automatically build a static lib if we build a DLL. 
+- # _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' +- _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes +- _LT_TAGVAR(exclude_expsyms, $1)='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' +- _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1,DATA/'\'' | $SED -e '\''/^[[AITW]][[ ]]/s/.*[[ ]]//'\'' | sort | uniq > $export_symbols' +- # Don't use ranlib +- _LT_TAGVAR(old_postinstall_cmds, $1)='chmod 644 $oldlib' +- _LT_TAGVAR(postlink_cmds, $1)='lt_outputfile="@OUTPUT@"~ +- lt_tool_outputfile="@TOOL_OUTPUT@"~ +- case $lt_outputfile in +- *.exe|*.EXE) ;; +- *) +- lt_outputfile=$lt_outputfile.exe +- lt_tool_outputfile=$lt_tool_outputfile.exe +- ;; +- esac~ +- if test : != "$MANIFEST_TOOL" && test -f "$lt_outputfile.manifest"; then +- $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; +- $RM "$lt_outputfile.manifest"; +- fi' +- ;; +- *) +- # Assume MSVC and ICC wrapper +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' +- _LT_TAGVAR(allow_undefined_flag, $1)=unsupported +- # Tell ltmain to make .lib files, not .a files. +- libext=lib +- # Tell ltmain to make .dll files, not .so files. +- shrext_cmds=.dll +- # FIXME: Setting linknames here is a bad hack. +- _LT_TAGVAR(archive_cmds, $1)='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' +- # The linker will automatically build a .lib file if we build a DLL. +- _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' +- # FIXME: Should let the user specify the lib program. 
+- _LT_TAGVAR(old_archive_cmds, $1)='lib -OUT:$oldlib$oldobjs$old_deplibs' +- _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes +- ;; +- esac +- ;; +- +- darwin* | rhapsody*) +- _LT_DARWIN_LINKER_FEATURES($1) +- ;; +- +- dgux*) +- _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' +- _LT_TAGVAR(hardcode_shlibpath_var, $1)=no +- ;; +- +- # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor +- # support. Future versions do this automatically, but an explicit c++rt0.o +- # does not break anything, and helps significantly (at the cost of a little +- # extra space). +- freebsd2.2*) +- _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' +- _LT_TAGVAR(hardcode_direct, $1)=yes +- _LT_TAGVAR(hardcode_shlibpath_var, $1)=no +- ;; +- +- # Unfortunately, older versions of FreeBSD 2 do not have this feature. +- freebsd2.*) +- _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' +- _LT_TAGVAR(hardcode_direct, $1)=yes +- _LT_TAGVAR(hardcode_minus_L, $1)=yes +- _LT_TAGVAR(hardcode_shlibpath_var, $1)=no +- ;; +- +- # FreeBSD 3 and greater uses gcc -shared to do shared libraries. 
+- freebsd* | dragonfly* | midnightbsd*) +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' +- _LT_TAGVAR(hardcode_direct, $1)=yes +- _LT_TAGVAR(hardcode_shlibpath_var, $1)=no +- ;; +- +- hpux9*) +- if test yes = "$GCC"; then +- _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared $pic_flag $wl+b $wl$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' +- else +- _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' +- fi +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir' +- _LT_TAGVAR(hardcode_libdir_separator, $1)=: +- _LT_TAGVAR(hardcode_direct, $1)=yes +- +- # hardcode_minus_L: Not really in the search PATH, +- # but as the default location of the library. +- _LT_TAGVAR(hardcode_minus_L, $1)=yes +- _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' +- ;; +- +- hpux10*) +- if test yes,no = "$GCC,$with_gnu_ld"; then +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' +- else +- _LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' +- fi +- if test no = "$with_gnu_ld"; then +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir' +- _LT_TAGVAR(hardcode_libdir_separator, $1)=: +- _LT_TAGVAR(hardcode_direct, $1)=yes +- _LT_TAGVAR(hardcode_direct_absolute, $1)=yes +- _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' +- # hardcode_minus_L: Not really in the search PATH, +- # but as the default location of the library. 
+- _LT_TAGVAR(hardcode_minus_L, $1)=yes +- fi +- ;; +- +- hpux11*) +- if test yes,no = "$GCC,$with_gnu_ld"; then +- case $host_cpu in +- hppa*64*) +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' +- ;; +- ia64*) +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' +- ;; +- *) +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' +- ;; +- esac +- else +- case $host_cpu in +- hppa*64*) +- _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' +- ;; +- ia64*) +- _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' +- ;; +- *) +- m4_if($1, [], [ +- # Older versions of the 11.00 compiler do not understand -b yet +- # (HP92453-01 A.11.01.20 doesn't, HP92453-01 B.11.X.35175-35176.GP does) +- _LT_LINKER_OPTION([if $CC understands -b], +- _LT_TAGVAR(lt_cv_prog_compiler__b, $1), [-b], +- [_LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags'], +- [_LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags'])], +- [_LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags']) +- ;; +- esac +- fi +- if test no = "$with_gnu_ld"; then +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir' +- _LT_TAGVAR(hardcode_libdir_separator, $1)=: +- +- case $host_cpu in +- hppa*64*|ia64*) +- _LT_TAGVAR(hardcode_direct, $1)=no +- _LT_TAGVAR(hardcode_shlibpath_var, $1)=no +- ;; +- *) +- _LT_TAGVAR(hardcode_direct, $1)=yes +- _LT_TAGVAR(hardcode_direct_absolute, $1)=yes +- _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' +- +- # hardcode_minus_L: Not really in the search PATH, +- # but as 
the default location of the library. +- _LT_TAGVAR(hardcode_minus_L, $1)=yes +- ;; +- esac +- fi +- ;; +- +- irix5* | irix6* | nonstopux*) +- if test yes = "$GCC"; then +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' +- # Try to use the -exported_symbol ld option, if it does not +- # work, assume that -exports_file does not work either and +- # implicitly export all symbols. +- # This should be the same for all languages, so no per-tag cache variable. +- AC_CACHE_CHECK([whether the $host_os linker accepts -exported_symbol], +- [lt_cv_irix_exported_symbol], +- [save_LDFLAGS=$LDFLAGS +- LDFLAGS="$LDFLAGS -shared $wl-exported_symbol ${wl}foo $wl-update_registry $wl/dev/null" +- AC_LINK_IFELSE( +- [AC_LANG_SOURCE( +- [AC_LANG_CASE([C], [[int foo (void) { return 0; }]], +- [C++], [[int foo (void) { return 0; }]], +- [Fortran 77], [[ +- subroutine foo +- end]], +- [Fortran], [[ +- subroutine foo +- end]])])], +- [lt_cv_irix_exported_symbol=yes], +- [lt_cv_irix_exported_symbol=no]) +- LDFLAGS=$save_LDFLAGS]) +- if test yes = "$lt_cv_irix_exported_symbol"; then +- _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations $wl-exports_file $wl$export_symbols -o $lib' +- fi +- else +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' +- _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations 
-exports_file $export_symbols -o $lib' +- fi +- _LT_TAGVAR(archive_cmds_need_lc, $1)='no' +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' +- _LT_TAGVAR(hardcode_libdir_separator, $1)=: +- _LT_TAGVAR(inherit_rpath, $1)=yes +- _LT_TAGVAR(link_all_deplibs, $1)=yes +- ;; +- +- linux*) +- case $cc_basename in +- tcc*) +- # Fabrice Bellard et al's Tiny C Compiler +- _LT_TAGVAR(ld_shlibs, $1)=yes +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' +- ;; +- esac +- ;; +- +- netbsd*) +- if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then +- _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out +- else +- _LT_TAGVAR(archive_cmds, $1)='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF +- fi +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' +- _LT_TAGVAR(hardcode_direct, $1)=yes +- _LT_TAGVAR(hardcode_shlibpath_var, $1)=no +- ;; +- +- newsos6) +- _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' +- _LT_TAGVAR(hardcode_direct, $1)=yes +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' +- _LT_TAGVAR(hardcode_libdir_separator, $1)=: +- _LT_TAGVAR(hardcode_shlibpath_var, $1)=no +- ;; +- +- *nto* | *qnx*) +- ;; +- +- openbsd* | bitrig*) +- if test -f /usr/libexec/ld.so; then +- _LT_TAGVAR(hardcode_direct, $1)=yes +- _LT_TAGVAR(hardcode_shlibpath_var, $1)=no +- _LT_TAGVAR(hardcode_direct_absolute, $1)=yes +- if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' +- _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags $wl-retain-symbols-file,$export_symbols' +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' +- _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' +- else +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs 
$deplibs $compiler_flags' +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' +- fi +- else +- _LT_TAGVAR(ld_shlibs, $1)=no +- fi +- ;; +- +- os2*) +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' +- _LT_TAGVAR(hardcode_minus_L, $1)=yes +- _LT_TAGVAR(allow_undefined_flag, $1)=unsupported +- shrext_cmds=.dll +- _LT_TAGVAR(archive_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ +- $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ +- $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ +- $ECHO EXPORTS >> $output_objdir/$libname.def~ +- emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ +- $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ +- emximp -o $lib $output_objdir/$libname.def' +- _LT_TAGVAR(archive_expsym_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ +- $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ +- $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ +- $ECHO EXPORTS >> $output_objdir/$libname.def~ +- prefix_cmds="$SED"~ +- if test EXPORTS = "`$SED 1q $export_symbols`"; then +- prefix_cmds="$prefix_cmds -e 1d"; +- fi~ +- prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ +- cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ +- $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ +- emximp -o $lib $output_objdir/$libname.def' +- _LT_TAGVAR(old_archive_From_new_cmds, $1)='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' +- _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes +- _LT_TAGVAR(file_list_spec, $1)='@' +- ;; +- +- osf3*) +- if test yes = "$GCC"; then +- _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-expect_unresolved $wl\*' +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag 
$libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' +- else +- _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' +- fi +- _LT_TAGVAR(archive_cmds_need_lc, $1)='no' +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' +- _LT_TAGVAR(hardcode_libdir_separator, $1)=: +- ;; +- +- osf4* | osf5*) # as osf3* with the addition of -msym flag +- if test yes = "$GCC"; then +- _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-expect_unresolved $wl\*' +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $pic_flag $libobjs $deplibs $compiler_flags $wl-msym $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' +- else +- _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' +- _LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~ +- $CC -shared$allow_undefined_flag $wl-input $wl$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib~$RM $lib.exp' +- +- # Both c and cxx compiler support -rpath directly +- _LT_TAGVAR(hardcode_libdir_flag_spec, 
$1)='-rpath $libdir' +- fi +- _LT_TAGVAR(archive_cmds_need_lc, $1)='no' +- _LT_TAGVAR(hardcode_libdir_separator, $1)=: +- ;; +- +- solaris*) +- _LT_TAGVAR(no_undefined_flag, $1)=' -z defs' +- if test yes = "$GCC"; then +- wlarc='$wl' +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $wl-z ${wl}text $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' +- _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ +- $CC -shared $pic_flag $wl-z ${wl}text $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' +- else +- case `$CC -V 2>&1` in +- *"Compilers 5.0"*) +- wlarc='' +- _LT_TAGVAR(archive_cmds, $1)='$LD -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $linker_flags' +- _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ +- $LD -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp' +- ;; +- *) +- wlarc='$wl' +- _LT_TAGVAR(archive_cmds, $1)='$CC -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $compiler_flags' +- _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ +- $CC -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' +- ;; +- esac +- fi +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' +- _LT_TAGVAR(hardcode_shlibpath_var, $1)=no +- case $host_os in +- solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; +- *) +- # The compiler driver will combine and reorder linker options, +- # but understands '-z linker_flag'. GCC discards it without '$wl', +- # but is careful enough not to reorder. +- # Supported since Solaris 2.6 (maybe 2.5.1?) 
+- if test yes = "$GCC"; then +- _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl-z ${wl}allextract$convenience $wl-z ${wl}defaultextract' +- else +- _LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract' +- fi +- ;; +- esac +- _LT_TAGVAR(link_all_deplibs, $1)=yes +- ;; +- +- sunos4*) +- if test sequent = "$host_vendor"; then +- # Use $CC to link under sequent, because it throws in some extra .o +- # files that make .init and .fini sections work. +- _LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h $soname -o $lib $libobjs $deplibs $compiler_flags' +- else +- _LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' +- fi +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' +- _LT_TAGVAR(hardcode_direct, $1)=yes +- _LT_TAGVAR(hardcode_minus_L, $1)=yes +- _LT_TAGVAR(hardcode_shlibpath_var, $1)=no +- ;; +- +- sysv4) +- case $host_vendor in +- sni) +- _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' +- _LT_TAGVAR(hardcode_direct, $1)=yes # is this really true??? +- ;; +- siemens) +- ## LD is ld it makes a PLAMLIB +- ## CC just makes a GrossModule. 
+- _LT_TAGVAR(archive_cmds, $1)='$LD -G -o $lib $libobjs $deplibs $linker_flags' +- _LT_TAGVAR(reload_cmds, $1)='$CC -r -o $output$reload_objs' +- _LT_TAGVAR(hardcode_direct, $1)=no +- ;; +- motorola) +- _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' +- _LT_TAGVAR(hardcode_direct, $1)=no #Motorola manual says yes, but my tests say they lie +- ;; +- esac +- runpath_var='LD_RUN_PATH' +- _LT_TAGVAR(hardcode_shlibpath_var, $1)=no +- ;; +- +- sysv4.3*) +- _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' +- _LT_TAGVAR(hardcode_shlibpath_var, $1)=no +- _LT_TAGVAR(export_dynamic_flag_spec, $1)='-Bexport' +- ;; +- +- sysv4*MP*) +- if test -d /usr/nec; then +- _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' +- _LT_TAGVAR(hardcode_shlibpath_var, $1)=no +- runpath_var=LD_RUN_PATH +- hardcode_runpath_var=yes +- _LT_TAGVAR(ld_shlibs, $1)=yes +- fi +- ;; +- +- sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*) +- _LT_TAGVAR(no_undefined_flag, $1)='$wl-z,text' +- _LT_TAGVAR(archive_cmds_need_lc, $1)=no +- _LT_TAGVAR(hardcode_shlibpath_var, $1)=no +- runpath_var='LD_RUN_PATH' +- +- if test yes = "$GCC"; then +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' +- _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' +- else +- _LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' +- _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' +- fi +- ;; +- +- sysv5* | sco3.2v5* | sco5v6*) +- # Note: We CANNOT use -z defs as we might desire, because we do not +- # link with -lc, and that would cause any symbols used from libc to +- # always be unresolved, which means just about no 
library would +- # ever link correctly. If we're not using GNU ld we use -z text +- # though, which does catch some bad symbols but isn't as heavy-handed +- # as -z defs. +- _LT_TAGVAR(no_undefined_flag, $1)='$wl-z,text' +- _LT_TAGVAR(allow_undefined_flag, $1)='$wl-z,nodefs' +- _LT_TAGVAR(archive_cmds_need_lc, $1)=no +- _LT_TAGVAR(hardcode_shlibpath_var, $1)=no +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R,$libdir' +- _LT_TAGVAR(hardcode_libdir_separator, $1)=':' +- _LT_TAGVAR(link_all_deplibs, $1)=yes +- _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-Bexport' +- runpath_var='LD_RUN_PATH' +- +- if test yes = "$GCC"; then +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' +- _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' +- else +- _LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' +- _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' +- fi +- ;; +- +- uts4*) +- _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' +- _LT_TAGVAR(hardcode_shlibpath_var, $1)=no +- ;; +- +- *) +- _LT_TAGVAR(ld_shlibs, $1)=no +- ;; +- esac +- +- if test sni = "$host_vendor"; then +- case $host in +- sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) +- _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-Blargedynsym' +- ;; +- esac +- fi +- fi +-]) +-AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)]) +-test no = "$_LT_TAGVAR(ld_shlibs, $1)" && can_build_shared=no +- +-_LT_TAGVAR(with_gnu_ld, $1)=$with_gnu_ld +- +-_LT_DECL([], [libext], [0], [Old archive suffix (normally "a")])dnl +-_LT_DECL([], [shrext_cmds], [1], [Shared library suffix (normally ".so")])dnl +-_LT_DECL([], [extract_expsyms_cmds], [2], +- [The commands to extract the exported symbol list from a 
shared archive]) +- +-# +-# Do we need to explicitly link libc? +-# +-case "x$_LT_TAGVAR(archive_cmds_need_lc, $1)" in +-x|xyes) +- # Assume -lc should be added +- _LT_TAGVAR(archive_cmds_need_lc, $1)=yes +- +- if test yes,yes = "$GCC,$enable_shared"; then +- case $_LT_TAGVAR(archive_cmds, $1) in +- *'~'*) +- # FIXME: we may have to deal with multi-command sequences. +- ;; +- '$CC '*) +- # Test whether the compiler implicitly links with -lc since on some +- # systems, -lgcc has to come before -lc. If gcc already passes -lc +- # to ld, don't add -lc before -lgcc. +- AC_CACHE_CHECK([whether -lc should be explicitly linked in], +- [lt_cv_]_LT_TAGVAR(archive_cmds_need_lc, $1), +- [$RM conftest* +- echo "$lt_simple_compile_test_code" > conftest.$ac_ext +- +- if AC_TRY_EVAL(ac_compile) 2>conftest.err; then +- soname=conftest +- lib=conftest +- libobjs=conftest.$ac_objext +- deplibs= +- wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1) +- pic_flag=$_LT_TAGVAR(lt_prog_compiler_pic, $1) +- compiler_flags=-v +- linker_flags=-v +- verstring= +- output_objdir=. 
+- libname=conftest +- lt_save_allow_undefined_flag=$_LT_TAGVAR(allow_undefined_flag, $1) +- _LT_TAGVAR(allow_undefined_flag, $1)= +- if AC_TRY_EVAL(_LT_TAGVAR(archive_cmds, $1) 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) +- then +- lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1)=no +- else +- lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1)=yes +- fi +- _LT_TAGVAR(allow_undefined_flag, $1)=$lt_save_allow_undefined_flag +- else +- cat conftest.err 1>&5 +- fi +- $RM conftest* +- ]) +- _LT_TAGVAR(archive_cmds_need_lc, $1)=$lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1) +- ;; +- esac +- fi +- ;; +-esac +- +-_LT_TAGDECL([build_libtool_need_lc], [archive_cmds_need_lc], [0], +- [Whether or not to add -lc for building shared libraries]) +-_LT_TAGDECL([allow_libtool_libs_with_static_runtimes], +- [enable_shared_with_static_runtimes], [0], +- [Whether or not to disallow shared libs when runtime libs are static]) +-_LT_TAGDECL([], [export_dynamic_flag_spec], [1], +- [Compiler flag to allow reflexive dlopens]) +-_LT_TAGDECL([], [whole_archive_flag_spec], [1], +- [Compiler flag to generate shared objects directly from archives]) +-_LT_TAGDECL([], [compiler_needs_object], [1], +- [Whether the compiler copes with passing no objects directly]) +-_LT_TAGDECL([], [old_archive_from_new_cmds], [2], +- [Create an old-style archive from a shared archive]) +-_LT_TAGDECL([], [old_archive_from_expsyms_cmds], [2], +- [Create a temporary old-style archive to link instead of a shared archive]) +-_LT_TAGDECL([], [archive_cmds], [2], [Commands used to build a shared archive]) +-_LT_TAGDECL([], [archive_expsym_cmds], [2]) +-_LT_TAGDECL([], [module_cmds], [2], +- [Commands used to build a loadable module if different from building +- a shared archive.]) +-_LT_TAGDECL([], [module_expsym_cmds], [2]) +-_LT_TAGDECL([], [with_gnu_ld], [1], +- [Whether we are building with GNU ld or not]) +-_LT_TAGDECL([], [allow_undefined_flag], [1], +- [Flag that allows shared libraries with undefined symbols to be built]) 
+-_LT_TAGDECL([], [no_undefined_flag], [1], +- [Flag that enforces no undefined symbols]) +-_LT_TAGDECL([], [hardcode_libdir_flag_spec], [1], +- [Flag to hardcode $libdir into a binary during linking. +- This must work even if $libdir does not exist]) +-_LT_TAGDECL([], [hardcode_libdir_separator], [1], +- [Whether we need a single "-rpath" flag with a separated argument]) +-_LT_TAGDECL([], [hardcode_direct], [0], +- [Set to "yes" if using DIR/libNAME$shared_ext during linking hardcodes +- DIR into the resulting binary]) +-_LT_TAGDECL([], [hardcode_direct_absolute], [0], +- [Set to "yes" if using DIR/libNAME$shared_ext during linking hardcodes +- DIR into the resulting binary and the resulting library dependency is +- "absolute", i.e impossible to change by setting $shlibpath_var if the +- library is relocated]) +-_LT_TAGDECL([], [hardcode_minus_L], [0], +- [Set to "yes" if using the -LDIR flag during linking hardcodes DIR +- into the resulting binary]) +-_LT_TAGDECL([], [hardcode_shlibpath_var], [0], +- [Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR +- into the resulting binary]) +-_LT_TAGDECL([], [hardcode_automatic], [0], +- [Set to "yes" if building a shared library automatically hardcodes DIR +- into the library and all subsequent libraries and executables linked +- against it]) +-_LT_TAGDECL([], [inherit_rpath], [0], +- [Set to yes if linker adds runtime paths of dependent libraries +- to runtime path list]) +-_LT_TAGDECL([], [link_all_deplibs], [0], +- [Whether libtool must link a program against all its dependency libraries]) +-_LT_TAGDECL([], [always_export_symbols], [0], +- [Set to "yes" if exported symbols are required]) +-_LT_TAGDECL([], [export_symbols_cmds], [2], +- [The commands to list exported symbols]) +-_LT_TAGDECL([], [exclude_expsyms], [1], +- [Symbols that should not be listed in the preloaded symbols]) +-_LT_TAGDECL([], [include_expsyms], [1], +- [Symbols that must always be exported]) +-_LT_TAGDECL([], [prelink_cmds], 
[2], +- [Commands necessary for linking programs (against libraries) with templates]) +-_LT_TAGDECL([], [postlink_cmds], [2], +- [Commands necessary for finishing linking programs]) +-_LT_TAGDECL([], [file_list_spec], [1], +- [Specify filename containing input files]) +-dnl FIXME: Not yet implemented +-dnl _LT_TAGDECL([], [thread_safe_flag_spec], [1], +-dnl [Compiler flag to generate thread safe objects]) +-])# _LT_LINKER_SHLIBS +- +- +-# _LT_LANG_C_CONFIG([TAG]) +-# ------------------------ +-# Ensure that the configuration variables for a C compiler are suitably +-# defined. These variables are subsequently used by _LT_CONFIG to write +-# the compiler configuration to 'libtool'. +-m4_defun([_LT_LANG_C_CONFIG], +-[m4_require([_LT_DECL_EGREP])dnl +-lt_save_CC=$CC +-AC_LANG_PUSH(C) +- +-# Source file extension for C test sources. +-ac_ext=c +- +-# Object file extension for compiled C test sources. +-objext=o +-_LT_TAGVAR(objext, $1)=$objext +- +-# Code to be used in simple compile tests +-lt_simple_compile_test_code="int some_variable = 0;" +- +-# Code to be used in simple link tests +-lt_simple_link_test_code='int main(){return(0);}' +- +-_LT_TAG_COMPILER +-# Save the default compiler, since it gets overwritten when the other +-# tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP. 
+-compiler_DEFAULT=$CC +- +-# save warnings/boilerplate of simple test code +-_LT_COMPILER_BOILERPLATE +-_LT_LINKER_BOILERPLATE +- +-if test -n "$compiler"; then +- _LT_COMPILER_NO_RTTI($1) +- _LT_COMPILER_PIC($1) +- _LT_COMPILER_C_O($1) +- _LT_COMPILER_FILE_LOCKS($1) +- _LT_LINKER_SHLIBS($1) +- _LT_SYS_DYNAMIC_LINKER($1) +- _LT_LINKER_HARDCODE_LIBPATH($1) +- LT_SYS_DLOPEN_SELF +- _LT_CMD_STRIPLIB +- +- # Report what library types will actually be built +- AC_MSG_CHECKING([if libtool supports shared libraries]) +- AC_MSG_RESULT([$can_build_shared]) +- +- AC_MSG_CHECKING([whether to build shared libraries]) +- test no = "$can_build_shared" && enable_shared=no +- +- # On AIX, shared libraries and static libraries use the same namespace, and +- # are all built from PIC. +- case $host_os in +- aix3*) +- test yes = "$enable_shared" && enable_static=no +- if test -n "$RANLIB"; then +- archive_cmds="$archive_cmds~\$RANLIB \$lib" +- postinstall_cmds='$RANLIB $lib' +- fi +- ;; +- +- aix[[4-9]]*) +- if test ia64 != "$host_cpu"; then +- case $enable_shared,$with_aix_soname,$aix_use_runtimelinking in +- yes,aix,yes) ;; # shared object as lib.so file only +- yes,svr4,*) ;; # shared object as lib.so archive member only +- yes,*) enable_static=no ;; # shared object in lib.a archive as well +- esac +- fi +- ;; +- esac +- AC_MSG_RESULT([$enable_shared]) +- +- AC_MSG_CHECKING([whether to build static libraries]) +- # Make sure either enable_shared or enable_static is yes. +- test yes = "$enable_shared" || enable_static=yes +- AC_MSG_RESULT([$enable_static]) +- +- _LT_CONFIG($1) +-fi +-AC_LANG_POP +-CC=$lt_save_CC +-])# _LT_LANG_C_CONFIG +- +- +-# _LT_LANG_CXX_CONFIG([TAG]) +-# -------------------------- +-# Ensure that the configuration variables for a C++ compiler are suitably +-# defined. These variables are subsequently used by _LT_CONFIG to write +-# the compiler configuration to 'libtool'. 
+-m4_defun([_LT_LANG_CXX_CONFIG], +-[m4_require([_LT_FILEUTILS_DEFAULTS])dnl +-m4_require([_LT_DECL_EGREP])dnl +-m4_require([_LT_PATH_MANIFEST_TOOL])dnl +-if test -n "$CXX" && ( test no != "$CXX" && +- ( (test g++ = "$CXX" && `g++ -v >/dev/null 2>&1` ) || +- (test g++ != "$CXX"))); then +- AC_PROG_CXXCPP +-else +- _lt_caught_CXX_error=yes +-fi +- +-AC_LANG_PUSH(C++) +-_LT_TAGVAR(archive_cmds_need_lc, $1)=no +-_LT_TAGVAR(allow_undefined_flag, $1)= +-_LT_TAGVAR(always_export_symbols, $1)=no +-_LT_TAGVAR(archive_expsym_cmds, $1)= +-_LT_TAGVAR(compiler_needs_object, $1)=no +-_LT_TAGVAR(export_dynamic_flag_spec, $1)= +-_LT_TAGVAR(hardcode_direct, $1)=no +-_LT_TAGVAR(hardcode_direct_absolute, $1)=no +-_LT_TAGVAR(hardcode_libdir_flag_spec, $1)= +-_LT_TAGVAR(hardcode_libdir_separator, $1)= +-_LT_TAGVAR(hardcode_minus_L, $1)=no +-_LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported +-_LT_TAGVAR(hardcode_automatic, $1)=no +-_LT_TAGVAR(inherit_rpath, $1)=no +-_LT_TAGVAR(module_cmds, $1)= +-_LT_TAGVAR(module_expsym_cmds, $1)= +-_LT_TAGVAR(link_all_deplibs, $1)=unknown +-_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds +-_LT_TAGVAR(reload_flag, $1)=$reload_flag +-_LT_TAGVAR(reload_cmds, $1)=$reload_cmds +-_LT_TAGVAR(no_undefined_flag, $1)= +-_LT_TAGVAR(whole_archive_flag_spec, $1)= +-_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no +- +-# Source file extension for C++ test sources. +-ac_ext=cpp +- +-# Object file extension for compiled C++ test sources. +-objext=o +-_LT_TAGVAR(objext, $1)=$objext +- +-# No sense in running all these tests if we already determined that +-# the CXX compiler isn't working. Some variables (like enable_shared) +-# are currently assumed to apply to all compilers on this platform, +-# and will be corrupted by setting them based on a non-working compiler. 
+-if test yes != "$_lt_caught_CXX_error"; then +- # Code to be used in simple compile tests +- lt_simple_compile_test_code="int some_variable = 0;" +- +- # Code to be used in simple link tests +- lt_simple_link_test_code='int main(int, char *[[]]) { return(0); }' +- +- # ltmain only uses $CC for tagged configurations so make sure $CC is set. +- _LT_TAG_COMPILER +- +- # save warnings/boilerplate of simple test code +- _LT_COMPILER_BOILERPLATE +- _LT_LINKER_BOILERPLATE +- +- # Allow CC to be a program name with arguments. +- lt_save_CC=$CC +- lt_save_CFLAGS=$CFLAGS +- lt_save_LD=$LD +- lt_save_GCC=$GCC +- GCC=$GXX +- lt_save_with_gnu_ld=$with_gnu_ld +- lt_save_path_LD=$lt_cv_path_LD +- if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then +- lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx +- else +- $as_unset lt_cv_prog_gnu_ld +- fi +- if test -n "${lt_cv_path_LDCXX+set}"; then +- lt_cv_path_LD=$lt_cv_path_LDCXX +- else +- $as_unset lt_cv_path_LD +- fi +- test -z "${LDCXX+set}" || LD=$LDCXX +- CC=${CXX-"c++"} +- CFLAGS=$CXXFLAGS +- compiler=$CC +- _LT_TAGVAR(compiler, $1)=$CC +- _LT_CC_BASENAME([$compiler]) +- +- if test -n "$compiler"; then +- # We don't want -fno-exception when compiling C++ code, so set the +- # no_builtin_flag separately +- if test yes = "$GXX"; then +- _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' +- else +- _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)= +- fi +- +- if test yes = "$GXX"; then +- # Set up default GNU C++ configuration +- +- LT_PATH_LD +- +- # Check if GNU C++ uses GNU ld as the underlying linker, since the +- # archiving commands below assume that GNU ld is being used. 
+- if test yes = "$with_gnu_ld"; then +- _LT_TAGVAR(archive_cmds, $1)='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' +- _LT_TAGVAR(archive_expsym_cmds, $1)='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' +- +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' +- _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic' +- +- # If archive_cmds runs LD, not CC, wlarc should be empty +- # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to +- # investigate it a little bit more. (MM) +- wlarc='$wl' +- +- # ancient GNU ld didn't support --whole-archive et. al. +- if eval "`$CC -print-prog-name=ld` --help 2>&1" | +- $GREP 'no-whole-archive' > /dev/null; then +- _LT_TAGVAR(whole_archive_flag_spec, $1)=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive' +- else +- _LT_TAGVAR(whole_archive_flag_spec, $1)= +- fi +- else +- with_gnu_ld=no +- wlarc= +- +- # A generic and very simple default shared library creation +- # command for GNU C++ for the case where it uses the native +- # linker, instead of GNU ld. If possible, this setting should +- # overridden to take advantage of the native linker features on +- # the platform it is being used on. +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' +- fi +- +- # Commands to make compiler produce verbose output that lists +- # what "hidden" libraries, object files and flags are used when +- # linking a shared library. 
+- output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' +- +- else +- GXX=no +- with_gnu_ld=no +- wlarc= +- fi +- +- # PORTME: fill in a description of your system's C++ link characteristics +- AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) +- _LT_TAGVAR(ld_shlibs, $1)=yes +- case $host_os in +- aix3*) +- # FIXME: insert proper C++ library support +- _LT_TAGVAR(ld_shlibs, $1)=no +- ;; +- aix[[4-9]]*) +- if test ia64 = "$host_cpu"; then +- # On IA64, the linker does run time linking by default, so we don't +- # have to do anything special. +- aix_use_runtimelinking=no +- exp_sym_flag='-Bexport' +- no_entry_flag= +- else +- aix_use_runtimelinking=no +- +- # Test if we are trying to use run time linking or normal +- # AIX style linking. If -brtl is somewhere in LDFLAGS, we +- # have runtime linking enabled, and use it for executables. +- # For shared libraries, we enable/disable runtime linking +- # depending on the kind of the shared library created - +- # when "with_aix_soname,aix_use_runtimelinking" is: +- # "aix,no" lib.a(lib.so.V) shared, rtl:no, for executables +- # "aix,yes" lib.so shared, rtl:yes, for executables +- # lib.a static archive +- # "both,no" lib.so.V(shr.o) shared, rtl:yes +- # lib.a(lib.so.V) shared, rtl:no, for executables +- # "both,yes" lib.so.V(shr.o) shared, rtl:yes, for executables +- # lib.a(lib.so.V) shared, rtl:no +- # "svr4,*" lib.so.V(shr.o) shared, rtl:yes, for executables +- # lib.a static archive +- case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*) +- for ld_flag in $LDFLAGS; do +- case $ld_flag in +- *-brtl*) +- aix_use_runtimelinking=yes +- break +- ;; +- esac +- done +- if test svr4,no = "$with_aix_soname,$aix_use_runtimelinking"; then +- # With aix-soname=svr4, we create the lib.so.V shared archives only, +- # so we don't have lib.a shared libs to link our executables. +- # We have to force runtime linking in this case. 
+- aix_use_runtimelinking=yes +- LDFLAGS="$LDFLAGS -Wl,-brtl" +- fi +- ;; +- esac +- +- exp_sym_flag='-bexport' +- no_entry_flag='-bnoentry' +- fi +- +- # When large executables or shared objects are built, AIX ld can +- # have problems creating the table of contents. If linking a library +- # or program results in "error TOC overflow" add -mminimal-toc to +- # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not +- # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. +- +- _LT_TAGVAR(archive_cmds, $1)='' +- _LT_TAGVAR(hardcode_direct, $1)=yes +- _LT_TAGVAR(hardcode_direct_absolute, $1)=yes +- _LT_TAGVAR(hardcode_libdir_separator, $1)=':' +- _LT_TAGVAR(link_all_deplibs, $1)=yes +- _LT_TAGVAR(file_list_spec, $1)='$wl-f,' +- case $with_aix_soname,$aix_use_runtimelinking in +- aix,*) ;; # no import file +- svr4,* | *,yes) # use import file +- # The Import File defines what to hardcode. +- _LT_TAGVAR(hardcode_direct, $1)=no +- _LT_TAGVAR(hardcode_direct_absolute, $1)=no +- ;; +- esac +- +- if test yes = "$GXX"; then +- case $host_os in aix4.[[012]]|aix4.[[012]].*) +- # We only want to do this on AIX 4.2 and lower, the check +- # below for broken collect2 doesn't work under 4.3+ +- collect2name=`$CC -print-prog-name=collect2` +- if test -f "$collect2name" && +- strings "$collect2name" | $GREP resolve_lib_name >/dev/null +- then +- # We have reworked collect2 +- : +- else +- # We have old collect2 +- _LT_TAGVAR(hardcode_direct, $1)=unsupported +- # It fails to find uninstalled libraries when the uninstalled +- # path is not listed in the libpath. 
Setting hardcode_minus_L +- # to unsupported forces relinking +- _LT_TAGVAR(hardcode_minus_L, $1)=yes +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' +- _LT_TAGVAR(hardcode_libdir_separator, $1)= +- fi +- esac +- shared_flag='-shared' +- if test yes = "$aix_use_runtimelinking"; then +- shared_flag=$shared_flag' $wl-G' +- fi +- # Need to ensure runtime linking is disabled for the traditional +- # shared library, or the linker may eventually find shared libraries +- # /with/ Import File - we do not want to mix them. +- shared_flag_aix='-shared' +- shared_flag_svr4='-shared $wl-G' +- else +- # not using gcc +- if test ia64 = "$host_cpu"; then +- # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release +- # chokes on -Wl,-G. The following line is correct: +- shared_flag='-G' +- else +- if test yes = "$aix_use_runtimelinking"; then +- shared_flag='$wl-G' +- else +- shared_flag='$wl-bM:SRE' +- fi +- shared_flag_aix='$wl-bM:SRE' +- shared_flag_svr4='$wl-G' +- fi +- fi +- +- _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-bexpall' +- # It seems that -bexpall does not export symbols beginning with +- # underscore (_), so it is better to generate a list of symbols to +- # export. +- _LT_TAGVAR(always_export_symbols, $1)=yes +- if test aix,yes = "$with_aix_soname,$aix_use_runtimelinking"; then +- # Warning - without using the other runtime loading flags (-brtl), +- # -berok will link without error, but may produce a broken library. +- # The "-G" linker flag allows undefined symbols. +- _LT_TAGVAR(no_undefined_flag, $1)='-bernotok' +- # Determine the default libpath from the value encoded in an empty +- # executable. 
+- _LT_SYS_MODULE_PATH_AIX([$1]) +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-blibpath:$libdir:'"$aix_libpath" +- +- _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs $wl'$no_entry_flag' $compiler_flags `if test -n "$allow_undefined_flag"; then func_echo_all "$wl$allow_undefined_flag"; else :; fi` $wl'$exp_sym_flag:\$export_symbols' '$shared_flag +- else +- if test ia64 = "$host_cpu"; then +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R $libdir:/usr/lib:/lib' +- _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs" +- _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\$wl$no_entry_flag"' $compiler_flags $wl$allow_undefined_flag '"\$wl$exp_sym_flag:\$export_symbols" +- else +- # Determine the default libpath from the value encoded in an +- # empty executable. +- _LT_SYS_MODULE_PATH_AIX([$1]) +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-blibpath:$libdir:'"$aix_libpath" +- # Warning - without using the other run time loading flags, +- # -berok will link without error, but may produce a broken library. +- _LT_TAGVAR(no_undefined_flag, $1)=' $wl-bernotok' +- _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-berok' +- if test yes = "$with_gnu_ld"; then +- # We only use this code for GNU lds that support --whole-archive. 
+- _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive$convenience $wl--no-whole-archive' +- else +- # Exported symbols can be pulled into shared objects from archives +- _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience' +- fi +- _LT_TAGVAR(archive_cmds_need_lc, $1)=yes +- _LT_TAGVAR(archive_expsym_cmds, $1)='$RM -r $output_objdir/$realname.d~$MKDIR $output_objdir/$realname.d' +- # -brtl affects multiple linker settings, -berok does not and is overridden later +- compiler_flags_filtered='`func_echo_all "$compiler_flags " | $SED -e "s%-brtl\\([[, ]]\\)%-berok\\1%g"`' +- if test svr4 != "$with_aix_soname"; then +- # This is similar to how AIX traditionally builds its shared +- # libraries. Need -bnortl late, we may have -brtl in LDFLAGS. +- _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$CC '$shared_flag_aix' -o $output_objdir/$realname.d/$soname $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$realname.d/$soname' +- fi +- if test aix != "$with_aix_soname"; then +- _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$CC '$shared_flag_svr4' -o $output_objdir/$realname.d/$shared_archive_member_spec.o $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$STRIP -e $output_objdir/$realname.d/$shared_archive_member_spec.o~( func_echo_all "#! 
$soname($shared_archive_member_spec.o)"; if test shr_64 = "$shared_archive_member_spec"; then func_echo_all "# 64"; else func_echo_all "# 32"; fi; cat $export_symbols ) > $output_objdir/$realname.d/$shared_archive_member_spec.imp~$AR $AR_FLAGS $output_objdir/$soname $output_objdir/$realname.d/$shared_archive_member_spec.o $output_objdir/$realname.d/$shared_archive_member_spec.imp' +- else +- # used by -dlpreopen to get the symbols +- _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$MV $output_objdir/$realname.d/$soname $output_objdir' +- fi +- _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$RM -r $output_objdir/$realname.d' +- fi +- fi +- ;; +- +- beos*) +- if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then +- _LT_TAGVAR(allow_undefined_flag, $1)=unsupported +- # Joseph Beckenbach says some releases of gcc +- # support --undefined. This deserves some investigation. FIXME +- _LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' +- else +- _LT_TAGVAR(ld_shlibs, $1)=no +- fi +- ;; +- +- chorus*) +- case $cc_basename in +- *) +- # FIXME: insert proper C++ library support +- _LT_TAGVAR(ld_shlibs, $1)=no +- ;; +- esac +- ;; +- +- cygwin* | mingw* | pw32* | cegcc*) +- case $GXX,$cc_basename in +- ,cl* | no,cl* | ,icl* | no,icl*) +- # Native MSVC or ICC +- # hardcode_libdir_flag_spec is actually meaningless, as there is +- # no search path for DLLs. +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' +- _LT_TAGVAR(allow_undefined_flag, $1)=unsupported +- _LT_TAGVAR(always_export_symbols, $1)=yes +- _LT_TAGVAR(file_list_spec, $1)='@' +- # Tell ltmain to make .lib files, not .a files. +- libext=lib +- # Tell ltmain to make .dll files, not .so files. +- shrext_cmds=.dll +- # FIXME: Setting linknames here is a bad hack. 
+- _LT_TAGVAR(archive_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~linknames=' +- _LT_TAGVAR(archive_expsym_cmds, $1)='if _LT_DLL_DEF_P([$export_symbols]); then +- cp "$export_symbols" "$output_objdir/$soname.def"; +- echo "$tool_output_objdir$soname.def" > "$output_objdir/$soname.exp"; +- else +- $SED -e '\''s/^/-link -EXPORT:/'\'' < $export_symbols > $output_objdir/$soname.exp; +- fi~ +- $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ +- linknames=' +- # The linker will not automatically build a static lib if we build a DLL. +- # _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' +- _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes +- # Don't use ranlib +- _LT_TAGVAR(old_postinstall_cmds, $1)='chmod 644 $oldlib' +- _LT_TAGVAR(postlink_cmds, $1)='lt_outputfile="@OUTPUT@"~ +- lt_tool_outputfile="@TOOL_OUTPUT@"~ +- case $lt_outputfile in +- *.exe|*.EXE) ;; +- *) +- lt_outputfile=$lt_outputfile.exe +- lt_tool_outputfile=$lt_tool_outputfile.exe +- ;; +- esac~ +- func_to_tool_file "$lt_outputfile"~ +- if test : != "$MANIFEST_TOOL" && test -f "$lt_outputfile.manifest"; then +- $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; +- $RM "$lt_outputfile.manifest"; +- fi' +- ;; +- *) +- # g++ +- # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, +- # as there is no search path for DLLs. 
+- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' +- _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-all-symbols' +- _LT_TAGVAR(allow_undefined_flag, $1)=unsupported +- _LT_TAGVAR(always_export_symbols, $1)=no +- _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes +- +- if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' +- # If the export-symbols file already is a .def file, use it as +- # is; otherwise, prepend EXPORTS... +- _LT_TAGVAR(archive_expsym_cmds, $1)='if _LT_DLL_DEF_P([$export_symbols]); then +- cp $export_symbols $output_objdir/$soname.def; +- else +- echo EXPORTS > $output_objdir/$soname.def; +- cat $export_symbols >> $output_objdir/$soname.def; +- fi~ +- $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' +- else +- _LT_TAGVAR(ld_shlibs, $1)=no +- fi +- ;; +- esac +- ;; +- darwin* | rhapsody*) +- _LT_DARWIN_LINKER_FEATURES($1) +- ;; +- +- os2*) +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' +- _LT_TAGVAR(hardcode_minus_L, $1)=yes +- _LT_TAGVAR(allow_undefined_flag, $1)=unsupported +- shrext_cmds=.dll +- _LT_TAGVAR(archive_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ +- $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ +- $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ +- $ECHO EXPORTS >> $output_objdir/$libname.def~ +- emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ +- $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ +- emximp -o $lib $output_objdir/$libname.def' +- 
_LT_TAGVAR(archive_expsym_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ +- $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ +- $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ +- $ECHO EXPORTS >> $output_objdir/$libname.def~ +- prefix_cmds="$SED"~ +- if test EXPORTS = "`$SED 1q $export_symbols`"; then +- prefix_cmds="$prefix_cmds -e 1d"; +- fi~ +- prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ +- cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ +- $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ +- emximp -o $lib $output_objdir/$libname.def' +- _LT_TAGVAR(old_archive_From_new_cmds, $1)='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' +- _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes +- _LT_TAGVAR(file_list_spec, $1)='@' +- ;; +- +- dgux*) +- case $cc_basename in +- ec++*) +- # FIXME: insert proper C++ library support +- _LT_TAGVAR(ld_shlibs, $1)=no +- ;; +- ghcx*) +- # Green Hills C++ Compiler +- # FIXME: insert proper C++ library support +- _LT_TAGVAR(ld_shlibs, $1)=no +- ;; +- *) +- # FIXME: insert proper C++ library support +- _LT_TAGVAR(ld_shlibs, $1)=no +- ;; +- esac +- ;; +- +- freebsd2.*) +- # C++ shared libraries reported to be fairly broken before +- # switch to ELF +- _LT_TAGVAR(ld_shlibs, $1)=no +- ;; +- +- freebsd-elf*) +- _LT_TAGVAR(archive_cmds_need_lc, $1)=no +- ;; +- +- freebsd* | dragonfly* | midnightbsd*) +- # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF +- # conventions +- _LT_TAGVAR(ld_shlibs, $1)=yes +- ;; +- +- haiku*) +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' +- _LT_TAGVAR(link_all_deplibs, $1)=yes +- ;; +- +- hpux9*) +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir' +- _LT_TAGVAR(hardcode_libdir_separator, $1)=: +- 
_LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' +- _LT_TAGVAR(hardcode_direct, $1)=yes +- _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, +- # but as the default +- # location of the library. +- +- case $cc_basename in +- CC*) +- # FIXME: insert proper C++ library support +- _LT_TAGVAR(ld_shlibs, $1)=no +- ;; +- aCC*) +- _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -b $wl+b $wl$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' +- # Commands to make compiler produce verbose output that lists +- # what "hidden" libraries, object files and flags are used when +- # linking a shared library. +- # +- # There doesn't appear to be a way to prevent this compiler from +- # explicitly linking system object files so we need to strip them +- # from the output so that they don't get included in the library +- # dependencies. +- output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP "\-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' +- ;; +- *) +- if test yes = "$GXX"; then +- _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared -nostdlib $pic_flag $wl+b $wl$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' +- else +- # FIXME: insert proper C++ library support +- _LT_TAGVAR(ld_shlibs, $1)=no +- fi +- ;; +- esac +- ;; +- +- hpux10*|hpux11*) +- if test no = "$with_gnu_ld"; then +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir' +- _LT_TAGVAR(hardcode_libdir_separator, $1)=: +- +- case $host_cpu in +- hppa*64*|ia64*) +- ;; +- *) +- _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' +- ;; +- esac +- fi +- case $host_cpu in +- 
hppa*64*|ia64*) +- _LT_TAGVAR(hardcode_direct, $1)=no +- _LT_TAGVAR(hardcode_shlibpath_var, $1)=no +- ;; +- *) +- _LT_TAGVAR(hardcode_direct, $1)=yes +- _LT_TAGVAR(hardcode_direct_absolute, $1)=yes +- _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, +- # but as the default +- # location of the library. +- ;; +- esac +- +- case $cc_basename in +- CC*) +- # FIXME: insert proper C++ library support +- _LT_TAGVAR(ld_shlibs, $1)=no +- ;; +- aCC*) +- case $host_cpu in +- hppa*64*) +- _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' +- ;; +- ia64*) +- _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' +- ;; +- *) +- _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' +- ;; +- esac +- # Commands to make compiler produce verbose output that lists +- # what "hidden" libraries, object files and flags are used when +- # linking a shared library. +- # +- # There doesn't appear to be a way to prevent this compiler from +- # explicitly linking system object files so we need to strip them +- # from the output so that they don't get included in the library +- # dependencies. 
+- output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP "\-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' +- ;; +- *) +- if test yes = "$GXX"; then +- if test no = "$with_gnu_ld"; then +- case $host_cpu in +- hppa*64*) +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC $wl+h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' +- ;; +- ia64*) +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $pic_flag $wl+h $wl$soname $wl+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' +- ;; +- *) +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' +- ;; +- esac +- fi +- else +- # FIXME: insert proper C++ library support +- _LT_TAGVAR(ld_shlibs, $1)=no +- fi +- ;; +- esac +- ;; +- +- interix[[3-9]]*) +- _LT_TAGVAR(hardcode_direct, $1)=no +- _LT_TAGVAR(hardcode_shlibpath_var, $1)=no +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' +- _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' +- # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. +- # Instead, shared libraries are loaded at an image base (0x10000000 by +- # default) and relocated if they conflict, which is a slow very memory +- # consuming and fragmenting process. To avoid this, we pick a random, +- # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link +- # time. Moving up from 0x10000000 also allows more sbrk(2) space. 
+- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' +- _LT_TAGVAR(archive_expsym_cmds, $1)='$SED "s|^|_|" $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--retain-symbols-file,$output_objdir/$soname.expsym $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' +- ;; +- irix5* | irix6*) +- case $cc_basename in +- CC*) +- # SGI C++ +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' +- +- # Archives containing C++ object files must be created using +- # "CC -ar", where "CC" is the IRIX C++ compiler. This is +- # necessary to make sure instantiated templates are included +- # in the archive. 
+- _LT_TAGVAR(old_archive_cmds, $1)='$CC -ar -WR,-u -o $oldlib $oldobjs' +- ;; +- *) +- if test yes = "$GXX"; then +- if test no = "$with_gnu_ld"; then +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' +- else +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` -o $lib' +- fi +- fi +- _LT_TAGVAR(link_all_deplibs, $1)=yes +- ;; +- esac +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' +- _LT_TAGVAR(hardcode_libdir_separator, $1)=: +- _LT_TAGVAR(inherit_rpath, $1)=yes +- ;; +- +- linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) +- case $cc_basename in +- KCC*) +- # Kuck and Associates, Inc. (KAI) C++ Compiler +- +- # KCC will only create a shared library if the output file +- # ends with ".so" (or ".sl" for HP-UX), so rename the library +- # to its proper name (with version) after linking. 
+- _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' +- _LT_TAGVAR(archive_expsym_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib $wl-retain-symbols-file,$export_symbols; mv \$templib $lib' +- # Commands to make compiler produce verbose output that lists +- # what "hidden" libraries, object files and flags are used when +- # linking a shared library. +- # +- # There doesn't appear to be a way to prevent this compiler from +- # explicitly linking system object files so we need to strip them +- # from the output so that they don't get included in the library +- # dependencies. +- output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' +- +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' +- _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic' +- +- # Archives containing C++ object files must be created using +- # "CC -Bstatic", where "CC" is the KAI C++ compiler. +- _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' +- ;; +- icpc* | ecpc* ) +- # Intel C++ +- with_gnu_ld=yes +- # version 8.0 and above of icpc choke on multiply defined symbols +- # if we add $predep_objects and $postdep_objects, however 7.1 and +- # earlier do not add the objects themselves. 
+- case `$CC -V 2>&1` in +- *"Version 7."*) +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' +- _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' +- ;; +- *) # Version 8.0 or newer +- tmp_idyn= +- case $host_cpu in +- ia64*) tmp_idyn=' -i_dynamic';; +- esac +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' +- _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' +- ;; +- esac +- _LT_TAGVAR(archive_cmds_need_lc, $1)=no +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' +- _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic' +- _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive$convenience $wl--no-whole-archive' +- ;; +- pgCC* | pgcpp*) +- # Portland Group C++ compiler +- case `$CC -V` in +- *pgCC\ [[1-5]].* | *pgcpp\ [[1-5]].*) +- _LT_TAGVAR(prelink_cmds, $1)='tpldir=Template.dir~ +- rm -rf $tpldir~ +- $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~ +- compile_command="$compile_command `find $tpldir -name \*.o | sort | $NL2SP`"' +- _LT_TAGVAR(old_archive_cmds, $1)='tpldir=Template.dir~ +- rm -rf $tpldir~ +- $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~ +- $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | sort | $NL2SP`~ +- $RANLIB $oldlib' +- _LT_TAGVAR(archive_cmds, $1)='tpldir=Template.dir~ +- rm -rf $tpldir~ +- $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ +- $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` 
$postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' +- _LT_TAGVAR(archive_expsym_cmds, $1)='tpldir=Template.dir~ +- rm -rf $tpldir~ +- $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ +- $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' +- ;; +- *) # Version 6 and above use weak symbols +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' +- _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' +- ;; +- esac +- +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl--rpath $wl$libdir' +- _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic' +- _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' +- ;; +- cxx*) +- # Compaq C++ +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' +- _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib $wl-retain-symbols-file $wl$export_symbols' +- +- runpath_var=LD_RUN_PATH +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' +- _LT_TAGVAR(hardcode_libdir_separator, $1)=: +- +- # Commands to make compiler produce verbose output that lists +- # what "hidden" libraries, object files and flags are used when +- # linking a shared library. 
+- # +- # There doesn't appear to be a way to prevent this compiler from +- # explicitly linking system object files so we need to strip them +- # from the output so that they don't get included in the library +- # dependencies. +- output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "X$list" | $Xsed' +- ;; +- xl* | mpixl* | bgxl*) +- # IBM XL 8.0 on PPC, with GNU ld +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' +- _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic' +- _LT_TAGVAR(archive_cmds, $1)='$CC -qmkshrobj $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' +- if test yes = "$supports_anon_versioning"; then +- _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ +- cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ +- echo "local: *; };" >> $output_objdir/$libname.ver~ +- $CC -qmkshrobj $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-version-script $wl$output_objdir/$libname.ver -o $lib' +- fi +- ;; +- *) +- case `$CC -V 2>&1 | $SED 5q` in +- *Sun\ C*) +- # Sun C++ 5.9 +- _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs' +- _LT_TAGVAR(archive_cmds, $1)='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' +- _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-retain-symbols-file $wl$export_symbols' +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' +- _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; 
func_echo_all \"$new_convenience\"` $wl--no-whole-archive' +- _LT_TAGVAR(compiler_needs_object, $1)=yes +- +- # Not sure whether something based on +- # $CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 +- # would be better. +- output_verbose_link_cmd='func_echo_all' +- +- # Archives containing C++ object files must be created using +- # "CC -xar", where "CC" is the Sun C++ compiler. This is +- # necessary to make sure instantiated templates are included +- # in the archive. +- _LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs' +- ;; +- esac +- ;; +- esac +- ;; +- +- lynxos*) +- # FIXME: insert proper C++ library support +- _LT_TAGVAR(ld_shlibs, $1)=no +- ;; +- +- m88k*) +- # FIXME: insert proper C++ library support +- _LT_TAGVAR(ld_shlibs, $1)=no +- ;; +- +- mvs*) +- case $cc_basename in +- cxx*) +- # FIXME: insert proper C++ library support +- _LT_TAGVAR(ld_shlibs, $1)=no +- ;; +- *) +- # FIXME: insert proper C++ library support +- _LT_TAGVAR(ld_shlibs, $1)=no +- ;; +- esac +- ;; +- +- netbsd*) +- if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then +- _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags' +- wlarc= +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' +- _LT_TAGVAR(hardcode_direct, $1)=yes +- _LT_TAGVAR(hardcode_shlibpath_var, $1)=no +- fi +- # Workaround some broken pre-1.5 toolchains +- output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"' +- ;; +- +- *nto* | *qnx*) +- _LT_TAGVAR(ld_shlibs, $1)=yes +- ;; +- +- openbsd* | bitrig*) +- if test -f /usr/libexec/ld.so; then +- _LT_TAGVAR(hardcode_direct, $1)=yes +- _LT_TAGVAR(hardcode_shlibpath_var, $1)=no +- _LT_TAGVAR(hardcode_direct_absolute, $1)=yes +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' +- _LT_TAGVAR(hardcode_libdir_flag_spec, 
$1)='$wl-rpath,$libdir' +- if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`"; then +- _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-retain-symbols-file,$export_symbols -o $lib' +- _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' +- _LT_TAGVAR(whole_archive_flag_spec, $1)=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive' +- fi +- output_verbose_link_cmd=func_echo_all +- else +- _LT_TAGVAR(ld_shlibs, $1)=no +- fi +- ;; +- +- osf3* | osf4* | osf5*) +- case $cc_basename in +- KCC*) +- # Kuck and Associates, Inc. (KAI) C++ Compiler +- +- # KCC will only create a shared library if the output file +- # ends with ".so" (or ".sl" for HP-UX), so rename the library +- # to its proper name (with version) after linking. +- _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' +- +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' +- _LT_TAGVAR(hardcode_libdir_separator, $1)=: +- +- # Archives containing C++ object files must be created using +- # the KAI C++ compiler. 
+- case $host in +- osf3*) _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' ;; +- *) _LT_TAGVAR(old_archive_cmds, $1)='$CC -o $oldlib $oldobjs' ;; +- esac +- ;; +- RCC*) +- # Rational C++ 2.4.1 +- # FIXME: insert proper C++ library support +- _LT_TAGVAR(ld_shlibs, $1)=no +- ;; +- cxx*) +- case $host in +- osf3*) +- _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-expect_unresolved $wl\*' +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $soname `test -n "$verstring" && func_echo_all "$wl-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' +- ;; +- *) +- _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' +- _LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~ +- echo "-hidden">> $lib.exp~ +- $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname $wl-input $wl$lib.exp `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib~ +- $RM $lib.exp' +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' +- ;; +- esac +- +- _LT_TAGVAR(hardcode_libdir_separator, $1)=: +- +- # Commands to make compiler produce verbose output that lists +- # what "hidden" libraries, object files and flags are used when +- # linking a shared library. 
+- # +- # There doesn't appear to be a way to prevent this compiler from +- # explicitly linking system object files so we need to strip them +- # from the output so that they don't get included in the library +- # dependencies. +- output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' +- ;; +- *) +- if test yes,no = "$GXX,$with_gnu_ld"; then +- _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-expect_unresolved $wl\*' +- case $host in +- osf3*) +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' +- ;; +- *) +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-msym $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' +- ;; +- esac +- +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' +- _LT_TAGVAR(hardcode_libdir_separator, $1)=: +- +- # Commands to make compiler produce verbose output that lists +- # what "hidden" libraries, object files and flags are used when +- # linking a shared library. 
+- output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' +- +- else +- # FIXME: insert proper C++ library support +- _LT_TAGVAR(ld_shlibs, $1)=no +- fi +- ;; +- esac +- ;; +- +- psos*) +- # FIXME: insert proper C++ library support +- _LT_TAGVAR(ld_shlibs, $1)=no +- ;; +- +- sunos4*) +- case $cc_basename in +- CC*) +- # Sun C++ 4.x +- # FIXME: insert proper C++ library support +- _LT_TAGVAR(ld_shlibs, $1)=no +- ;; +- lcc*) +- # Lucid +- # FIXME: insert proper C++ library support +- _LT_TAGVAR(ld_shlibs, $1)=no +- ;; +- *) +- # FIXME: insert proper C++ library support +- _LT_TAGVAR(ld_shlibs, $1)=no +- ;; +- esac +- ;; +- +- solaris*) +- case $cc_basename in +- CC* | sunCC*) +- # Sun C++ 4.2, 5.x and Centerline C++ +- _LT_TAGVAR(archive_cmds_need_lc,$1)=yes +- _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs' +- _LT_TAGVAR(archive_cmds, $1)='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' +- _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ +- $CC -G$allow_undefined_flag $wl-M $wl$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' +- +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' +- _LT_TAGVAR(hardcode_shlibpath_var, $1)=no +- case $host_os in +- solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; +- *) +- # The compiler driver will combine and reorder linker options, +- # but understands '-z linker_flag'. +- # Supported since Solaris 2.6 (maybe 2.5.1?) +- _LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract' +- ;; +- esac +- _LT_TAGVAR(link_all_deplibs, $1)=yes +- +- output_verbose_link_cmd='func_echo_all' +- +- # Archives containing C++ object files must be created using +- # "CC -xar", where "CC" is the Sun C++ compiler. 
This is +- # necessary to make sure instantiated templates are included +- # in the archive. +- _LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs' +- ;; +- gcx*) +- # Green Hills C++ Compiler +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib' +- +- # The C++ compiler must be used to create the archive. +- _LT_TAGVAR(old_archive_cmds, $1)='$CC $LDFLAGS -archive -o $oldlib $oldobjs' +- ;; +- *) +- # GNU C++ compiler with Solaris linker +- if test yes,no = "$GXX,$with_gnu_ld"; then +- _LT_TAGVAR(no_undefined_flag, $1)=' $wl-z ${wl}defs' +- if $CC --version | $GREP -v '^2\.7' > /dev/null; then +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib' +- _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ +- $CC -shared $pic_flag -nostdlib $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' +- +- # Commands to make compiler produce verbose output that lists +- # what "hidden" libraries, object files and flags are used when +- # linking a shared library. +- output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' +- else +- # g++ 2.7 appears to require '-G' NOT '-shared' on this +- # platform. 
+- _LT_TAGVAR(archive_cmds, $1)='$CC -G -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib' +- _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ +- $CC -G -nostdlib $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' +- +- # Commands to make compiler produce verbose output that lists +- # what "hidden" libraries, object files and flags are used when +- # linking a shared library. +- output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' +- fi +- +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R $wl$libdir' +- case $host_os in +- solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; +- *) +- _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl-z ${wl}allextract$convenience $wl-z ${wl}defaultextract' +- ;; +- esac +- fi +- ;; +- esac +- ;; +- +- sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*) +- _LT_TAGVAR(no_undefined_flag, $1)='$wl-z,text' +- _LT_TAGVAR(archive_cmds_need_lc, $1)=no +- _LT_TAGVAR(hardcode_shlibpath_var, $1)=no +- runpath_var='LD_RUN_PATH' +- +- case $cc_basename in +- CC*) +- _LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' +- _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' +- ;; +- *) +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' +- _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' +- ;; +- esac +- ;; +- +- sysv5* | sco3.2v5* | sco5v6*) +- # Note: We CANNOT use -z defs as we might desire, because we do not +- # link with -lc, and that would cause any symbols used from libc to +- 
# always be unresolved, which means just about no library would +- # ever link correctly. If we're not using GNU ld we use -z text +- # though, which does catch some bad symbols but isn't as heavy-handed +- # as -z defs. +- _LT_TAGVAR(no_undefined_flag, $1)='$wl-z,text' +- _LT_TAGVAR(allow_undefined_flag, $1)='$wl-z,nodefs' +- _LT_TAGVAR(archive_cmds_need_lc, $1)=no +- _LT_TAGVAR(hardcode_shlibpath_var, $1)=no +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R,$libdir' +- _LT_TAGVAR(hardcode_libdir_separator, $1)=':' +- _LT_TAGVAR(link_all_deplibs, $1)=yes +- _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-Bexport' +- runpath_var='LD_RUN_PATH' +- +- case $cc_basename in +- CC*) +- _LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' +- _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' +- _LT_TAGVAR(old_archive_cmds, $1)='$CC -Tprelink_objects $oldobjs~ +- '"$_LT_TAGVAR(old_archive_cmds, $1)" +- _LT_TAGVAR(reload_cmds, $1)='$CC -Tprelink_objects $reload_objs~ +- '"$_LT_TAGVAR(reload_cmds, $1)" +- ;; +- *) +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' +- _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' +- ;; +- esac +- ;; +- +- tandem*) +- case $cc_basename in +- NCC*) +- # NonStop-UX NCC 3.20 +- # FIXME: insert proper C++ library support +- _LT_TAGVAR(ld_shlibs, $1)=no +- ;; +- *) +- # FIXME: insert proper C++ library support +- _LT_TAGVAR(ld_shlibs, $1)=no +- ;; +- esac +- ;; +- +- vxworks*) +- # FIXME: insert proper C++ library support +- _LT_TAGVAR(ld_shlibs, $1)=no +- ;; +- +- *) +- # FIXME: insert proper C++ library support +- _LT_TAGVAR(ld_shlibs, $1)=no +- ;; +- esac +- +- AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)]) +- test no = "$_LT_TAGVAR(ld_shlibs, $1)" && can_build_shared=no +- +- _LT_TAGVAR(GCC, 
$1)=$GXX +- _LT_TAGVAR(LD, $1)=$LD +- +- ## CAVEAT EMPTOR: +- ## There is no encapsulation within the following macros, do not change +- ## the running order or otherwise move them around unless you know exactly +- ## what you are doing... +- _LT_SYS_HIDDEN_LIBDEPS($1) +- _LT_COMPILER_PIC($1) +- _LT_COMPILER_C_O($1) +- _LT_COMPILER_FILE_LOCKS($1) +- _LT_LINKER_SHLIBS($1) +- _LT_SYS_DYNAMIC_LINKER($1) +- _LT_LINKER_HARDCODE_LIBPATH($1) +- +- _LT_CONFIG($1) +- fi # test -n "$compiler" +- +- CC=$lt_save_CC +- CFLAGS=$lt_save_CFLAGS +- LDCXX=$LD +- LD=$lt_save_LD +- GCC=$lt_save_GCC +- with_gnu_ld=$lt_save_with_gnu_ld +- lt_cv_path_LDCXX=$lt_cv_path_LD +- lt_cv_path_LD=$lt_save_path_LD +- lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld +- lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld +-fi # test yes != "$_lt_caught_CXX_error" +- +-AC_LANG_POP +-])# _LT_LANG_CXX_CONFIG +- +- +-# _LT_FUNC_STRIPNAME_CNF +-# ---------------------- +-# func_stripname_cnf prefix suffix name +-# strip PREFIX and SUFFIX off of NAME. +-# PREFIX and SUFFIX must not contain globbing or regex special +-# characters, hashes, percent signs, but SUFFIX may contain a leading +-# dot (in which case that matches only a dot). +-# +-# This function is identical to the (non-XSI) version of func_stripname, +-# except this one can be used by m4 code that may be executed by configure, +-# rather than the libtool script. +-m4_defun([_LT_FUNC_STRIPNAME_CNF],[dnl +-AC_REQUIRE([_LT_DECL_SED]) +-AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH]) +-func_stripname_cnf () +-{ +- case @S|@2 in +- .*) func_stripname_result=`$ECHO "@S|@3" | $SED "s%^@S|@1%%; s%\\\\@S|@2\$%%"`;; +- *) func_stripname_result=`$ECHO "@S|@3" | $SED "s%^@S|@1%%; s%@S|@2\$%%"`;; +- esac +-} # func_stripname_cnf +-])# _LT_FUNC_STRIPNAME_CNF +- +- +-# _LT_SYS_HIDDEN_LIBDEPS([TAGNAME]) +-# --------------------------------- +-# Figure out "hidden" library dependencies from verbose +-# compiler output when linking a shared library. 
+-# Parse the compiler output and extract the necessary +-# objects, libraries and library flags. +-m4_defun([_LT_SYS_HIDDEN_LIBDEPS], +-[m4_require([_LT_FILEUTILS_DEFAULTS])dnl +-AC_REQUIRE([_LT_FUNC_STRIPNAME_CNF])dnl +-# Dependencies to place before and after the object being linked: +-_LT_TAGVAR(predep_objects, $1)= +-_LT_TAGVAR(postdep_objects, $1)= +-_LT_TAGVAR(predeps, $1)= +-_LT_TAGVAR(postdeps, $1)= +-_LT_TAGVAR(compiler_lib_search_path, $1)= +- +-dnl we can't use the lt_simple_compile_test_code here, +-dnl because it contains code intended for an executable, +-dnl not a library. It's possible we should let each +-dnl tag define a new lt_????_link_test_code variable, +-dnl but it's only used here... +-m4_if([$1], [], [cat > conftest.$ac_ext <<_LT_EOF +-int a; +-void foo (void) { a = 0; } +-_LT_EOF +-], [$1], [CXX], [cat > conftest.$ac_ext <<_LT_EOF +-class Foo +-{ +-public: +- Foo (void) { a = 0; } +-private: +- int a; +-}; +-_LT_EOF +-], [$1], [F77], [cat > conftest.$ac_ext <<_LT_EOF +- subroutine foo +- implicit none +- integer*4 a +- a=0 +- return +- end +-_LT_EOF +-], [$1], [FC], [cat > conftest.$ac_ext <<_LT_EOF +- subroutine foo +- implicit none +- integer a +- a=0 +- return +- end +-_LT_EOF +-], [$1], [GCJ], [cat > conftest.$ac_ext <<_LT_EOF +-public class foo { +- private int a; +- public void bar (void) { +- a = 0; +- } +-}; +-_LT_EOF +-], [$1], [GO], [cat > conftest.$ac_ext <<_LT_EOF +-package foo +-func foo() { +-} +-_LT_EOF +-]) +- +-_lt_libdeps_save_CFLAGS=$CFLAGS +-case "$CC $CFLAGS " in #( +-*\ -flto*\ *) CFLAGS="$CFLAGS -fno-lto" ;; +-*\ -fwhopr*\ *) CFLAGS="$CFLAGS -fno-whopr" ;; +-*\ -fuse-linker-plugin*\ *) CFLAGS="$CFLAGS -fno-use-linker-plugin" ;; +-esac +- +-dnl Parse the compiler output and extract the necessary +-dnl objects, libraries and library flags. +-if AC_TRY_EVAL(ac_compile); then +- # Parse the compiler output and extract the necessary +- # objects, libraries and library flags. 
+- +- # Sentinel used to keep track of whether or not we are before +- # the conftest object file. +- pre_test_object_deps_done=no +- +- for p in `eval "$output_verbose_link_cmd"`; do +- case $prev$p in +- +- -L* | -R* | -l*) +- # Some compilers place space between "-{L,R}" and the path. +- # Remove the space. +- if test x-L = "$p" || +- test x-R = "$p"; then +- prev=$p +- continue +- fi +- +- # Expand the sysroot to ease extracting the directories later. +- if test -z "$prev"; then +- case $p in +- -L*) func_stripname_cnf '-L' '' "$p"; prev=-L; p=$func_stripname_result ;; +- -R*) func_stripname_cnf '-R' '' "$p"; prev=-R; p=$func_stripname_result ;; +- -l*) func_stripname_cnf '-l' '' "$p"; prev=-l; p=$func_stripname_result ;; +- esac +- fi +- case $p in +- =*) func_stripname_cnf '=' '' "$p"; p=$lt_sysroot$func_stripname_result ;; +- esac +- if test no = "$pre_test_object_deps_done"; then +- case $prev in +- -L | -R) +- # Internal compiler library paths should come after those +- # provided the user. The postdeps already come after the +- # user supplied libs so there is no need to process them. +- if test -z "$_LT_TAGVAR(compiler_lib_search_path, $1)"; then +- _LT_TAGVAR(compiler_lib_search_path, $1)=$prev$p +- else +- _LT_TAGVAR(compiler_lib_search_path, $1)="${_LT_TAGVAR(compiler_lib_search_path, $1)} $prev$p" +- fi +- ;; +- # The "-l" case would never come before the object being +- # linked, so don't bother handling this case. +- esac +- else +- if test -z "$_LT_TAGVAR(postdeps, $1)"; then +- _LT_TAGVAR(postdeps, $1)=$prev$p +- else +- _LT_TAGVAR(postdeps, $1)="${_LT_TAGVAR(postdeps, $1)} $prev$p" +- fi +- fi +- prev= +- ;; +- +- *.lto.$objext) ;; # Ignore GCC LTO objects +- *.$objext) +- # This assumes that the test object file only shows up +- # once in the compiler output. 
+- if test "$p" = "conftest.$objext"; then +- pre_test_object_deps_done=yes +- continue +- fi +- +- if test no = "$pre_test_object_deps_done"; then +- if test -z "$_LT_TAGVAR(predep_objects, $1)"; then +- _LT_TAGVAR(predep_objects, $1)=$p +- else +- _LT_TAGVAR(predep_objects, $1)="$_LT_TAGVAR(predep_objects, $1) $p" +- fi +- else +- if test -z "$_LT_TAGVAR(postdep_objects, $1)"; then +- _LT_TAGVAR(postdep_objects, $1)=$p +- else +- _LT_TAGVAR(postdep_objects, $1)="$_LT_TAGVAR(postdep_objects, $1) $p" +- fi +- fi +- ;; +- +- *) ;; # Ignore the rest. +- +- esac +- done +- +- # Clean up. +- rm -f a.out a.exe +-else +- echo "libtool.m4: error: problem compiling $1 test program" +-fi +- +-$RM -f confest.$objext +-CFLAGS=$_lt_libdeps_save_CFLAGS +- +-# PORTME: override above test on systems where it is broken +-m4_if([$1], [CXX], +-[case $host_os in +-interix[[3-9]]*) +- # Interix 3.5 installs completely hosed .la files for C++, so rather than +- # hack all around it, let's just trust "g++" to DTRT. +- _LT_TAGVAR(predep_objects,$1)= +- _LT_TAGVAR(postdep_objects,$1)= +- _LT_TAGVAR(postdeps,$1)= +- ;; +-esac +-]) +- +-case " $_LT_TAGVAR(postdeps, $1) " in +-*" -lc "*) _LT_TAGVAR(archive_cmds_need_lc, $1)=no ;; +-esac +- _LT_TAGVAR(compiler_lib_search_dirs, $1)= +-if test -n "${_LT_TAGVAR(compiler_lib_search_path, $1)}"; then +- _LT_TAGVAR(compiler_lib_search_dirs, $1)=`echo " ${_LT_TAGVAR(compiler_lib_search_path, $1)}" | $SED -e 's! -L! 
!g' -e 's!^ !!'` +-fi +-_LT_TAGDECL([], [compiler_lib_search_dirs], [1], +- [The directories searched by this compiler when creating a shared library]) +-_LT_TAGDECL([], [predep_objects], [1], +- [Dependencies to place before and after the objects being linked to +- create a shared library]) +-_LT_TAGDECL([], [postdep_objects], [1]) +-_LT_TAGDECL([], [predeps], [1]) +-_LT_TAGDECL([], [postdeps], [1]) +-_LT_TAGDECL([], [compiler_lib_search_path], [1], +- [The library search path used internally by the compiler when linking +- a shared library]) +-])# _LT_SYS_HIDDEN_LIBDEPS +- +- +-# _LT_LANG_F77_CONFIG([TAG]) +-# -------------------------- +-# Ensure that the configuration variables for a Fortran 77 compiler are +-# suitably defined. These variables are subsequently used by _LT_CONFIG +-# to write the compiler configuration to 'libtool'. +-m4_defun([_LT_LANG_F77_CONFIG], +-[AC_LANG_PUSH(Fortran 77) +-if test -z "$F77" || test no = "$F77"; then +- _lt_disable_F77=yes +-fi +- +-_LT_TAGVAR(archive_cmds_need_lc, $1)=no +-_LT_TAGVAR(allow_undefined_flag, $1)= +-_LT_TAGVAR(always_export_symbols, $1)=no +-_LT_TAGVAR(archive_expsym_cmds, $1)= +-_LT_TAGVAR(export_dynamic_flag_spec, $1)= +-_LT_TAGVAR(hardcode_direct, $1)=no +-_LT_TAGVAR(hardcode_direct_absolute, $1)=no +-_LT_TAGVAR(hardcode_libdir_flag_spec, $1)= +-_LT_TAGVAR(hardcode_libdir_separator, $1)= +-_LT_TAGVAR(hardcode_minus_L, $1)=no +-_LT_TAGVAR(hardcode_automatic, $1)=no +-_LT_TAGVAR(inherit_rpath, $1)=no +-_LT_TAGVAR(module_cmds, $1)= +-_LT_TAGVAR(module_expsym_cmds, $1)= +-_LT_TAGVAR(link_all_deplibs, $1)=unknown +-_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds +-_LT_TAGVAR(reload_flag, $1)=$reload_flag +-_LT_TAGVAR(reload_cmds, $1)=$reload_cmds +-_LT_TAGVAR(no_undefined_flag, $1)= +-_LT_TAGVAR(whole_archive_flag_spec, $1)= +-_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no +- +-# Source file extension for f77 test sources. +-ac_ext=f +- +-# Object file extension for compiled f77 test sources. 
+-objext=o +-_LT_TAGVAR(objext, $1)=$objext +- +-# No sense in running all these tests if we already determined that +-# the F77 compiler isn't working. Some variables (like enable_shared) +-# are currently assumed to apply to all compilers on this platform, +-# and will be corrupted by setting them based on a non-working compiler. +-if test yes != "$_lt_disable_F77"; then +- # Code to be used in simple compile tests +- lt_simple_compile_test_code="\ +- subroutine t +- return +- end +-" +- +- # Code to be used in simple link tests +- lt_simple_link_test_code="\ +- program t +- end +-" +- +- # ltmain only uses $CC for tagged configurations so make sure $CC is set. +- _LT_TAG_COMPILER +- +- # save warnings/boilerplate of simple test code +- _LT_COMPILER_BOILERPLATE +- _LT_LINKER_BOILERPLATE +- +- # Allow CC to be a program name with arguments. +- lt_save_CC=$CC +- lt_save_GCC=$GCC +- lt_save_CFLAGS=$CFLAGS +- CC=${F77-"f77"} +- CFLAGS=$FFLAGS +- compiler=$CC +- _LT_TAGVAR(compiler, $1)=$CC +- _LT_CC_BASENAME([$compiler]) +- GCC=$G77 +- if test -n "$compiler"; then +- AC_MSG_CHECKING([if libtool supports shared libraries]) +- AC_MSG_RESULT([$can_build_shared]) +- +- AC_MSG_CHECKING([whether to build shared libraries]) +- test no = "$can_build_shared" && enable_shared=no +- +- # On AIX, shared libraries and static libraries use the same namespace, and +- # are all built from PIC. 
+- case $host_os in +- aix3*) +- test yes = "$enable_shared" && enable_static=no +- if test -n "$RANLIB"; then +- archive_cmds="$archive_cmds~\$RANLIB \$lib" +- postinstall_cmds='$RANLIB $lib' +- fi +- ;; +- aix[[4-9]]*) +- if test ia64 != "$host_cpu"; then +- case $enable_shared,$with_aix_soname,$aix_use_runtimelinking in +- yes,aix,yes) ;; # shared object as lib.so file only +- yes,svr4,*) ;; # shared object as lib.so archive member only +- yes,*) enable_static=no ;; # shared object in lib.a archive as well +- esac +- fi +- ;; +- esac +- AC_MSG_RESULT([$enable_shared]) +- +- AC_MSG_CHECKING([whether to build static libraries]) +- # Make sure either enable_shared or enable_static is yes. +- test yes = "$enable_shared" || enable_static=yes +- AC_MSG_RESULT([$enable_static]) +- +- _LT_TAGVAR(GCC, $1)=$G77 +- _LT_TAGVAR(LD, $1)=$LD +- +- ## CAVEAT EMPTOR: +- ## There is no encapsulation within the following macros, do not change +- ## the running order or otherwise move them around unless you know exactly +- ## what you are doing... +- _LT_COMPILER_PIC($1) +- _LT_COMPILER_C_O($1) +- _LT_COMPILER_FILE_LOCKS($1) +- _LT_LINKER_SHLIBS($1) +- _LT_SYS_DYNAMIC_LINKER($1) +- _LT_LINKER_HARDCODE_LIBPATH($1) +- +- _LT_CONFIG($1) +- fi # test -n "$compiler" +- +- GCC=$lt_save_GCC +- CC=$lt_save_CC +- CFLAGS=$lt_save_CFLAGS +-fi # test yes != "$_lt_disable_F77" +- +-AC_LANG_POP +-])# _LT_LANG_F77_CONFIG +- +- +-# _LT_LANG_FC_CONFIG([TAG]) +-# ------------------------- +-# Ensure that the configuration variables for a Fortran compiler are +-# suitably defined. These variables are subsequently used by _LT_CONFIG +-# to write the compiler configuration to 'libtool'. 
+-m4_defun([_LT_LANG_FC_CONFIG], +-[AC_LANG_PUSH(Fortran) +- +-if test -z "$FC" || test no = "$FC"; then +- _lt_disable_FC=yes +-fi +- +-_LT_TAGVAR(archive_cmds_need_lc, $1)=no +-_LT_TAGVAR(allow_undefined_flag, $1)= +-_LT_TAGVAR(always_export_symbols, $1)=no +-_LT_TAGVAR(archive_expsym_cmds, $1)= +-_LT_TAGVAR(export_dynamic_flag_spec, $1)= +-_LT_TAGVAR(hardcode_direct, $1)=no +-_LT_TAGVAR(hardcode_direct_absolute, $1)=no +-_LT_TAGVAR(hardcode_libdir_flag_spec, $1)= +-_LT_TAGVAR(hardcode_libdir_separator, $1)= +-_LT_TAGVAR(hardcode_minus_L, $1)=no +-_LT_TAGVAR(hardcode_automatic, $1)=no +-_LT_TAGVAR(inherit_rpath, $1)=no +-_LT_TAGVAR(module_cmds, $1)= +-_LT_TAGVAR(module_expsym_cmds, $1)= +-_LT_TAGVAR(link_all_deplibs, $1)=unknown +-_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds +-_LT_TAGVAR(reload_flag, $1)=$reload_flag +-_LT_TAGVAR(reload_cmds, $1)=$reload_cmds +-_LT_TAGVAR(no_undefined_flag, $1)= +-_LT_TAGVAR(whole_archive_flag_spec, $1)= +-_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no +- +-# Source file extension for fc test sources. +-ac_ext=${ac_fc_srcext-f} +- +-# Object file extension for compiled fc test sources. +-objext=o +-_LT_TAGVAR(objext, $1)=$objext +- +-# No sense in running all these tests if we already determined that +-# the FC compiler isn't working. Some variables (like enable_shared) +-# are currently assumed to apply to all compilers on this platform, +-# and will be corrupted by setting them based on a non-working compiler. +-if test yes != "$_lt_disable_FC"; then +- # Code to be used in simple compile tests +- lt_simple_compile_test_code="\ +- subroutine t +- return +- end +-" +- +- # Code to be used in simple link tests +- lt_simple_link_test_code="\ +- program t +- end +-" +- +- # ltmain only uses $CC for tagged configurations so make sure $CC is set. 
+- _LT_TAG_COMPILER +- +- # save warnings/boilerplate of simple test code +- _LT_COMPILER_BOILERPLATE +- _LT_LINKER_BOILERPLATE +- +- # Allow CC to be a program name with arguments. +- lt_save_CC=$CC +- lt_save_GCC=$GCC +- lt_save_CFLAGS=$CFLAGS +- CC=${FC-"f95"} +- CFLAGS=$FCFLAGS +- compiler=$CC +- GCC=$ac_cv_fc_compiler_gnu +- +- _LT_TAGVAR(compiler, $1)=$CC +- _LT_CC_BASENAME([$compiler]) +- +- if test -n "$compiler"; then +- AC_MSG_CHECKING([if libtool supports shared libraries]) +- AC_MSG_RESULT([$can_build_shared]) +- +- AC_MSG_CHECKING([whether to build shared libraries]) +- test no = "$can_build_shared" && enable_shared=no +- +- # On AIX, shared libraries and static libraries use the same namespace, and +- # are all built from PIC. +- case $host_os in +- aix3*) +- test yes = "$enable_shared" && enable_static=no +- if test -n "$RANLIB"; then +- archive_cmds="$archive_cmds~\$RANLIB \$lib" +- postinstall_cmds='$RANLIB $lib' +- fi +- ;; +- aix[[4-9]]*) +- if test ia64 != "$host_cpu"; then +- case $enable_shared,$with_aix_soname,$aix_use_runtimelinking in +- yes,aix,yes) ;; # shared object as lib.so file only +- yes,svr4,*) ;; # shared object as lib.so archive member only +- yes,*) enable_static=no ;; # shared object in lib.a archive as well +- esac +- fi +- ;; +- esac +- AC_MSG_RESULT([$enable_shared]) +- +- AC_MSG_CHECKING([whether to build static libraries]) +- # Make sure either enable_shared or enable_static is yes. +- test yes = "$enable_shared" || enable_static=yes +- AC_MSG_RESULT([$enable_static]) +- +- _LT_TAGVAR(GCC, $1)=$ac_cv_fc_compiler_gnu +- _LT_TAGVAR(LD, $1)=$LD +- +- ## CAVEAT EMPTOR: +- ## There is no encapsulation within the following macros, do not change +- ## the running order or otherwise move them around unless you know exactly +- ## what you are doing... 
+- _LT_SYS_HIDDEN_LIBDEPS($1) +- _LT_COMPILER_PIC($1) +- _LT_COMPILER_C_O($1) +- _LT_COMPILER_FILE_LOCKS($1) +- _LT_LINKER_SHLIBS($1) +- _LT_SYS_DYNAMIC_LINKER($1) +- _LT_LINKER_HARDCODE_LIBPATH($1) +- +- _LT_CONFIG($1) +- fi # test -n "$compiler" +- +- GCC=$lt_save_GCC +- CC=$lt_save_CC +- CFLAGS=$lt_save_CFLAGS +-fi # test yes != "$_lt_disable_FC" +- +-AC_LANG_POP +-])# _LT_LANG_FC_CONFIG +- +- +-# _LT_LANG_GCJ_CONFIG([TAG]) +-# -------------------------- +-# Ensure that the configuration variables for the GNU Java Compiler compiler +-# are suitably defined. These variables are subsequently used by _LT_CONFIG +-# to write the compiler configuration to 'libtool'. +-m4_defun([_LT_LANG_GCJ_CONFIG], +-[AC_REQUIRE([LT_PROG_GCJ])dnl +-AC_LANG_SAVE +- +-# Source file extension for Java test sources. +-ac_ext=java +- +-# Object file extension for compiled Java test sources. +-objext=o +-_LT_TAGVAR(objext, $1)=$objext +- +-# Code to be used in simple compile tests +-lt_simple_compile_test_code="class foo {}" +- +-# Code to be used in simple link tests +-lt_simple_link_test_code='public class conftest { public static void main(String[[]] argv) {}; }' +- +-# ltmain only uses $CC for tagged configurations so make sure $CC is set. +-_LT_TAG_COMPILER +- +-# save warnings/boilerplate of simple test code +-_LT_COMPILER_BOILERPLATE +-_LT_LINKER_BOILERPLATE +- +-# Allow CC to be a program name with arguments. +-lt_save_CC=$CC +-lt_save_CFLAGS=$CFLAGS +-lt_save_GCC=$GCC +-GCC=yes +-CC=${GCJ-"gcj"} +-CFLAGS=$GCJFLAGS +-compiler=$CC +-_LT_TAGVAR(compiler, $1)=$CC +-_LT_TAGVAR(LD, $1)=$LD +-_LT_CC_BASENAME([$compiler]) +- +-# GCJ did not exist at the time GCC didn't implicitly link libc in. 
+-_LT_TAGVAR(archive_cmds_need_lc, $1)=no +- +-_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds +-_LT_TAGVAR(reload_flag, $1)=$reload_flag +-_LT_TAGVAR(reload_cmds, $1)=$reload_cmds +- +-if test -n "$compiler"; then +- _LT_COMPILER_NO_RTTI($1) +- _LT_COMPILER_PIC($1) +- _LT_COMPILER_C_O($1) +- _LT_COMPILER_FILE_LOCKS($1) +- _LT_LINKER_SHLIBS($1) +- _LT_LINKER_HARDCODE_LIBPATH($1) +- +- _LT_CONFIG($1) +-fi +- +-AC_LANG_RESTORE +- +-GCC=$lt_save_GCC +-CC=$lt_save_CC +-CFLAGS=$lt_save_CFLAGS +-])# _LT_LANG_GCJ_CONFIG +- +- +-# _LT_LANG_GO_CONFIG([TAG]) +-# -------------------------- +-# Ensure that the configuration variables for the GNU Go compiler +-# are suitably defined. These variables are subsequently used by _LT_CONFIG +-# to write the compiler configuration to 'libtool'. +-m4_defun([_LT_LANG_GO_CONFIG], +-[AC_REQUIRE([LT_PROG_GO])dnl +-AC_LANG_SAVE +- +-# Source file extension for Go test sources. +-ac_ext=go +- +-# Object file extension for compiled Go test sources. +-objext=o +-_LT_TAGVAR(objext, $1)=$objext +- +-# Code to be used in simple compile tests +-lt_simple_compile_test_code="package main; func main() { }" +- +-# Code to be used in simple link tests +-lt_simple_link_test_code='package main; func main() { }' +- +-# ltmain only uses $CC for tagged configurations so make sure $CC is set. +-_LT_TAG_COMPILER +- +-# save warnings/boilerplate of simple test code +-_LT_COMPILER_BOILERPLATE +-_LT_LINKER_BOILERPLATE +- +-# Allow CC to be a program name with arguments. +-lt_save_CC=$CC +-lt_save_CFLAGS=$CFLAGS +-lt_save_GCC=$GCC +-GCC=yes +-CC=${GOC-"gccgo"} +-CFLAGS=$GOFLAGS +-compiler=$CC +-_LT_TAGVAR(compiler, $1)=$CC +-_LT_TAGVAR(LD, $1)=$LD +-_LT_CC_BASENAME([$compiler]) +- +-# Go did not exist at the time GCC didn't implicitly link libc in. 
+-_LT_TAGVAR(archive_cmds_need_lc, $1)=no +- +-_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds +-_LT_TAGVAR(reload_flag, $1)=$reload_flag +-_LT_TAGVAR(reload_cmds, $1)=$reload_cmds +- +-if test -n "$compiler"; then +- _LT_COMPILER_NO_RTTI($1) +- _LT_COMPILER_PIC($1) +- _LT_COMPILER_C_O($1) +- _LT_COMPILER_FILE_LOCKS($1) +- _LT_LINKER_SHLIBS($1) +- _LT_LINKER_HARDCODE_LIBPATH($1) +- +- _LT_CONFIG($1) +-fi +- +-AC_LANG_RESTORE +- +-GCC=$lt_save_GCC +-CC=$lt_save_CC +-CFLAGS=$lt_save_CFLAGS +-])# _LT_LANG_GO_CONFIG +- +- +-# _LT_LANG_RC_CONFIG([TAG]) +-# ------------------------- +-# Ensure that the configuration variables for the Windows resource compiler +-# are suitably defined. These variables are subsequently used by _LT_CONFIG +-# to write the compiler configuration to 'libtool'. +-m4_defun([_LT_LANG_RC_CONFIG], +-[AC_REQUIRE([LT_PROG_RC])dnl +-AC_LANG_SAVE +- +-# Source file extension for RC test sources. +-ac_ext=rc +- +-# Object file extension for compiled RC test sources. +-objext=o +-_LT_TAGVAR(objext, $1)=$objext +- +-# Code to be used in simple compile tests +-lt_simple_compile_test_code='sample MENU { MENUITEM "&Soup", 100, CHECKED }' +- +-# Code to be used in simple link tests +-lt_simple_link_test_code=$lt_simple_compile_test_code +- +-# ltmain only uses $CC for tagged configurations so make sure $CC is set. +-_LT_TAG_COMPILER +- +-# save warnings/boilerplate of simple test code +-_LT_COMPILER_BOILERPLATE +-_LT_LINKER_BOILERPLATE +- +-# Allow CC to be a program name with arguments. 
+-lt_save_CC=$CC +-lt_save_CFLAGS=$CFLAGS +-lt_save_GCC=$GCC +-GCC= +-CC=${RC-"windres"} +-CFLAGS= +-compiler=$CC +-_LT_TAGVAR(compiler, $1)=$CC +-_LT_CC_BASENAME([$compiler]) +-_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes +- +-if test -n "$compiler"; then +- : +- _LT_CONFIG($1) +-fi +- +-GCC=$lt_save_GCC +-AC_LANG_RESTORE +-CC=$lt_save_CC +-CFLAGS=$lt_save_CFLAGS +-])# _LT_LANG_RC_CONFIG +- +- +-# LT_PROG_GCJ +-# ----------- +-AC_DEFUN([LT_PROG_GCJ], +-[m4_ifdef([AC_PROG_GCJ], [AC_PROG_GCJ], +- [m4_ifdef([A][M_PROG_GCJ], [A][M_PROG_GCJ], +- [AC_CHECK_TOOL(GCJ, gcj,) +- test set = "${GCJFLAGS+set}" || GCJFLAGS="-g -O2" +- AC_SUBST(GCJFLAGS)])])[]dnl +-]) +- +-# Old name: +-AU_ALIAS([LT_AC_PROG_GCJ], [LT_PROG_GCJ]) +-dnl aclocal-1.4 backwards compatibility: +-dnl AC_DEFUN([LT_AC_PROG_GCJ], []) +- +- +-# LT_PROG_GO +-# ---------- +-AC_DEFUN([LT_PROG_GO], +-[AC_CHECK_TOOL(GOC, gccgo,) +-]) +- +- +-# LT_PROG_RC +-# ---------- +-AC_DEFUN([LT_PROG_RC], +-[AC_CHECK_TOOL(RC, windres,) +-]) +- +-# Old name: +-AU_ALIAS([LT_AC_PROG_RC], [LT_PROG_RC]) +-dnl aclocal-1.4 backwards compatibility: +-dnl AC_DEFUN([LT_AC_PROG_RC], []) +- +- +-# _LT_DECL_EGREP +-# -------------- +-# If we don't have a new enough Autoconf to choose the best grep +-# available, choose the one first in the user's PATH. +-m4_defun([_LT_DECL_EGREP], +-[AC_REQUIRE([AC_PROG_EGREP])dnl +-AC_REQUIRE([AC_PROG_FGREP])dnl +-test -z "$GREP" && GREP=grep +-_LT_DECL([], [GREP], [1], [A grep program that handles long lines]) +-_LT_DECL([], [EGREP], [1], [An ERE matcher]) +-_LT_DECL([], [FGREP], [1], [A literal string matcher]) +-dnl Non-bleeding-edge autoconf doesn't subst GREP, so do it here too +-AC_SUBST([GREP]) +-]) +- +- +-# _LT_DECL_OBJDUMP +-# -------------- +-# If we don't have a new enough Autoconf to choose the best objdump +-# available, choose the one first in the user's PATH. 
+-m4_defun([_LT_DECL_OBJDUMP], +-[AC_CHECK_TOOL(OBJDUMP, objdump, false) +-test -z "$OBJDUMP" && OBJDUMP=objdump +-_LT_DECL([], [OBJDUMP], [1], [An object symbol dumper]) +-AC_SUBST([OBJDUMP]) +-]) +- +-# _LT_DECL_DLLTOOL +-# ---------------- +-# Ensure DLLTOOL variable is set. +-m4_defun([_LT_DECL_DLLTOOL], +-[AC_CHECK_TOOL(DLLTOOL, dlltool, false) +-test -z "$DLLTOOL" && DLLTOOL=dlltool +-_LT_DECL([], [DLLTOOL], [1], [DLL creation program]) +-AC_SUBST([DLLTOOL]) +-]) +- +-# _LT_DECL_FILECMD +-# ---------------- +-# Check for a file(cmd) program that can be used to detect file type and magic +-m4_defun([_LT_DECL_FILECMD], +-[AC_CHECK_TOOL([FILECMD], [file], [:]) +-_LT_DECL([], [FILECMD], [1], [A file(cmd) program that detects file types]) +-])# _LD_DECL_FILECMD +- +-# _LT_DECL_SED +-# ------------ +-# Check for a fully-functional sed program, that truncates +-# as few characters as possible. Prefer GNU sed if found. +-m4_defun([_LT_DECL_SED], +-[AC_PROG_SED +-test -z "$SED" && SED=sed +-Xsed="$SED -e 1s/^X//" +-_LT_DECL([], [SED], [1], [A sed program that does not truncate output]) +-_LT_DECL([], [Xsed], ["\$SED -e 1s/^X//"], +- [Sed that helps us avoid accidentally triggering echo(1) options like -n]) +-])# _LT_DECL_SED +- +-m4_ifndef([AC_PROG_SED], [ +-# NOTE: This macro has been submitted for inclusion into # +-# GNU Autoconf as AC_PROG_SED. When it is available in # +-# a released version of Autoconf we should remove this # +-# macro and use it instead. # +- +-m4_defun([AC_PROG_SED], +-[AC_MSG_CHECKING([for a sed that does not truncate output]) +-AC_CACHE_VAL(lt_cv_path_SED, +-[# Loop through the user's path and test for sed and gsed. +-# Then use that list of sed's as ones to test for truncation. +-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +-for as_dir in $PATH +-do +- IFS=$as_save_IFS +- test -z "$as_dir" && as_dir=. 
+- for lt_ac_prog in sed gsed; do +- for ac_exec_ext in '' $ac_executable_extensions; do +- if $as_executable_p "$as_dir/$lt_ac_prog$ac_exec_ext"; then +- lt_ac_sed_list="$lt_ac_sed_list $as_dir/$lt_ac_prog$ac_exec_ext" +- fi +- done +- done +-done +-IFS=$as_save_IFS +-lt_ac_max=0 +-lt_ac_count=0 +-# Add /usr/xpg4/bin/sed as it is typically found on Solaris +-# along with /bin/sed that truncates output. +-for lt_ac_sed in $lt_ac_sed_list /usr/xpg4/bin/sed; do +- test ! -f "$lt_ac_sed" && continue +- cat /dev/null > conftest.in +- lt_ac_count=0 +- echo $ECHO_N "0123456789$ECHO_C" >conftest.in +- # Check for GNU sed and select it if it is found. +- if "$lt_ac_sed" --version 2>&1 < /dev/null | grep 'GNU' > /dev/null; then +- lt_cv_path_SED=$lt_ac_sed +- break +- fi +- while true; do +- cat conftest.in conftest.in >conftest.tmp +- mv conftest.tmp conftest.in +- cp conftest.in conftest.nl +- echo >>conftest.nl +- $lt_ac_sed -e 's/a$//' < conftest.nl >conftest.out || break +- cmp -s conftest.out conftest.nl || break +- # 10000 chars as input seems more than enough +- test 10 -lt "$lt_ac_count" && break +- lt_ac_count=`expr $lt_ac_count + 1` +- if test "$lt_ac_count" -gt "$lt_ac_max"; then +- lt_ac_max=$lt_ac_count +- lt_cv_path_SED=$lt_ac_sed +- fi +- done +-done +-]) +-SED=$lt_cv_path_SED +-AC_SUBST([SED]) +-AC_MSG_RESULT([$SED]) +-])#AC_PROG_SED +-])#m4_ifndef +- +-# Old name: +-AU_ALIAS([LT_AC_PROG_SED], [AC_PROG_SED]) +-dnl aclocal-1.4 backwards compatibility: +-dnl AC_DEFUN([LT_AC_PROG_SED], []) +- +- +-# _LT_CHECK_SHELL_FEATURES +-# ------------------------ +-# Find out whether the shell is Bourne or XSI compatible, +-# or has some other useful features. 
+-m4_defun([_LT_CHECK_SHELL_FEATURES], +-[if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then +- lt_unset=unset +-else +- lt_unset=false +-fi +-_LT_DECL([], [lt_unset], [0], [whether the shell understands "unset"])dnl +- +-# test EBCDIC or ASCII +-case `echo X|tr X '\101'` in +- A) # ASCII based system +- # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr +- lt_SP2NL='tr \040 \012' +- lt_NL2SP='tr \015\012 \040\040' +- ;; +- *) # EBCDIC based system +- lt_SP2NL='tr \100 \n' +- lt_NL2SP='tr \r\n \100\100' +- ;; +-esac +-_LT_DECL([SP2NL], [lt_SP2NL], [1], [turn spaces into newlines])dnl +-_LT_DECL([NL2SP], [lt_NL2SP], [1], [turn newlines into spaces])dnl +-])# _LT_CHECK_SHELL_FEATURES +- +- +-# _LT_PATH_CONVERSION_FUNCTIONS +-# ----------------------------- +-# Determine what file name conversion functions should be used by +-# func_to_host_file (and, implicitly, by func_to_host_path). These are needed +-# for certain cross-compile configurations and native mingw. +-m4_defun([_LT_PATH_CONVERSION_FUNCTIONS], +-[AC_REQUIRE([AC_CANONICAL_HOST])dnl +-AC_REQUIRE([AC_CANONICAL_BUILD])dnl +-AC_MSG_CHECKING([how to convert $build file names to $host format]) +-AC_CACHE_VAL(lt_cv_to_host_file_cmd, +-[case $host in +- *-*-mingw* ) +- case $build in +- *-*-mingw* ) # actually msys +- lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 +- ;; +- *-*-cygwin* ) +- lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 +- ;; +- * ) # otherwise, assume *nix +- lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 +- ;; +- esac +- ;; +- *-*-cygwin* ) +- case $build in +- *-*-mingw* ) # actually msys +- lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin +- ;; +- *-*-cygwin* ) +- lt_cv_to_host_file_cmd=func_convert_file_noop +- ;; +- * ) # otherwise, assume *nix +- lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin +- ;; +- esac +- ;; +- * ) # unhandled hosts (and "normal" native builds) +- lt_cv_to_host_file_cmd=func_convert_file_noop +- ;; +-esac +-]) 
+-to_host_file_cmd=$lt_cv_to_host_file_cmd +-AC_MSG_RESULT([$lt_cv_to_host_file_cmd]) +-_LT_DECL([to_host_file_cmd], [lt_cv_to_host_file_cmd], +- [0], [convert $build file names to $host format])dnl +- +-AC_MSG_CHECKING([how to convert $build file names to toolchain format]) +-AC_CACHE_VAL(lt_cv_to_tool_file_cmd, +-[#assume ordinary cross tools, or native build. +-lt_cv_to_tool_file_cmd=func_convert_file_noop +-case $host in +- *-*-mingw* ) +- case $build in +- *-*-mingw* ) # actually msys +- lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 +- ;; +- esac +- ;; +-esac +-]) +-to_tool_file_cmd=$lt_cv_to_tool_file_cmd +-AC_MSG_RESULT([$lt_cv_to_tool_file_cmd]) +-_LT_DECL([to_tool_file_cmd], [lt_cv_to_tool_file_cmd], +- [0], [convert $build files to toolchain format])dnl +-])# _LT_PATH_CONVERSION_FUNCTIONS +- +-# Helper functions for option handling. -*- Autoconf -*- +-# +-# Copyright (C) 2004-2005, 2007-2009, 2011-2019, 2021-2022 Free +-# Software Foundation, Inc. +-# Written by Gary V. Vaughan, 2004 +-# +-# This file is free software; the Free Software Foundation gives +-# unlimited permission to copy and/or distribute it, with or without +-# modifications, as long as this notice is preserved. +- +-# serial 8 ltoptions.m4 +- +-# This is to help aclocal find these macros, as it can't see m4_define. +-AC_DEFUN([LTOPTIONS_VERSION], [m4_if([1])]) +- +- +-# _LT_MANGLE_OPTION(MACRO-NAME, OPTION-NAME) +-# ------------------------------------------ +-m4_define([_LT_MANGLE_OPTION], +-[[_LT_OPTION_]m4_bpatsubst($1__$2, [[^a-zA-Z0-9_]], [_])]) +- +- +-# _LT_SET_OPTION(MACRO-NAME, OPTION-NAME) +-# --------------------------------------- +-# Set option OPTION-NAME for macro MACRO-NAME, and if there is a +-# matching handler defined, dispatch to it. Other OPTION-NAMEs are +-# saved as a flag. 
+-m4_define([_LT_SET_OPTION], +-[m4_define(_LT_MANGLE_OPTION([$1], [$2]))dnl +-m4_ifdef(_LT_MANGLE_DEFUN([$1], [$2]), +- _LT_MANGLE_DEFUN([$1], [$2]), +- [m4_warning([Unknown $1 option '$2'])])[]dnl +-]) +- +- +-# _LT_IF_OPTION(MACRO-NAME, OPTION-NAME, IF-SET, [IF-NOT-SET]) +-# ------------------------------------------------------------ +-# Execute IF-SET if OPTION is set, IF-NOT-SET otherwise. +-m4_define([_LT_IF_OPTION], +-[m4_ifdef(_LT_MANGLE_OPTION([$1], [$2]), [$3], [$4])]) +- +- +-# _LT_UNLESS_OPTIONS(MACRO-NAME, OPTION-LIST, IF-NOT-SET) +-# ------------------------------------------------------- +-# Execute IF-NOT-SET unless all options in OPTION-LIST for MACRO-NAME +-# are set. +-m4_define([_LT_UNLESS_OPTIONS], +-[m4_foreach([_LT_Option], m4_split(m4_normalize([$2])), +- [m4_ifdef(_LT_MANGLE_OPTION([$1], _LT_Option), +- [m4_define([$0_found])])])[]dnl +-m4_ifdef([$0_found], [m4_undefine([$0_found])], [$3 +-])[]dnl +-]) +- +- +-# _LT_SET_OPTIONS(MACRO-NAME, OPTION-LIST) +-# ---------------------------------------- +-# OPTION-LIST is a space-separated list of Libtool options associated +-# with MACRO-NAME. If any OPTION has a matching handler declared with +-# LT_OPTION_DEFINE, dispatch to that macro; otherwise complain about +-# the unknown option and exit. +-m4_defun([_LT_SET_OPTIONS], +-[# Set options +-m4_foreach([_LT_Option], m4_split(m4_normalize([$2])), +- [_LT_SET_OPTION([$1], _LT_Option)]) +- +-m4_if([$1],[LT_INIT],[ +- dnl +- dnl Simply set some default values (i.e off) if boolean options were not +- dnl specified: +- _LT_UNLESS_OPTIONS([LT_INIT], [dlopen], [enable_dlopen=no +- ]) +- _LT_UNLESS_OPTIONS([LT_INIT], [win32-dll], [enable_win32_dll=no +- ]) +- dnl +- dnl If no reference was made to various pairs of opposing options, then +- dnl we run the default mode handler for the pair. 
For example, if neither +- dnl 'shared' nor 'disable-shared' was passed, we enable building of shared +- dnl archives by default: +- _LT_UNLESS_OPTIONS([LT_INIT], [shared disable-shared], [_LT_ENABLE_SHARED]) +- _LT_UNLESS_OPTIONS([LT_INIT], [static disable-static], [_LT_ENABLE_STATIC]) +- _LT_UNLESS_OPTIONS([LT_INIT], [pic-only no-pic], [_LT_WITH_PIC]) +- _LT_UNLESS_OPTIONS([LT_INIT], [fast-install disable-fast-install], +- [_LT_ENABLE_FAST_INSTALL]) +- _LT_UNLESS_OPTIONS([LT_INIT], [aix-soname=aix aix-soname=both aix-soname=svr4], +- [_LT_WITH_AIX_SONAME([aix])]) +- ]) +-])# _LT_SET_OPTIONS +- +- +- +-# _LT_MANGLE_DEFUN(MACRO-NAME, OPTION-NAME) +-# ----------------------------------------- +-m4_define([_LT_MANGLE_DEFUN], +-[[_LT_OPTION_DEFUN_]m4_bpatsubst(m4_toupper([$1__$2]), [[^A-Z0-9_]], [_])]) +- +- +-# LT_OPTION_DEFINE(MACRO-NAME, OPTION-NAME, CODE) +-# ----------------------------------------------- +-m4_define([LT_OPTION_DEFINE], +-[m4_define(_LT_MANGLE_DEFUN([$1], [$2]), [$3])[]dnl +-])# LT_OPTION_DEFINE +- +- +-# dlopen +-# ------ +-LT_OPTION_DEFINE([LT_INIT], [dlopen], [enable_dlopen=yes +-]) +- +-AU_DEFUN([AC_LIBTOOL_DLOPEN], +-[_LT_SET_OPTION([LT_INIT], [dlopen]) +-AC_DIAGNOSE([obsolete], +-[$0: Remove this warning and the call to _LT_SET_OPTION when you +-put the 'dlopen' option into LT_INIT's first parameter.]) +-]) +- +-dnl aclocal-1.4 backwards compatibility: +-dnl AC_DEFUN([AC_LIBTOOL_DLOPEN], []) +- +- +-# win32-dll +-# --------- +-# Declare package support for building win32 dll's. 
+-LT_OPTION_DEFINE([LT_INIT], [win32-dll], +-[enable_win32_dll=yes +- +-case $host in +-*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-cegcc*) +- AC_CHECK_TOOL(AS, as, false) +- AC_CHECK_TOOL(DLLTOOL, dlltool, false) +- AC_CHECK_TOOL(OBJDUMP, objdump, false) +- ;; +-esac +- +-test -z "$AS" && AS=as +-_LT_DECL([], [AS], [1], [Assembler program])dnl +- +-test -z "$DLLTOOL" && DLLTOOL=dlltool +-_LT_DECL([], [DLLTOOL], [1], [DLL creation program])dnl +- +-test -z "$OBJDUMP" && OBJDUMP=objdump +-_LT_DECL([], [OBJDUMP], [1], [Object dumper program])dnl +-])# win32-dll +- +-AU_DEFUN([AC_LIBTOOL_WIN32_DLL], +-[AC_REQUIRE([AC_CANONICAL_HOST])dnl +-_LT_SET_OPTION([LT_INIT], [win32-dll]) +-AC_DIAGNOSE([obsolete], +-[$0: Remove this warning and the call to _LT_SET_OPTION when you +-put the 'win32-dll' option into LT_INIT's first parameter.]) +-]) +- +-dnl aclocal-1.4 backwards compatibility: +-dnl AC_DEFUN([AC_LIBTOOL_WIN32_DLL], []) +- +- +-# _LT_ENABLE_SHARED([DEFAULT]) +-# ---------------------------- +-# implement the --enable-shared flag, and supports the 'shared' and +-# 'disable-shared' LT_INIT options. +-# DEFAULT is either 'yes' or 'no'. If omitted, it defaults to 'yes'. +-m4_define([_LT_ENABLE_SHARED], +-[m4_define([_LT_ENABLE_SHARED_DEFAULT], [m4_if($1, no, no, yes)])dnl +-AC_ARG_ENABLE([shared], +- [AS_HELP_STRING([--enable-shared@<:@=PKGS@:>@], +- [build shared libraries @<:@default=]_LT_ENABLE_SHARED_DEFAULT[@:>@])], +- [p=${PACKAGE-default} +- case $enableval in +- yes) enable_shared=yes ;; +- no) enable_shared=no ;; +- *) +- enable_shared=no +- # Look at the argument we got. We use all the common list separators. 
+- lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, +- for pkg in $enableval; do +- IFS=$lt_save_ifs +- if test "X$pkg" = "X$p"; then +- enable_shared=yes +- fi +- done +- IFS=$lt_save_ifs +- ;; +- esac], +- [enable_shared=]_LT_ENABLE_SHARED_DEFAULT) +- +- _LT_DECL([build_libtool_libs], [enable_shared], [0], +- [Whether or not to build shared libraries]) +-])# _LT_ENABLE_SHARED +- +-LT_OPTION_DEFINE([LT_INIT], [shared], [_LT_ENABLE_SHARED([yes])]) +-LT_OPTION_DEFINE([LT_INIT], [disable-shared], [_LT_ENABLE_SHARED([no])]) +- +-# Old names: +-AC_DEFUN([AC_ENABLE_SHARED], +-[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[shared]) +-]) +- +-AC_DEFUN([AC_DISABLE_SHARED], +-[_LT_SET_OPTION([LT_INIT], [disable-shared]) +-]) +- +-AU_DEFUN([AM_ENABLE_SHARED], [AC_ENABLE_SHARED($@)]) +-AU_DEFUN([AM_DISABLE_SHARED], [AC_DISABLE_SHARED($@)]) +- +-dnl aclocal-1.4 backwards compatibility: +-dnl AC_DEFUN([AM_ENABLE_SHARED], []) +-dnl AC_DEFUN([AM_DISABLE_SHARED], []) +- +- +- +-# _LT_ENABLE_STATIC([DEFAULT]) +-# ---------------------------- +-# implement the --enable-static flag, and support the 'static' and +-# 'disable-static' LT_INIT options. +-# DEFAULT is either 'yes' or 'no'. If omitted, it defaults to 'yes'. +-m4_define([_LT_ENABLE_STATIC], +-[m4_define([_LT_ENABLE_STATIC_DEFAULT], [m4_if($1, no, no, yes)])dnl +-AC_ARG_ENABLE([static], +- [AS_HELP_STRING([--enable-static@<:@=PKGS@:>@], +- [build static libraries @<:@default=]_LT_ENABLE_STATIC_DEFAULT[@:>@])], +- [p=${PACKAGE-default} +- case $enableval in +- yes) enable_static=yes ;; +- no) enable_static=no ;; +- *) +- enable_static=no +- # Look at the argument we got. We use all the common list separators. 
+- lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, +- for pkg in $enableval; do +- IFS=$lt_save_ifs +- if test "X$pkg" = "X$p"; then +- enable_static=yes +- fi +- done +- IFS=$lt_save_ifs +- ;; +- esac], +- [enable_static=]_LT_ENABLE_STATIC_DEFAULT) +- +- _LT_DECL([build_old_libs], [enable_static], [0], +- [Whether or not to build static libraries]) +-])# _LT_ENABLE_STATIC +- +-LT_OPTION_DEFINE([LT_INIT], [static], [_LT_ENABLE_STATIC([yes])]) +-LT_OPTION_DEFINE([LT_INIT], [disable-static], [_LT_ENABLE_STATIC([no])]) +- +-# Old names: +-AC_DEFUN([AC_ENABLE_STATIC], +-[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[static]) +-]) +- +-AC_DEFUN([AC_DISABLE_STATIC], +-[_LT_SET_OPTION([LT_INIT], [disable-static]) +-]) +- +-AU_DEFUN([AM_ENABLE_STATIC], [AC_ENABLE_STATIC($@)]) +-AU_DEFUN([AM_DISABLE_STATIC], [AC_DISABLE_STATIC($@)]) +- +-dnl aclocal-1.4 backwards compatibility: +-dnl AC_DEFUN([AM_ENABLE_STATIC], []) +-dnl AC_DEFUN([AM_DISABLE_STATIC], []) +- +- +- +-# _LT_ENABLE_FAST_INSTALL([DEFAULT]) +-# ---------------------------------- +-# implement the --enable-fast-install flag, and support the 'fast-install' +-# and 'disable-fast-install' LT_INIT options. +-# DEFAULT is either 'yes' or 'no'. If omitted, it defaults to 'yes'. +-m4_define([_LT_ENABLE_FAST_INSTALL], +-[m4_define([_LT_ENABLE_FAST_INSTALL_DEFAULT], [m4_if($1, no, no, yes)])dnl +-AC_ARG_ENABLE([fast-install], +- [AS_HELP_STRING([--enable-fast-install@<:@=PKGS@:>@], +- [optimize for fast installation @<:@default=]_LT_ENABLE_FAST_INSTALL_DEFAULT[@:>@])], +- [p=${PACKAGE-default} +- case $enableval in +- yes) enable_fast_install=yes ;; +- no) enable_fast_install=no ;; +- *) +- enable_fast_install=no +- # Look at the argument we got. We use all the common list separators. 
+- lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, +- for pkg in $enableval; do +- IFS=$lt_save_ifs +- if test "X$pkg" = "X$p"; then +- enable_fast_install=yes +- fi +- done +- IFS=$lt_save_ifs +- ;; +- esac], +- [enable_fast_install=]_LT_ENABLE_FAST_INSTALL_DEFAULT) +- +-_LT_DECL([fast_install], [enable_fast_install], [0], +- [Whether or not to optimize for fast installation])dnl +-])# _LT_ENABLE_FAST_INSTALL +- +-LT_OPTION_DEFINE([LT_INIT], [fast-install], [_LT_ENABLE_FAST_INSTALL([yes])]) +-LT_OPTION_DEFINE([LT_INIT], [disable-fast-install], [_LT_ENABLE_FAST_INSTALL([no])]) +- +-# Old names: +-AU_DEFUN([AC_ENABLE_FAST_INSTALL], +-[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[fast-install]) +-AC_DIAGNOSE([obsolete], +-[$0: Remove this warning and the call to _LT_SET_OPTION when you put +-the 'fast-install' option into LT_INIT's first parameter.]) +-]) +- +-AU_DEFUN([AC_DISABLE_FAST_INSTALL], +-[_LT_SET_OPTION([LT_INIT], [disable-fast-install]) +-AC_DIAGNOSE([obsolete], +-[$0: Remove this warning and the call to _LT_SET_OPTION when you put +-the 'disable-fast-install' option into LT_INIT's first parameter.]) +-]) +- +-dnl aclocal-1.4 backwards compatibility: +-dnl AC_DEFUN([AC_ENABLE_FAST_INSTALL], []) +-dnl AC_DEFUN([AM_DISABLE_FAST_INSTALL], []) +- +- +-# _LT_WITH_AIX_SONAME([DEFAULT]) +-# ---------------------------------- +-# implement the --with-aix-soname flag, and support the `aix-soname=aix' +-# and `aix-soname=both' and `aix-soname=svr4' LT_INIT options. DEFAULT +-# is either `aix', `both' or `svr4'. If omitted, it defaults to `aix'. 
+-m4_define([_LT_WITH_AIX_SONAME], +-[m4_define([_LT_WITH_AIX_SONAME_DEFAULT], [m4_if($1, svr4, svr4, m4_if($1, both, both, aix))])dnl +-shared_archive_member_spec= +-case $host,$enable_shared in +-power*-*-aix[[5-9]]*,yes) +- AC_MSG_CHECKING([which variant of shared library versioning to provide]) +- AC_ARG_WITH([aix-soname], +- [AS_HELP_STRING([--with-aix-soname=aix|svr4|both], +- [shared library versioning (aka "SONAME") variant to provide on AIX, @<:@default=]_LT_WITH_AIX_SONAME_DEFAULT[@:>@.])], +- [case $withval in +- aix|svr4|both) +- ;; +- *) +- AC_MSG_ERROR([Unknown argument to --with-aix-soname]) +- ;; +- esac +- lt_cv_with_aix_soname=$with_aix_soname], +- [AC_CACHE_VAL([lt_cv_with_aix_soname], +- [lt_cv_with_aix_soname=]_LT_WITH_AIX_SONAME_DEFAULT) +- with_aix_soname=$lt_cv_with_aix_soname]) +- AC_MSG_RESULT([$with_aix_soname]) +- if test aix != "$with_aix_soname"; then +- # For the AIX way of multilib, we name the shared archive member +- # based on the bitwidth used, traditionally 'shr.o' or 'shr_64.o', +- # and 'shr.imp' or 'shr_64.imp', respectively, for the Import File. +- # Even when GNU compilers ignore OBJECT_MODE but need '-maix64' flag, +- # the AIX toolchain works better with OBJECT_MODE set (default 32). +- if test 64 = "${OBJECT_MODE-32}"; then +- shared_archive_member_spec=shr_64 +- else +- shared_archive_member_spec=shr +- fi +- fi +- ;; +-*) +- with_aix_soname=aix +- ;; +-esac +- +-_LT_DECL([], [shared_archive_member_spec], [0], +- [Shared archive member basename, for filename based shared library versioning on AIX])dnl +-])# _LT_WITH_AIX_SONAME +- +-LT_OPTION_DEFINE([LT_INIT], [aix-soname=aix], [_LT_WITH_AIX_SONAME([aix])]) +-LT_OPTION_DEFINE([LT_INIT], [aix-soname=both], [_LT_WITH_AIX_SONAME([both])]) +-LT_OPTION_DEFINE([LT_INIT], [aix-soname=svr4], [_LT_WITH_AIX_SONAME([svr4])]) +- +- +-# _LT_WITH_PIC([MODE]) +-# -------------------- +-# implement the --with-pic flag, and support the 'pic-only' and 'no-pic' +-# LT_INIT options. 
+-# MODE is either 'yes' or 'no'. If omitted, it defaults to 'both'. +-m4_define([_LT_WITH_PIC], +-[AC_ARG_WITH([pic], +- [AS_HELP_STRING([--with-pic@<:@=PKGS@:>@], +- [try to use only PIC/non-PIC objects @<:@default=use both@:>@])], +- [lt_p=${PACKAGE-default} +- case $withval in +- yes|no) pic_mode=$withval ;; +- *) +- pic_mode=default +- # Look at the argument we got. We use all the common list separators. +- lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, +- for lt_pkg in $withval; do +- IFS=$lt_save_ifs +- if test "X$lt_pkg" = "X$lt_p"; then +- pic_mode=yes +- fi +- done +- IFS=$lt_save_ifs +- ;; +- esac], +- [pic_mode=m4_default([$1], [default])]) +- +-_LT_DECL([], [pic_mode], [0], [What type of objects to build])dnl +-])# _LT_WITH_PIC +- +-LT_OPTION_DEFINE([LT_INIT], [pic-only], [_LT_WITH_PIC([yes])]) +-LT_OPTION_DEFINE([LT_INIT], [no-pic], [_LT_WITH_PIC([no])]) +- +-# Old name: +-AU_DEFUN([AC_LIBTOOL_PICMODE], +-[_LT_SET_OPTION([LT_INIT], [pic-only]) +-AC_DIAGNOSE([obsolete], +-[$0: Remove this warning and the call to _LT_SET_OPTION when you +-put the 'pic-only' option into LT_INIT's first parameter.]) +-]) +- +-dnl aclocal-1.4 backwards compatibility: +-dnl AC_DEFUN([AC_LIBTOOL_PICMODE], []) +- +- +-m4_define([_LTDL_MODE], []) +-LT_OPTION_DEFINE([LTDL_INIT], [nonrecursive], +- [m4_define([_LTDL_MODE], [nonrecursive])]) +-LT_OPTION_DEFINE([LTDL_INIT], [recursive], +- [m4_define([_LTDL_MODE], [recursive])]) +-LT_OPTION_DEFINE([LTDL_INIT], [subproject], +- [m4_define([_LTDL_MODE], [subproject])]) +- +-m4_define([_LTDL_TYPE], []) +-LT_OPTION_DEFINE([LTDL_INIT], [installable], +- [m4_define([_LTDL_TYPE], [installable])]) +-LT_OPTION_DEFINE([LTDL_INIT], [convenience], +- [m4_define([_LTDL_TYPE], [convenience])]) +- +-# ltsugar.m4 -- libtool m4 base layer. -*-Autoconf-*- +-# +-# Copyright (C) 2004-2005, 2007-2008, 2011-2019, 2021-2022 Free Software +-# Foundation, Inc. +-# Written by Gary V. 
Vaughan, 2004 +-# +-# This file is free software; the Free Software Foundation gives +-# unlimited permission to copy and/or distribute it, with or without +-# modifications, as long as this notice is preserved. +- +-# serial 6 ltsugar.m4 +- +-# This is to help aclocal find these macros, as it can't see m4_define. +-AC_DEFUN([LTSUGAR_VERSION], [m4_if([0.1])]) +- +- +-# lt_join(SEP, ARG1, [ARG2...]) +-# ----------------------------- +-# Produce ARG1SEPARG2...SEPARGn, omitting [] arguments and their +-# associated separator. +-# Needed until we can rely on m4_join from Autoconf 2.62, since all earlier +-# versions in m4sugar had bugs. +-m4_define([lt_join], +-[m4_if([$#], [1], [], +- [$#], [2], [[$2]], +- [m4_if([$2], [], [], [[$2]_])$0([$1], m4_shift(m4_shift($@)))])]) +-m4_define([_lt_join], +-[m4_if([$#$2], [2], [], +- [m4_if([$2], [], [], [[$1$2]])$0([$1], m4_shift(m4_shift($@)))])]) +- +- +-# lt_car(LIST) +-# lt_cdr(LIST) +-# ------------ +-# Manipulate m4 lists. +-# These macros are necessary as long as will still need to support +-# Autoconf-2.59, which quotes differently. +-m4_define([lt_car], [[$1]]) +-m4_define([lt_cdr], +-[m4_if([$#], 0, [m4_fatal([$0: cannot be called without arguments])], +- [$#], 1, [], +- [m4_dquote(m4_shift($@))])]) +-m4_define([lt_unquote], $1) +- +- +-# lt_append(MACRO-NAME, STRING, [SEPARATOR]) +-# ------------------------------------------ +-# Redefine MACRO-NAME to hold its former content plus 'SEPARATOR''STRING'. +-# Note that neither SEPARATOR nor STRING are expanded; they are appended +-# to MACRO-NAME as is (leaving the expansion for when MACRO-NAME is invoked). +-# No SEPARATOR is output if MACRO-NAME was previously undefined (different +-# than defined and empty). +-# +-# This macro is needed until we can rely on Autoconf 2.62, since earlier +-# versions of m4sugar mistakenly expanded SEPARATOR but not STRING. 
+-m4_define([lt_append], +-[m4_define([$1], +- m4_ifdef([$1], [m4_defn([$1])[$3]])[$2])]) +- +- +- +-# lt_combine(SEP, PREFIX-LIST, INFIX, SUFFIX1, [SUFFIX2...]) +-# ---------------------------------------------------------- +-# Produce a SEP delimited list of all paired combinations of elements of +-# PREFIX-LIST with SUFFIX1 through SUFFIXn. Each element of the list +-# has the form PREFIXmINFIXSUFFIXn. +-# Needed until we can rely on m4_combine added in Autoconf 2.62. +-m4_define([lt_combine], +-[m4_if(m4_eval([$# > 3]), [1], +- [m4_pushdef([_Lt_sep], [m4_define([_Lt_sep], m4_defn([lt_car]))])]]dnl +-[[m4_foreach([_Lt_prefix], [$2], +- [m4_foreach([_Lt_suffix], +- ]m4_dquote(m4_dquote(m4_shift(m4_shift(m4_shift($@)))))[, +- [_Lt_sep([$1])[]m4_defn([_Lt_prefix])[$3]m4_defn([_Lt_suffix])])])])]) +- +- +-# lt_if_append_uniq(MACRO-NAME, VARNAME, [SEPARATOR], [UNIQ], [NOT-UNIQ]) +-# ----------------------------------------------------------------------- +-# Iff MACRO-NAME does not yet contain VARNAME, then append it (delimited +-# by SEPARATOR if supplied) and expand UNIQ, else NOT-UNIQ. 
+-m4_define([lt_if_append_uniq], +-[m4_ifdef([$1], +- [m4_if(m4_index([$3]m4_defn([$1])[$3], [$3$2$3]), [-1], +- [lt_append([$1], [$2], [$3])$4], +- [$5])], +- [lt_append([$1], [$2], [$3])$4])]) +- +- +-# lt_dict_add(DICT, KEY, VALUE) +-# ----------------------------- +-m4_define([lt_dict_add], +-[m4_define([$1($2)], [$3])]) +- +- +-# lt_dict_add_subkey(DICT, KEY, SUBKEY, VALUE) +-# -------------------------------------------- +-m4_define([lt_dict_add_subkey], +-[m4_define([$1($2:$3)], [$4])]) +- +- +-# lt_dict_fetch(DICT, KEY, [SUBKEY]) +-# ---------------------------------- +-m4_define([lt_dict_fetch], +-[m4_ifval([$3], +- m4_ifdef([$1($2:$3)], [m4_defn([$1($2:$3)])]), +- m4_ifdef([$1($2)], [m4_defn([$1($2)])]))]) +- +- +-# lt_if_dict_fetch(DICT, KEY, [SUBKEY], VALUE, IF-TRUE, [IF-FALSE]) +-# ----------------------------------------------------------------- +-m4_define([lt_if_dict_fetch], +-[m4_if(lt_dict_fetch([$1], [$2], [$3]), [$4], +- [$5], +- [$6])]) +- +- +-# lt_dict_filter(DICT, [SUBKEY], VALUE, [SEPARATOR], KEY, [...]) +-# -------------------------------------------------------------- +-m4_define([lt_dict_filter], +-[m4_if([$5], [], [], +- [lt_join(m4_quote(m4_default([$4], [[, ]])), +- lt_unquote(m4_split(m4_normalize(m4_foreach(_Lt_key, lt_car([m4_shiftn(4, $@)]), +- [lt_if_dict_fetch([$1], _Lt_key, [$2], [$3], [_Lt_key ])])))))])[]dnl +-]) +- +-# ltversion.m4 -- version numbers -*- Autoconf -*- +-# +-# Copyright (C) 2004, 2011-2019, 2021-2022 Free Software Foundation, +-# Inc. +-# Written by Scott James Remnant, 2004 +-# +-# This file is free software; the Free Software Foundation gives +-# unlimited permission to copy and/or distribute it, with or without +-# modifications, as long as this notice is preserved. 
+- +-# @configure_input@ +- +-# serial 4245 ltversion.m4 +-# This file is part of GNU Libtool +- +-m4_define([LT_PACKAGE_VERSION], [2.4.7]) +-m4_define([LT_PACKAGE_REVISION], [2.4.7]) +- +-AC_DEFUN([LTVERSION_VERSION], +-[macro_version='2.4.7' +-macro_revision='2.4.7' +-_LT_DECL(, macro_version, 0, [Which release of libtool.m4 was used?]) +-_LT_DECL(, macro_revision, 0) +-]) +- +-# lt~obsolete.m4 -- aclocal satisfying obsolete definitions. -*-Autoconf-*- +-# +-# Copyright (C) 2004-2005, 2007, 2009, 2011-2019, 2021-2022 Free +-# Software Foundation, Inc. +-# Written by Scott James Remnant, 2004. +-# +-# This file is free software; the Free Software Foundation gives +-# unlimited permission to copy and/or distribute it, with or without +-# modifications, as long as this notice is preserved. +- +-# serial 5 lt~obsolete.m4 +- +-# These exist entirely to fool aclocal when bootstrapping libtool. +-# +-# In the past libtool.m4 has provided macros via AC_DEFUN (or AU_DEFUN), +-# which have later been changed to m4_define as they aren't part of the +-# exported API, or moved to Autoconf or Automake where they belong. +-# +-# The trouble is, aclocal is a bit thick. It'll see the old AC_DEFUN +-# in /usr/share/aclocal/libtool.m4 and remember it, then when it sees us +-# using a macro with the same name in our local m4/libtool.m4 it'll +-# pull the old libtool.m4 in (it doesn't see our shiny new m4_define +-# and doesn't know about Autoconf macros at all.) +-# +-# So we provide this file, which has a silly filename so it's always +-# included after everything else. This provides aclocal with the +-# AC_DEFUNs it wants, but when m4 processes it, it doesn't do anything +-# because those macros already exist, or will be overwritten later. +-# We use AC_DEFUN over AU_DEFUN for compatibility with aclocal-1.6. +-# +-# Anytime we withdraw an AC_DEFUN or AU_DEFUN, remember to add it here. 
+-# Yes, that means every name once taken will need to remain here until +-# we give up compatibility with versions before 1.7, at which point +-# we need to keep only those names which we still refer to. +- +-# This is to help aclocal find these macros, as it can't see m4_define. +-AC_DEFUN([LTOBSOLETE_VERSION], [m4_if([1])]) +- +-m4_ifndef([AC_LIBTOOL_LINKER_OPTION], [AC_DEFUN([AC_LIBTOOL_LINKER_OPTION])]) +-m4_ifndef([AC_PROG_EGREP], [AC_DEFUN([AC_PROG_EGREP])]) +-m4_ifndef([_LT_AC_PROG_ECHO_BACKSLASH], [AC_DEFUN([_LT_AC_PROG_ECHO_BACKSLASH])]) +-m4_ifndef([_LT_AC_SHELL_INIT], [AC_DEFUN([_LT_AC_SHELL_INIT])]) +-m4_ifndef([_LT_AC_SYS_LIBPATH_AIX], [AC_DEFUN([_LT_AC_SYS_LIBPATH_AIX])]) +-m4_ifndef([_LT_PROG_LTMAIN], [AC_DEFUN([_LT_PROG_LTMAIN])]) +-m4_ifndef([_LT_AC_TAGVAR], [AC_DEFUN([_LT_AC_TAGVAR])]) +-m4_ifndef([AC_LTDL_ENABLE_INSTALL], [AC_DEFUN([AC_LTDL_ENABLE_INSTALL])]) +-m4_ifndef([AC_LTDL_PREOPEN], [AC_DEFUN([AC_LTDL_PREOPEN])]) +-m4_ifndef([_LT_AC_SYS_COMPILER], [AC_DEFUN([_LT_AC_SYS_COMPILER])]) +-m4_ifndef([_LT_AC_LOCK], [AC_DEFUN([_LT_AC_LOCK])]) +-m4_ifndef([AC_LIBTOOL_SYS_OLD_ARCHIVE], [AC_DEFUN([AC_LIBTOOL_SYS_OLD_ARCHIVE])]) +-m4_ifndef([_LT_AC_TRY_DLOPEN_SELF], [AC_DEFUN([_LT_AC_TRY_DLOPEN_SELF])]) +-m4_ifndef([AC_LIBTOOL_PROG_CC_C_O], [AC_DEFUN([AC_LIBTOOL_PROG_CC_C_O])]) +-m4_ifndef([AC_LIBTOOL_SYS_HARD_LINK_LOCKS], [AC_DEFUN([AC_LIBTOOL_SYS_HARD_LINK_LOCKS])]) +-m4_ifndef([AC_LIBTOOL_OBJDIR], [AC_DEFUN([AC_LIBTOOL_OBJDIR])]) +-m4_ifndef([AC_LTDL_OBJDIR], [AC_DEFUN([AC_LTDL_OBJDIR])]) +-m4_ifndef([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH], [AC_DEFUN([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH])]) +-m4_ifndef([AC_LIBTOOL_SYS_LIB_STRIP], [AC_DEFUN([AC_LIBTOOL_SYS_LIB_STRIP])]) +-m4_ifndef([AC_PATH_MAGIC], [AC_DEFUN([AC_PATH_MAGIC])]) +-m4_ifndef([AC_PROG_LD_GNU], [AC_DEFUN([AC_PROG_LD_GNU])]) +-m4_ifndef([AC_PROG_LD_RELOAD_FLAG], [AC_DEFUN([AC_PROG_LD_RELOAD_FLAG])]) +-m4_ifndef([AC_DEPLIBS_CHECK_METHOD], [AC_DEFUN([AC_DEPLIBS_CHECK_METHOD])]) 
+-m4_ifndef([AC_LIBTOOL_PROG_COMPILER_NO_RTTI], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_NO_RTTI])]) +-m4_ifndef([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE], [AC_DEFUN([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE])]) +-m4_ifndef([AC_LIBTOOL_PROG_COMPILER_PIC], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_PIC])]) +-m4_ifndef([AC_LIBTOOL_PROG_LD_SHLIBS], [AC_DEFUN([AC_LIBTOOL_PROG_LD_SHLIBS])]) +-m4_ifndef([AC_LIBTOOL_POSTDEP_PREDEP], [AC_DEFUN([AC_LIBTOOL_POSTDEP_PREDEP])]) +-m4_ifndef([LT_AC_PROG_EGREP], [AC_DEFUN([LT_AC_PROG_EGREP])]) +-m4_ifndef([LT_AC_PROG_SED], [AC_DEFUN([LT_AC_PROG_SED])]) +-m4_ifndef([_LT_CC_BASENAME], [AC_DEFUN([_LT_CC_BASENAME])]) +-m4_ifndef([_LT_COMPILER_BOILERPLATE], [AC_DEFUN([_LT_COMPILER_BOILERPLATE])]) +-m4_ifndef([_LT_LINKER_BOILERPLATE], [AC_DEFUN([_LT_LINKER_BOILERPLATE])]) +-m4_ifndef([_AC_PROG_LIBTOOL], [AC_DEFUN([_AC_PROG_LIBTOOL])]) +-m4_ifndef([AC_LIBTOOL_SETUP], [AC_DEFUN([AC_LIBTOOL_SETUP])]) +-m4_ifndef([_LT_AC_CHECK_DLFCN], [AC_DEFUN([_LT_AC_CHECK_DLFCN])]) +-m4_ifndef([AC_LIBTOOL_SYS_DYNAMIC_LINKER], [AC_DEFUN([AC_LIBTOOL_SYS_DYNAMIC_LINKER])]) +-m4_ifndef([_LT_AC_TAGCONFIG], [AC_DEFUN([_LT_AC_TAGCONFIG])]) +-m4_ifndef([AC_DISABLE_FAST_INSTALL], [AC_DEFUN([AC_DISABLE_FAST_INSTALL])]) +-m4_ifndef([_LT_AC_LANG_CXX], [AC_DEFUN([_LT_AC_LANG_CXX])]) +-m4_ifndef([_LT_AC_LANG_F77], [AC_DEFUN([_LT_AC_LANG_F77])]) +-m4_ifndef([_LT_AC_LANG_GCJ], [AC_DEFUN([_LT_AC_LANG_GCJ])]) +-m4_ifndef([AC_LIBTOOL_LANG_C_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_C_CONFIG])]) +-m4_ifndef([_LT_AC_LANG_C_CONFIG], [AC_DEFUN([_LT_AC_LANG_C_CONFIG])]) +-m4_ifndef([AC_LIBTOOL_LANG_CXX_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_CXX_CONFIG])]) +-m4_ifndef([_LT_AC_LANG_CXX_CONFIG], [AC_DEFUN([_LT_AC_LANG_CXX_CONFIG])]) +-m4_ifndef([AC_LIBTOOL_LANG_F77_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_F77_CONFIG])]) +-m4_ifndef([_LT_AC_LANG_F77_CONFIG], [AC_DEFUN([_LT_AC_LANG_F77_CONFIG])]) +-m4_ifndef([AC_LIBTOOL_LANG_GCJ_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_GCJ_CONFIG])]) +-m4_ifndef([_LT_AC_LANG_GCJ_CONFIG], 
[AC_DEFUN([_LT_AC_LANG_GCJ_CONFIG])]) +-m4_ifndef([AC_LIBTOOL_LANG_RC_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_RC_CONFIG])]) +-m4_ifndef([_LT_AC_LANG_RC_CONFIG], [AC_DEFUN([_LT_AC_LANG_RC_CONFIG])]) +-m4_ifndef([AC_LIBTOOL_CONFIG], [AC_DEFUN([AC_LIBTOOL_CONFIG])]) +-m4_ifndef([_LT_AC_FILE_LTDLL_C], [AC_DEFUN([_LT_AC_FILE_LTDLL_C])]) +-m4_ifndef([_LT_REQUIRED_DARWIN_CHECKS], [AC_DEFUN([_LT_REQUIRED_DARWIN_CHECKS])]) +-m4_ifndef([_LT_AC_PROG_CXXCPP], [AC_DEFUN([_LT_AC_PROG_CXXCPP])]) +-m4_ifndef([_LT_PREPARE_SED_QUOTE_VARS], [AC_DEFUN([_LT_PREPARE_SED_QUOTE_VARS])]) +-m4_ifndef([_LT_PROG_ECHO_BACKSLASH], [AC_DEFUN([_LT_PROG_ECHO_BACKSLASH])]) +-m4_ifndef([_LT_PROG_F77], [AC_DEFUN([_LT_PROG_F77])]) +-m4_ifndef([_LT_PROG_FC], [AC_DEFUN([_LT_PROG_FC])]) +-m4_ifndef([_LT_PROG_CXX], [AC_DEFUN([_LT_PROG_CXX])]) +- +-# Copyright (C) 2002-2021 Free Software Foundation, Inc. ++# Copyright (C) 2002-2020 Free Software Foundation, Inc. + # + # This file is free software; the Free Software Foundation + # gives unlimited permission to copy and/or distribute it, +@@ -9099,7 +35,7 @@ AC_DEFUN([AM_AUTOMAKE_VERSION], + [am__api_version='1.16' + dnl Some users find AM_AUTOMAKE_VERSION and mistake it for a way to + dnl require some minimum version. Point them to the right macro. +-m4_if([$1], [1.16.5], [], ++m4_if([$1], [1.16.2], [], + [AC_FATAL([Do not call $0, use AM_INIT_AUTOMAKE([$1]).])])dnl + ]) + +@@ -9115,14 +51,14 @@ m4_define([_AM_AUTOCONF_VERSION], []) + # Call AM_AUTOMAKE_VERSION and AM_AUTOMAKE_VERSION so they can be traced. + # This function is AC_REQUIREd by AM_INIT_AUTOMAKE. + AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION], +-[AM_AUTOMAKE_VERSION([1.16.5])dnl ++[AM_AUTOMAKE_VERSION([1.16.2])dnl + m4_ifndef([AC_AUTOCONF_VERSION], + [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl + _AM_AUTOCONF_VERSION(m4_defn([AC_AUTOCONF_VERSION]))]) + + # AM_AUX_DIR_EXPAND -*- Autoconf -*- + +-# Copyright (C) 2001-2021 Free Software Foundation, Inc. 
++# Copyright (C) 2001-2020 Free Software Foundation, Inc. + # + # This file is free software; the Free Software Foundation + # gives unlimited permission to copy and/or distribute it, +@@ -9174,7 +110,7 @@ am_aux_dir=`cd "$ac_aux_dir" && pwd` + + # AM_CONDITIONAL -*- Autoconf -*- + +-# Copyright (C) 1997-2021 Free Software Foundation, Inc. ++# Copyright (C) 1997-2020 Free Software Foundation, Inc. + # + # This file is free software; the Free Software Foundation + # gives unlimited permission to copy and/or distribute it, +@@ -9205,7 +141,7 @@ AC_CONFIG_COMMANDS_PRE( + Usually this means the macro was only invoked conditionally.]]) + fi])]) + +-# Copyright (C) 1999-2021 Free Software Foundation, Inc. ++# Copyright (C) 1999-2020 Free Software Foundation, Inc. + # + # This file is free software; the Free Software Foundation + # gives unlimited permission to copy and/or distribute it, +@@ -9396,7 +332,7 @@ _AM_SUBST_NOTMAKE([am__nodep])dnl + + # Generate code to set up dependency tracking. -*- Autoconf -*- + +-# Copyright (C) 1999-2021 Free Software Foundation, Inc. ++# Copyright (C) 1999-2020 Free Software Foundation, Inc. + # + # This file is free software; the Free Software Foundation + # gives unlimited permission to copy and/or distribute it, +@@ -9464,7 +400,7 @@ AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS], + + # Do all the work for Automake. -*- Autoconf -*- + +-# Copyright (C) 1996-2021 Free Software Foundation, Inc. ++# Copyright (C) 1996-2020 Free Software Foundation, Inc. + # + # This file is free software; the Free Software Foundation + # gives unlimited permission to copy and/or distribute it, +@@ -9492,10 +428,6 @@ m4_defn([AC_PROG_CC]) + # release and drop the old call support. + AC_DEFUN([AM_INIT_AUTOMAKE], + [AC_PREREQ([2.65])dnl +-m4_ifdef([_$0_ALREADY_INIT], +- [m4_fatal([$0 expanded multiple times +-]m4_defn([_$0_ALREADY_INIT]))], +- [m4_define([_$0_ALREADY_INIT], m4_expansion_stack)])dnl + dnl Autoconf wants to disallow AM_ names. 
We explicitly allow + dnl the ones we care about. + m4_pattern_allow([^AM_[A-Z]+FLAGS$])dnl +@@ -9532,7 +464,7 @@ m4_ifval([$3], [_AM_SET_OPTION([no-define])])dnl + [_AM_SET_OPTIONS([$1])dnl + dnl Diagnose old-style AC_INIT with new-style AM_AUTOMAKE_INIT. + m4_if( +- m4_ifset([AC_PACKAGE_NAME], [ok]):m4_ifset([AC_PACKAGE_VERSION], [ok]), ++ m4_ifdef([AC_PACKAGE_NAME], [ok]):m4_ifdef([AC_PACKAGE_VERSION], [ok]), + [ok:ok],, + [m4_fatal([AC_INIT should be called with package and version arguments])])dnl + AC_SUBST([PACKAGE], ['AC_PACKAGE_TARNAME'])dnl +@@ -9584,20 +516,6 @@ AC_PROVIDE_IFELSE([AC_PROG_OBJCXX], + [m4_define([AC_PROG_OBJCXX], + m4_defn([AC_PROG_OBJCXX])[_AM_DEPENDENCIES([OBJCXX])])])dnl + ]) +-# Variables for tags utilities; see am/tags.am +-if test -z "$CTAGS"; then +- CTAGS=ctags +-fi +-AC_SUBST([CTAGS]) +-if test -z "$ETAGS"; then +- ETAGS=etags +-fi +-AC_SUBST([ETAGS]) +-if test -z "$CSCOPE"; then +- CSCOPE=cscope +-fi +-AC_SUBST([CSCOPE]) +- + AC_REQUIRE([AM_SILENT_RULES])dnl + dnl The testsuite driver may need to know about EXEEXT, so add the + dnl 'am__EXEEXT' conditional if _AM_COMPILER_EXEEXT was seen. This +@@ -9679,7 +597,7 @@ for _am_header in $config_headers :; do + done + echo "timestamp for $_am_arg" >`AS_DIRNAME(["$_am_arg"])`/stamp-h[]$_am_stamp_count]) + +-# Copyright (C) 2001-2021 Free Software Foundation, Inc. ++# Copyright (C) 2001-2020 Free Software Foundation, Inc. + # + # This file is free software; the Free Software Foundation + # gives unlimited permission to copy and/or distribute it, +@@ -9700,29 +618,10 @@ if test x"${install_sh+set}" != xset; then + fi + AC_SUBST([install_sh])]) + +-# Copyright (C) 2003-2021 Free Software Foundation, Inc. +-# +-# This file is free software; the Free Software Foundation +-# gives unlimited permission to copy and/or distribute it, +-# with or without modifications, as long as this notice is preserved. +- +-# Check whether the underlying file-system supports filenames +-# with a leading dot. 
For instance MS-DOS doesn't. +-AC_DEFUN([AM_SET_LEADING_DOT], +-[rm -rf .tst 2>/dev/null +-mkdir .tst 2>/dev/null +-if test -d .tst; then +- am__leading_dot=. +-else +- am__leading_dot=_ +-fi +-rmdir .tst 2>/dev/null +-AC_SUBST([am__leading_dot])]) +- + # Add --enable-maintainer-mode option to configure. -*- Autoconf -*- + # From Jim Meyering + +-# Copyright (C) 1996-2021 Free Software Foundation, Inc. ++# Copyright (C) 1996-2020 Free Software Foundation, Inc. + # + # This file is free software; the Free Software Foundation + # gives unlimited permission to copy and/or distribute it, +@@ -9757,7 +656,7 @@ AC_MSG_CHECKING([whether to enable maintainer-specific portions of Makefiles]) + + # Check to see how 'make' treats includes. -*- Autoconf -*- + +-# Copyright (C) 2001-2021 Free Software Foundation, Inc. ++# Copyright (C) 2001-2020 Free Software Foundation, Inc. + # + # This file is free software; the Free Software Foundation + # gives unlimited permission to copy and/or distribute it, +@@ -9800,7 +699,7 @@ AC_SUBST([am__quote])]) + + # Fake the existence of programs that GNU maintainers use. -*- Autoconf -*- + +-# Copyright (C) 1997-2021 Free Software Foundation, Inc. ++# Copyright (C) 1997-2020 Free Software Foundation, Inc. + # + # This file is free software; the Free Software Foundation + # gives unlimited permission to copy and/or distribute it, +@@ -9821,7 +720,12 @@ AC_DEFUN([AM_MISSING_HAS_RUN], + [AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl + AC_REQUIRE_AUX_FILE([missing])dnl + if test x"${MISSING+set}" != xset; then +- MISSING="\${SHELL} '$am_aux_dir/missing'" ++ case $am_aux_dir in ++ *\ * | *\ *) ++ MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;; ++ *) ++ MISSING="\${SHELL} $am_aux_dir/missing" ;; ++ esac + fi + # Use eval to expand $SHELL + if eval "$MISSING --is-lightweight"; then +@@ -9834,7 +738,7 @@ fi + + # Helper functions for option handling. -*- Autoconf -*- + +-# Copyright (C) 2001-2021 Free Software Foundation, Inc. 
++# Copyright (C) 2001-2020 Free Software Foundation, Inc. + # + # This file is free software; the Free Software Foundation + # gives unlimited permission to copy and/or distribute it, +@@ -9863,7 +767,7 @@ AC_DEFUN([_AM_SET_OPTIONS], + AC_DEFUN([_AM_IF_OPTION], + [m4_ifset(_AM_MANGLE_OPTION([$1]), [$2], [$3])]) + +-# Copyright (C) 1999-2021 Free Software Foundation, Inc. ++# Copyright (C) 1999-2020 Free Software Foundation, Inc. + # + # This file is free software; the Free Software Foundation + # gives unlimited permission to copy and/or distribute it, +@@ -9910,7 +814,7 @@ AC_LANG_POP([C])]) + # For backward compatibility. + AC_DEFUN_ONCE([AM_PROG_CC_C_O], [AC_REQUIRE([AC_PROG_CC])]) + +-# Copyright (C) 2001-2021 Free Software Foundation, Inc. ++# Copyright (C) 2001-2020 Free Software Foundation, Inc. + # + # This file is free software; the Free Software Foundation + # gives unlimited permission to copy and/or distribute it, +@@ -9929,7 +833,7 @@ AC_DEFUN([AM_RUN_LOG], + + # Check to make sure that the build environment is sane. -*- Autoconf -*- + +-# Copyright (C) 1996-2021 Free Software Foundation, Inc. ++# Copyright (C) 1996-2020 Free Software Foundation, Inc. + # + # This file is free software; the Free Software Foundation + # gives unlimited permission to copy and/or distribute it, +@@ -10010,7 +914,7 @@ AC_CONFIG_COMMANDS_PRE( + rm -f conftest.file + ]) + +-# Copyright (C) 2009-2021 Free Software Foundation, Inc. ++# Copyright (C) 2009-2020 Free Software Foundation, Inc. + # + # This file is free software; the Free Software Foundation + # gives unlimited permission to copy and/or distribute it, +@@ -10070,7 +974,7 @@ AC_SUBST([AM_BACKSLASH])dnl + _AM_SUBST_NOTMAKE([AM_BACKSLASH])dnl + ]) + +-# Copyright (C) 2001-2021 Free Software Foundation, Inc. ++# Copyright (C) 2001-2020 Free Software Foundation, Inc. 
+ # + # This file is free software; the Free Software Foundation + # gives unlimited permission to copy and/or distribute it, +@@ -10098,7 +1002,7 @@ fi + INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" + AC_SUBST([INSTALL_STRIP_PROGRAM])]) + +-# Copyright (C) 2006-2021 Free Software Foundation, Inc. ++# Copyright (C) 2006-2020 Free Software Foundation, Inc. + # + # This file is free software; the Free Software Foundation + # gives unlimited permission to copy and/or distribute it, +@@ -10117,7 +1021,7 @@ AC_DEFUN([AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE($@)]) + + # Check how to create a tarball. -*- Autoconf -*- + +-# Copyright (C) 2004-2021 Free Software Foundation, Inc. ++# Copyright (C) 2004-2020 Free Software Foundation, Inc. + # + # This file is free software; the Free Software Foundation + # gives unlimited permission to copy and/or distribute it, +@@ -10248,3 +1152,14 @@ AC_SUBST([am__tar]) + AC_SUBST([am__untar]) + ]) # _AM_PROG_TAR + ++m4_include([../config/acx.m4]) ++m4_include([../config/depstand.m4]) ++m4_include([../config/lead-dot.m4]) ++m4_include([../config/lthostflags.m4]) ++m4_include([../config/override.m4]) ++m4_include([../config/warnings.m4]) ++m4_include([../libtool.m4]) ++m4_include([../ltoptions.m4]) ++m4_include([../ltsugar.m4]) ++m4_include([../ltversion.m4]) ++m4_include([../lt~obsolete.m4]) +diff --git a/bolt-plugin/config.h.in b/bolt-plugin/config.h.in +index 9e9d316ec..ddbde7619 100644 +--- a/bolt-plugin/config.h.in ++++ b/bolt-plugin/config.h.in +@@ -6,15 +6,12 @@ + /* Define to 1 if you have the header file. */ + #undef HAVE_INTTYPES_H + +-/* Define to 1 if you have the header file. */ +-#undef HAVE_MINIX_CONFIG_H ++/* Define to 1 if you have the header file. */ ++#undef HAVE_MEMORY_H + + /* Define to 1 if you have the header file. */ + #undef HAVE_STDINT_H + +-/* Define to 1 if you have the header file. */ +-#undef HAVE_STDIO_H +- + /* Define to 1 if you have the header file. 
*/ + #undef HAVE_STDLIB_H + +@@ -36,10 +33,8 @@ + /* Define to 1 if you have the header file. */ + #undef HAVE_UNISTD_H + +-/* Define to 1 if you have the header file. */ +-#undef HAVE_WCHAR_H +- +-/* Define to the sub-directory where libtool stores uninstalled libraries. */ ++/* Define to the sub-directory in which libtool stores uninstalled libraries. ++ */ + #undef LT_OBJDIR + + /* Name of package */ +@@ -63,108 +58,55 @@ + /* Define to the version of this package. */ + #undef PACKAGE_VERSION + +-/* Define to 1 if all of the C90 standard headers exist (not just the ones +- required in a freestanding environment). This macro is provided for +- backward compatibility; new code need not use it. */ ++/* Define to 1 if you have the ANSI C header files. */ + #undef STDC_HEADERS + + /* Enable extensions on AIX 3, Interix. */ + #ifndef _ALL_SOURCE + # undef _ALL_SOURCE + #endif +-/* Enable general extensions on macOS. */ +-#ifndef _DARWIN_C_SOURCE +-# undef _DARWIN_C_SOURCE +-#endif +-/* Enable general extensions on Solaris. */ +-#ifndef __EXTENSIONS__ +-# undef __EXTENSIONS__ +-#endif + /* Enable GNU extensions on systems that have them. */ + #ifndef _GNU_SOURCE + # undef _GNU_SOURCE + #endif +-/* Enable X/Open compliant socket functions that do not require linking +- with -lxnet on HP-UX 11.11. */ +-#ifndef _HPUX_ALT_XOPEN_SOCKET_API +-# undef _HPUX_ALT_XOPEN_SOCKET_API +-#endif +-/* Identify the host operating system as Minix. +- This macro does not affect the system headers' behavior. +- A future release of Autoconf may stop defining this macro. */ +-#ifndef _MINIX +-# undef _MINIX +-#endif +-/* Enable general extensions on NetBSD. +- Enable NetBSD compatibility extensions on Minix. */ +-#ifndef _NETBSD_SOURCE +-# undef _NETBSD_SOURCE +-#endif +-/* Enable OpenBSD compatibility extensions on NetBSD. +- Oddly enough, this does nothing on OpenBSD. */ +-#ifndef _OPENBSD_SOURCE +-# undef _OPENBSD_SOURCE +-#endif +-/* Define to 1 if needed for POSIX-compatible behavior. 
*/ +-#ifndef _POSIX_SOURCE +-# undef _POSIX_SOURCE +-#endif +-/* Define to 2 if needed for POSIX-compatible behavior. */ +-#ifndef _POSIX_1_SOURCE +-# undef _POSIX_1_SOURCE +-#endif +-/* Enable POSIX-compatible threading on Solaris. */ ++/* Enable threading extensions on Solaris. */ + #ifndef _POSIX_PTHREAD_SEMANTICS + # undef _POSIX_PTHREAD_SEMANTICS + #endif +-/* Enable extensions specified by ISO/IEC TS 18661-5:2014. */ +-#ifndef __STDC_WANT_IEC_60559_ATTRIBS_EXT__ +-# undef __STDC_WANT_IEC_60559_ATTRIBS_EXT__ +-#endif +-/* Enable extensions specified by ISO/IEC TS 18661-1:2014. */ +-#ifndef __STDC_WANT_IEC_60559_BFP_EXT__ +-# undef __STDC_WANT_IEC_60559_BFP_EXT__ +-#endif +-/* Enable extensions specified by ISO/IEC TS 18661-2:2015. */ +-#ifndef __STDC_WANT_IEC_60559_DFP_EXT__ +-# undef __STDC_WANT_IEC_60559_DFP_EXT__ +-#endif +-/* Enable extensions specified by ISO/IEC TS 18661-4:2015. */ +-#ifndef __STDC_WANT_IEC_60559_FUNCS_EXT__ +-# undef __STDC_WANT_IEC_60559_FUNCS_EXT__ +-#endif +-/* Enable extensions specified by ISO/IEC TS 18661-3:2015. */ +-#ifndef __STDC_WANT_IEC_60559_TYPES_EXT__ +-# undef __STDC_WANT_IEC_60559_TYPES_EXT__ +-#endif +-/* Enable extensions specified by ISO/IEC TR 24731-2:2010. */ +-#ifndef __STDC_WANT_LIB_EXT2__ +-# undef __STDC_WANT_LIB_EXT2__ +-#endif +-/* Enable extensions specified by ISO/IEC 24747:2009. */ +-#ifndef __STDC_WANT_MATH_SPEC_FUNCS__ +-# undef __STDC_WANT_MATH_SPEC_FUNCS__ +-#endif + /* Enable extensions on HP NonStop. */ + #ifndef _TANDEM_SOURCE + # undef _TANDEM_SOURCE + #endif +-/* Enable X/Open extensions. Define to 500 only if necessary +- to make mbstate_t available. */ +-#ifndef _XOPEN_SOURCE +-# undef _XOPEN_SOURCE ++/* Enable general extensions on Solaris. */ ++#ifndef __EXTENSIONS__ ++# undef __EXTENSIONS__ + #endif + + + /* Version number of package */ + #undef VERSION + ++/* Enable large inode numbers on Mac OS X 10.5. 
*/ ++#ifndef _DARWIN_USE_64_BIT_INODE ++# define _DARWIN_USE_64_BIT_INODE 1 ++#endif ++ + /* Number of bits in a file offset, on hosts where this is settable. */ + #undef _FILE_OFFSET_BITS + + /* Define for large files, on AIX-style hosts. */ + #undef _LARGE_FILES + ++/* Define to 1 if on MINIX. */ ++#undef _MINIX ++ ++/* Define to 2 if the system does not provide POSIX.1 features except with ++ this defined. */ ++#undef _POSIX_1_SOURCE ++ ++/* Define to 1 if you need to in order for `stat' and other things to work. */ ++#undef _POSIX_SOURCE ++ + /* Define for Solaris 2.5.1 so the uint64_t typedef from , + , or is not used. If the typedef were allowed, the + #define below would cause a syntax error. */ +diff --git a/bolt-plugin/configure b/bolt-plugin/configure +index 63bde9a41..78e7e57c3 100755 +--- a/bolt-plugin/configure ++++ b/bolt-plugin/configure +@@ -1,10 +1,9 @@ + #! /bin/sh + # Guess values for system-dependent variables and create Makefiles. +-# Generated by GNU Autoconf 2.71 for bolt plugin for ld 0.1. ++# Generated by GNU Autoconf 2.69 for bolt plugin for ld 0.1. + # + # +-# Copyright (C) 1992-1996, 1998-2017, 2020-2021 Free Software Foundation, +-# Inc. ++# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc. + # + # + # This configure script is free software; the Free Software Foundation +@@ -15,16 +14,14 @@ + + # Be more Bourne compatible + DUALCASE=1; export DUALCASE # for MKS sh +-as_nop=: +-if test ${ZSH_VERSION+y} && (emulate sh) >/dev/null 2>&1 +-then : ++if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +-else $as_nop ++else + case `(set -o) 2>/dev/null` in #( + *posix*) : + set -o posix ;; #( +@@ -34,46 +31,46 @@ esac + fi + + +- +-# Reset variables that may have inherited troublesome values from +-# the environment. 
+- +-# IFS needs to be set, to space, tab, and newline, in precisely that order. +-# (If _AS_PATH_WALK were called with IFS unset, it would have the +-# side effect of setting IFS to empty, thus disabling word splitting.) +-# Quoting is to prevent editors from complaining about space-tab. + as_nl=' + ' + export as_nl +-IFS=" "" $as_nl" +- +-PS1='$ ' +-PS2='> ' +-PS4='+ ' +- +-# Ensure predictable behavior from utilities with locale-dependent output. +-LC_ALL=C +-export LC_ALL +-LANGUAGE=C +-export LANGUAGE +- +-# We cannot yet rely on "unset" to work, but we need these variables +-# to be unset--not just set to an empty or harmless value--now, to +-# avoid bugs in old shells (e.g. pre-3.0 UWIN ksh). This construct +-# also avoids known problems related to "unset" and subshell syntax +-# in other old shells (e.g. bash 2.01 and pdksh 5.2.14). +-for as_var in BASH_ENV ENV MAIL MAILPATH CDPATH +-do eval test \${$as_var+y} \ +- && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : +-done +- +-# Ensure that fds 0, 1, and 2 are open. +-if (exec 3>&0) 2>/dev/null; then :; else exec 0&1) 2>/dev/null; then :; else exec 1>/dev/null; fi +-if (exec 3>&2) ; then :; else exec 2>/dev/null; fi ++# Printing a long string crashes Solaris 7 /usr/bin/printf. ++as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' ++as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo ++as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo ++# Prefer a ksh shell builtin over an external printf program on Solaris, ++# but without wasting forks for bash or zsh. 
++if test -z "$BASH_VERSION$ZSH_VERSION" \ ++ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then ++ as_echo='print -r --' ++ as_echo_n='print -rn --' ++elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then ++ as_echo='printf %s\n' ++ as_echo_n='printf %s' ++else ++ if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then ++ as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' ++ as_echo_n='/usr/ucb/echo -n' ++ else ++ as_echo_body='eval expr "X$1" : "X\\(.*\\)"' ++ as_echo_n_body='eval ++ arg=$1; ++ case $arg in #( ++ *"$as_nl"*) ++ expr "X$arg" : "X\\(.*\\)$as_nl"; ++ arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; ++ esac; ++ expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" ++ ' ++ export as_echo_n_body ++ as_echo_n='sh -c $as_echo_n_body as_echo' ++ fi ++ export as_echo_body ++ as_echo='sh -c $as_echo_body as_echo' ++fi + + # The user is always right. +-if ${PATH_SEPARATOR+false} :; then ++if test "${PATH_SEPARATOR+set}" != set; then + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { + (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || +@@ -82,6 +79,13 @@ if ${PATH_SEPARATOR+false} :; then + fi + + ++# IFS ++# We need space, tab and new line, in precisely that order. Quoting is ++# there to prevent editors from complaining about space-tab. ++# (If _AS_PATH_WALK were called with IFS unset, it would disable word ++# splitting by setting IFS to empty value.) ++IFS=" "" $as_nl" ++ + # Find who we are. Look in the path if we contain no directory separator. + as_myself= + case $0 in #(( +@@ -90,12 +94,8 @@ case $0 in #(( + for as_dir in $PATH + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac +- test -r "$as_dir$0" && as_myself=$as_dir$0 && break ++ test -z "$as_dir" && as_dir=. 
++ test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break + done + IFS=$as_save_IFS + +@@ -107,10 +107,30 @@ if test "x$as_myself" = x; then + as_myself=$0 + fi + if test ! -f "$as_myself"; then +- printf "%s\n" "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 ++ $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 + exit 1 + fi + ++# Unset variables that we do not need and which cause bugs (e.g. in ++# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" ++# suppresses any "Segmentation fault" message there. '((' could ++# trigger a bug in pdksh 5.2.14. ++for as_var in BASH_ENV ENV MAIL MAILPATH ++do eval test x\${$as_var+set} = xset \ ++ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : ++done ++PS1='$ ' ++PS2='> ' ++PS4='+ ' ++ ++# NLS nuisances. ++LC_ALL=C ++export LC_ALL ++LANGUAGE=C ++export LANGUAGE ++ ++# CDPATH. ++(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + + # Use a proper internal environment variable to ensure we don't fall + # into an infinite loop, continuously re-executing ourselves. +@@ -132,22 +152,20 @@ esac + exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} + # Admittedly, this is quite paranoid, since all the known shells bail + # out after a failed `exec'. +-printf "%s\n" "$0: could not re-execute with $CONFIG_SHELL" >&2 +-exit 255 ++$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 ++as_fn_exit 255 + fi + # We don't want this to propagate to other subprocesses. + { _as_can_reexec=; unset _as_can_reexec;} + if test "x$CONFIG_SHELL" = x; then +- as_bourne_compatible="as_nop=: +-if test \${ZSH_VERSION+y} && (emulate sh) >/dev/null 2>&1 +-then : ++ as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which + # is contrary to our usage. Disable this feature. 
+ alias -g '\${1+\"\$@\"}'='\"\$@\"' + setopt NO_GLOB_SUBST +-else \$as_nop ++else + case \`(set -o) 2>/dev/null\` in #( + *posix*) : + set -o posix ;; #( +@@ -167,20 +185,18 @@ as_fn_success || { exitcode=1; echo as_fn_success failed.; } + as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; } + as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; } + as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; } +-if ( set x; as_fn_ret_success y && test x = \"\$1\" ) +-then : ++if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then : + +-else \$as_nop ++else + exitcode=1; echo positional parameters were not saved. + fi + test x\$exitcode = x0 || exit 1 +-blah=\$(echo \$(echo blah)) +-test x\"\$blah\" = xblah || exit 1 + test -x / || exit 1" + as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO + as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO + eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && + test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1 ++test \$(( 1 + 1 )) = 2 || exit 1 + + test -n \"\${ZSH_VERSION+set}\${BASH_VERSION+set}\" || ( + ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +@@ -188,40 +204,31 @@ test -x / || exit 1" + ECHO=\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO + PATH=/empty FPATH=/empty; export PATH FPATH + test \"X\`printf %s \$ECHO\`\" = \"X\$ECHO\" \\ +- || test \"X\`print -r -- \$ECHO\`\" = \"X\$ECHO\" ) || exit 1 +-test \$(( 1 + 1 )) = 2 || exit 1" +- if (eval "$as_required") 2>/dev/null +-then : ++ || test \"X\`print -r -- \$ECHO\`\" = \"X\$ECHO\" ) || exit 1" ++ if (eval "$as_required") 2>/dev/null; then : + as_have_required=yes +-else $as_nop ++else + as_have_required=no 
+ fi +- if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null +-then : ++ if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then : + +-else $as_nop ++else + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR + as_found=false + for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac ++ test -z "$as_dir" && as_dir=. + as_found=: + case $as_dir in #( + /*) + for as_base in sh bash ksh sh5; do + # Try only shells that exist, to save several forks. +- as_shell=$as_dir$as_base ++ as_shell=$as_dir/$as_base + if { test -f "$as_shell" || test -f "$as_shell.exe"; } && +- as_run=a "$as_shell" -c "$as_bourne_compatible""$as_required" 2>/dev/null +-then : ++ { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then : + CONFIG_SHELL=$as_shell as_have_required=yes +- if as_run=a "$as_shell" -c "$as_bourne_compatible""$as_suggested" 2>/dev/null +-then : ++ if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then : + break 2 + fi + fi +@@ -229,21 +236,14 @@ fi + esac + as_found=false + done +-IFS=$as_save_IFS +-if $as_found +-then : +- +-else $as_nop +- if { test -f "$SHELL" || test -f "$SHELL.exe"; } && +- as_run=a "$SHELL" -c "$as_bourne_compatible""$as_required" 2>/dev/null +-then : ++$as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } && ++ { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then : + CONFIG_SHELL=$SHELL as_have_required=yes +-fi +-fi ++fi; } ++IFS=$as_save_IFS + + +- if test "x$CONFIG_SHELL" != x +-then : ++ if test "x$CONFIG_SHELL" != x; then : + export CONFIG_SHELL + # We cannot yet assume a decent shell, so we have to provide a + # neutralization value for shells without unset; and this also +@@ -261,19 +261,18 @@ esac + exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} + # Admittedly, this is 
quite paranoid, since all the known shells bail + # out after a failed `exec'. +-printf "%s\n" "$0: could not re-execute with $CONFIG_SHELL" >&2 ++$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 + exit 255 + fi + +- if test x$as_have_required = xno +-then : +- printf "%s\n" "$0: This script requires a shell more modern than all" +- printf "%s\n" "$0: the shells that I found on your system." +- if test ${ZSH_VERSION+y} ; then +- printf "%s\n" "$0: In particular, zsh $ZSH_VERSION has bugs and should" +- printf "%s\n" "$0: be upgraded to zsh 4.3.4 or later." ++ if test x$as_have_required = xno; then : ++ $as_echo "$0: This script requires a shell more modern than all" ++ $as_echo "$0: the shells that I found on your system." ++ if test x${ZSH_VERSION+set} = xset ; then ++ $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should" ++ $as_echo "$0: be upgraded to zsh 4.3.4 or later." + else +- printf "%s\n" "$0: Please tell bug-autoconf@gnu.org about your system, ++ $as_echo "$0: Please tell bug-autoconf@gnu.org about your system, + $0: including any error possibly output before this + $0: message. Then install a modern shell, or manually run + $0: the script under such a shell if you do have one." +@@ -300,7 +299,6 @@ as_fn_unset () + } + as_unset=as_fn_unset + +- + # as_fn_set_status STATUS + # ----------------------- + # Set $? to STATUS, without forking. +@@ -318,14 +316,6 @@ as_fn_exit () + as_fn_set_status $1 + exit $1 + } # as_fn_exit +-# as_fn_nop +-# --------- +-# Do nothing but, unlike ":", preserve the value of $?. +-as_fn_nop () +-{ +- return $? 
+-} +-as_nop=as_fn_nop + + # as_fn_mkdir_p + # ------------- +@@ -340,7 +330,7 @@ as_fn_mkdir_p () + as_dirs= + while :; do + case $as_dir in #( +- *\'*) as_qdir=`printf "%s\n" "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( ++ *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( + *) as_qdir=$as_dir;; + esac + as_dirs="'$as_qdir' $as_dirs" +@@ -349,7 +339,7 @@ $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || +-printf "%s\n" X"$as_dir" | ++$as_echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q +@@ -388,13 +378,12 @@ as_fn_executable_p () + # advantage of any shell optimizations that allow amortized linear growth over + # repeated appends, instead of the typical quadratic growth present in naive + # implementations. +-if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null +-then : ++if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : + eval 'as_fn_append () + { + eval $1+=\$2 + }' +-else $as_nop ++else + as_fn_append () + { + eval $1=\$$1\$2 +@@ -406,27 +395,18 @@ fi # as_fn_append + # Perform arithmetic evaluation on the ARGs, and store the result in the + # global $as_val. Take advantage of shells that can avoid forks. The arguments + # must be portable across $(()) and expr. +-if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null +-then : ++if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : + eval 'as_fn_arith () + { + as_val=$(( $* )) + }' +-else $as_nop ++else + as_fn_arith () + { + as_val=`expr "$@" || test $? -eq 1` + } + fi # as_fn_arith + +-# as_fn_nop +-# --------- +-# Do nothing but, unlike ":", preserve the value of $?. +-as_fn_nop () +-{ +- return $? 
+-} +-as_nop=as_fn_nop + + # as_fn_error STATUS ERROR [LINENO LOG_FD] + # ---------------------------------------- +@@ -438,9 +418,9 @@ as_fn_error () + as_status=$1; test $as_status -eq 0 && as_status=1 + if test "$4"; then + as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 ++ $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 + fi +- printf "%s\n" "$as_me: error: $2" >&2 ++ $as_echo "$as_me: error: $2" >&2 + as_fn_exit $as_status + } # as_fn_error + +@@ -467,7 +447,7 @@ as_me=`$as_basename -- "$0" || + $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| . 2>/dev/null || +-printf "%s\n" X/"$0" | ++$as_echo X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q +@@ -511,7 +491,7 @@ as_cr_alnum=$as_cr_Letters$as_cr_digits + s/-\n.*// + ' >$as_me.lineno && + chmod +x "$as_me.lineno" || +- { printf "%s\n" "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } ++ { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } + + # If we had to re-execute with $CONFIG_SHELL, we're ensured to have + # already done that, so ensure we don't try to do so again and fall +@@ -525,10 +505,6 @@ as_cr_alnum=$as_cr_Letters$as_cr_digits + exit + } + +- +-# Determine whether it's possible to make 'echo' print without a newline. +-# These variables are no longer used directly by Autoconf, but are AC_SUBSTed +-# for compatibility with existing Makefiles. + ECHO_C= ECHO_N= ECHO_T= + case `echo -n x` in #((((( + -n*) +@@ -542,13 +518,6 @@ case `echo -n x` in #((((( + ECHO_N='-n';; + esac + +-# For backward compatibility with old third-party macros, we provide +-# the shell variables $as_echo and $as_echo_n. New code should use +-# AS_ECHO(["message"]) and AS_ECHO_N(["message"]), respectively. 
+-as_echo='printf %s\n' +-as_echo_n='printf %s' +- +- + rm -f conf$$ conf$$.exe conf$$.file + if test -d conf$$.dir; then + rm -f conf$$.dir/conf$$.file +@@ -625,75 +594,78 @@ PACKAGE_URL='' + + # Factoring default headers for most tests. + ac_includes_default="\ +-#include +-#ifdef HAVE_STDIO_H +-# include ++#include ++#ifdef HAVE_SYS_TYPES_H ++# include + #endif +-#ifdef HAVE_STDLIB_H ++#ifdef HAVE_SYS_STAT_H ++# include ++#endif ++#ifdef STDC_HEADERS + # include ++# include ++#else ++# ifdef HAVE_STDLIB_H ++# include ++# endif + #endif + #ifdef HAVE_STRING_H ++# if !defined STDC_HEADERS && defined HAVE_MEMORY_H ++# include ++# endif + # include + #endif ++#ifdef HAVE_STRINGS_H ++# include ++#endif + #ifdef HAVE_INTTYPES_H + # include + #endif + #ifdef HAVE_STDINT_H + # include + #endif +-#ifdef HAVE_STRINGS_H +-# include +-#endif +-#ifdef HAVE_SYS_TYPES_H +-# include +-#endif +-#ifdef HAVE_SYS_STAT_H +-# include +-#endif + #ifdef HAVE_UNISTD_H + # include + #endif" + +-ac_header_c_list= + ac_subst_vars='am__EXEEXT_FALSE + am__EXEEXT_TRUE + LTLIBOBJS + LIBOBJS + target_noncanonical ++lt_host_flags + CXXCPP +-LT_SYS_LIBRARY_PATH + OTOOL64 + OTOOL + LIPO + NMEDIT + DSYMUTIL +-MANIFEST_TOOL + RANLIB +-ac_ct_AR + AR +-DLLTOOL + OBJDUMP +-FILECMD + LN_S + NM + ac_ct_DUMPBIN + DUMPBIN + LD + FGREP +-EGREP +-GREP + SED + LIBTOOL ++get_gcc_base_ver + real_target_noncanonical + accel_dir_suffix + gcc_build_dir + ac_bolt_plugin_ldflags ++ac_bolt_plugin_warn_cflags + am__fastdepCXX_FALSE + am__fastdepCXX_TRUE + CXXDEPMODE + ac_ct_CXX + CXXFLAGS + CXX ++EGREP ++GREP ++CPP + am__fastdepCC_FALSE + am__fastdepCC_TRUE + CCDEPMODE +@@ -718,9 +690,6 @@ AM_BACKSLASH + AM_DEFAULT_VERBOSITY + AM_DEFAULT_V + AM_V +-CSCOPE +-ETAGS +-CTAGS + am__untar + am__tar + AMTAR +@@ -744,6 +713,10 @@ am__isrc + INSTALL_DATA + INSTALL_SCRIPT + INSTALL_PROGRAM ++target_subdir ++host_subdir ++build_subdir ++build_libsubdir + target_os + target_vendor + target_cpu +@@ -775,7 +748,6 @@ infodir + 
docdir + oldincludedir + includedir +-runstatedir + localstatedir + sharedstatedir + sysconfdir +@@ -799,18 +771,18 @@ am__quote' + ac_subst_files='' + ac_user_opts=' + enable_option_checking ++with_build_libsubdir + enable_silent_rules + enable_maintainer_mode + with_libiberty + enable_dependency_tracking + enable_largefile ++with_gcc_major_version_only + enable_shared + enable_static + with_pic + enable_fast_install +-with_aix_soname + with_gnu_ld +-with_sysroot + enable_libtool_lock + ' + ac_precious_vars='build_alias +@@ -821,10 +793,10 @@ CFLAGS + LDFLAGS + LIBS + CPPFLAGS ++CPP + CXX + CXXFLAGS + CCC +-LT_SYS_LIBRARY_PATH + CXXCPP' + + +@@ -864,7 +836,6 @@ datadir='${datarootdir}' + sysconfdir='${prefix}/etc' + sharedstatedir='${prefix}/com' + localstatedir='${prefix}/var' +-runstatedir='${localstatedir}/run' + includedir='${prefix}/include' + oldincludedir='/usr/include' + docdir='${datarootdir}/doc/${PACKAGE_TARNAME}' +@@ -894,6 +865,8 @@ do + *) ac_optarg=yes ;; + esac + ++ # Accept the important Cygnus configure options, so we can diagnose typos. ++ + case $ac_dashdash$ac_option in + --) + ac_dashdash=yes ;; +@@ -934,9 +907,9 @@ do + ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && +- as_fn_error $? "invalid feature name: \`$ac_useropt'" ++ as_fn_error $? "invalid feature name: $ac_useropt" + ac_useropt_orig=$ac_useropt +- ac_useropt=`printf "%s\n" "$ac_useropt" | sed 's/[-+.]/_/g'` ++ ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" + "enable_$ac_useropt" +@@ -960,9 +933,9 @@ do + ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && +- as_fn_error $? "invalid feature name: \`$ac_useropt'" ++ as_fn_error $? 
"invalid feature name: $ac_useropt" + ac_useropt_orig=$ac_useropt +- ac_useropt=`printf "%s\n" "$ac_useropt" | sed 's/[-+.]/_/g'` ++ ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" + "enable_$ac_useropt" +@@ -1115,15 +1088,6 @@ do + | -silent | --silent | --silen | --sile | --sil) + silent=yes ;; + +- -runstatedir | --runstatedir | --runstatedi | --runstated \ +- | --runstate | --runstat | --runsta | --runst | --runs \ +- | --run | --ru | --r) +- ac_prev=runstatedir ;; +- -runstatedir=* | --runstatedir=* | --runstatedi=* | --runstated=* \ +- | --runstate=* | --runstat=* | --runsta=* | --runst=* | --runs=* \ +- | --run=* | --ru=* | --r=*) +- runstatedir=$ac_optarg ;; +- + -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) + ac_prev=sbindir ;; + -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ +@@ -1173,9 +1137,9 @@ do + ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && +- as_fn_error $? "invalid package name: \`$ac_useropt'" ++ as_fn_error $? "invalid package name: $ac_useropt" + ac_useropt_orig=$ac_useropt +- ac_useropt=`printf "%s\n" "$ac_useropt" | sed 's/[-+.]/_/g'` ++ ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" + "with_$ac_useropt" +@@ -1189,9 +1153,9 @@ do + ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && +- as_fn_error $? "invalid package name: \`$ac_useropt'" ++ as_fn_error $? 
"invalid package name: $ac_useropt" + ac_useropt_orig=$ac_useropt +- ac_useropt=`printf "%s\n" "$ac_useropt" | sed 's/[-+.]/_/g'` ++ ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" + "with_$ac_useropt" +@@ -1235,9 +1199,9 @@ Try \`$0 --help' for more information" + + *) + # FIXME: should be removed in autoconf 3.0. +- printf "%s\n" "$as_me: WARNING: you should use --build, --host, --target" >&2 ++ $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2 + expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && +- printf "%s\n" "$as_me: WARNING: invalid host type: $ac_option" >&2 ++ $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2 + : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}" + ;; + +@@ -1253,7 +1217,7 @@ if test -n "$ac_unrecognized_opts"; then + case $enable_option_checking in + no) ;; + fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;; +- *) printf "%s\n" "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; ++ *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; + esac + fi + +@@ -1261,7 +1225,7 @@ fi + for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ + datadir sysconfdir sharedstatedir localstatedir includedir \ + oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ +- libdir localedir mandir runstatedir ++ libdir localedir mandir + do + eval ac_val=\$$ac_var + # Remove trailing slashes. +@@ -1317,7 +1281,7 @@ $as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_myself" : 'X\(//\)[^/]' \| \ + X"$as_myself" : 'X\(//\)$' \| \ + X"$as_myself" : 'X\(/\)' \| . 
2>/dev/null || +-printf "%s\n" X"$as_myself" | ++$as_echo X"$as_myself" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q +@@ -1414,7 +1378,6 @@ Fine tuning of the installation directories: + --sysconfdir=DIR read-only single-machine data [PREFIX/etc] + --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] + --localstatedir=DIR modifiable single-machine data [PREFIX/var] +- --runstatedir=DIR modifiable per-process data [LOCALSTATEDIR/run] + --libdir=DIR object code libraries [EPREFIX/lib] + --includedir=DIR C header files [PREFIX/include] + --oldincludedir=DIR C header files for non-gcc [/usr/include] +@@ -1473,16 +1436,14 @@ Optional Features: + Optional Packages: + --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] + --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) ++ --with-build-libsubdir=DIR Directory where to find libraries for build system + --with-libiberty=PATH specify the directory where to find libiberty + [../libiberty] +- --with-pic[=PKGS] try to use only PIC/non-PIC objects [default=use ++ --with-gcc-major-version-only ++ use only GCC major number in filesystem paths ++ --with-pic try to use only PIC/non-PIC objects [default=use + both] +- --with-aix-soname=aix|svr4|both +- shared library versioning (aka "SONAME") variant to +- provide on AIX, [default=aix]. + --with-gnu-ld assume the C compiler uses GNU ld [default=no] +- --with-sysroot[=DIR] Search for dependent libraries within DIR (or the +- compiler's sysroot if not specified). + + Some influential environment variables: + CC C compiler command +@@ -1492,10 +1453,9 @@ Some influential environment variables: + LIBS libraries to pass to the linker, e.g. -l + CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. -I if + you have headers in a nonstandard directory ++ CPP C preprocessor + CXX C++ compiler command + CXXFLAGS C++ compiler flags +- LT_SYS_LIBRARY_PATH +- User-defined run-time library search path. 
+ CXXCPP C++ preprocessor + + Use these variables to override the choices made by `configure' or to help +@@ -1517,9 +1477,9 @@ if test "$ac_init_help" = "recursive"; then + case "$ac_dir" in + .) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; + *) +- ac_dir_suffix=/`printf "%s\n" "$ac_dir" | sed 's|^\.[\\/]||'` ++ ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` + # A ".." for each directory in $ac_dir_suffix. +- ac_top_builddir_sub=`printf "%s\n" "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` ++ ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` + case $ac_top_builddir_sub in + "") ac_top_builddir_sub=. ac_top_build_prefix= ;; + *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; +@@ -1547,8 +1507,7 @@ esac + ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix + + cd "$ac_dir" || { ac_status=$?; continue; } +- # Check for configure.gnu first; this name is used for a wrapper for +- # Metaconfig's "Configure" on case-insensitive file systems. ++ # Check for guested configure. + if test -f "$ac_srcdir/configure.gnu"; then + echo && + $SHELL "$ac_srcdir/configure.gnu" --help=recursive +@@ -1556,7 +1515,7 @@ ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix + echo && + $SHELL "$ac_srcdir/configure" --help=recursive + else +- printf "%s\n" "$as_me: WARNING: no configuration information is in $ac_dir" >&2 ++ $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 + fi || ac_status=$? + cd "$ac_pwd" || { ac_status=$?; break; } + done +@@ -1566,9 +1525,9 @@ test -n "$ac_init_help" && exit $ac_status + if $ac_init_version; then + cat <<\_ACEOF + bolt plugin for ld configure 0.1 +-generated by GNU Autoconf 2.71 ++generated by GNU Autoconf 2.69 + +-Copyright (C) 2021 Free Software Foundation, Inc. ++Copyright (C) 2012 Free Software Foundation, Inc. + This configure script is free software; the Free Software Foundation + gives unlimited permission to copy, distribute and modify it. 
+ _ACEOF +@@ -1585,14 +1544,14 @@ fi + ac_fn_c_try_compile () + { + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack +- rm -f conftest.$ac_objext conftest.beam ++ rm -f conftest.$ac_objext + if { { ac_try="$ac_compile" + case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; + esac + eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +-printf "%s\n" "$ac_try_echo"; } >&5 ++$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compile") 2>conftest.err + ac_status=$? + if test -s conftest.err; then +@@ -1600,15 +1559,14 @@ printf "%s\n" "$ac_try_echo"; } >&5 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err +- } && test -s conftest.$ac_objext +-then : ++ } && test -s conftest.$ac_objext; then : + ac_retval=0 +-else $as_nop +- printf "%s\n" "$as_me: failed program was:" >&5 ++else ++ $as_echo "$as_me: failed program was:" >&5 + sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +@@ -1618,6 +1576,172 @@ fi + + } # ac_fn_c_try_compile + ++# ac_fn_c_try_cpp LINENO ++# ---------------------- ++# Try to preprocess conftest.$ac_ext, and return whether this succeeded. ++ac_fn_c_try_cpp () ++{ ++ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack ++ if { { ac_try="$ac_cpp conftest.$ac_ext" ++case "(($ac_try" in ++ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; ++ *) ac_try_echo=$ac_try;; ++esac ++eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" ++$as_echo "$ac_try_echo"; } >&5 ++ (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err ++ ac_status=$? ++ if test -s conftest.err; then ++ grep -v '^ *+' conftest.err >conftest.er1 ++ cat conftest.er1 >&5 ++ mv -f conftest.er1 conftest.err ++ fi ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 ++ test $ac_status = 0; } > conftest.i && { ++ test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || ++ test ! -s conftest.err ++ }; then : ++ ac_retval=0 ++else ++ $as_echo "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++ ac_retval=1 ++fi ++ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno ++ as_fn_set_status $ac_retval ++ ++} # ac_fn_c_try_cpp ++ ++# ac_fn_c_check_header_mongrel LINENO HEADER VAR INCLUDES ++# ------------------------------------------------------- ++# Tests whether HEADER exists, giving a warning if it cannot be compiled using ++# the include files in INCLUDES and setting the cache variable VAR ++# accordingly. ++ac_fn_c_check_header_mongrel () ++{ ++ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack ++ if eval \${$3+:} false; then : ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 ++$as_echo_n "checking for $2... " >&6; } ++if eval \${$3+:} false; then : ++ $as_echo_n "(cached) " >&6 ++fi ++eval ac_res=\$$3 ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 ++$as_echo "$ac_res" >&6; } ++else ++ # Is the header compilable? ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5 ++$as_echo_n "checking $2 usability... " >&6; } ++cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++$4 ++#include <$2> ++_ACEOF ++if ac_fn_c_try_compile "$LINENO"; then : ++ ac_header_compiler=yes ++else ++ ac_header_compiler=no ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5 ++$as_echo "$ac_header_compiler" >&6; } ++ ++# Is the header present? ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5 ++$as_echo_n "checking $2 presence... " >&6; } ++cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. 
*/ ++#include <$2> ++_ACEOF ++if ac_fn_c_try_cpp "$LINENO"; then : ++ ac_header_preproc=yes ++else ++ ac_header_preproc=no ++fi ++rm -f conftest.err conftest.i conftest.$ac_ext ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5 ++$as_echo "$ac_header_preproc" >&6; } ++ ++# So? What about this header? ++case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in #(( ++ yes:no: ) ++ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5 ++$as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&2;} ++ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 ++$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} ++ ;; ++ no:yes:* ) ++ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5 ++$as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;} ++ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: check for missing prerequisite headers?" >&5 ++$as_echo "$as_me: WARNING: $2: check for missing prerequisite headers?" >&2;} ++ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5 ++$as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;} ++ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&5 ++$as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;} ++ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 ++$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} ++ ;; ++esac ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 ++$as_echo_n "checking for $2... 
" >&6; } ++if eval \${$3+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ eval "$3=\$ac_header_compiler" ++fi ++eval ac_res=\$$3 ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 ++$as_echo "$ac_res" >&6; } ++fi ++ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno ++ ++} # ac_fn_c_check_header_mongrel ++ ++# ac_fn_c_try_run LINENO ++# ---------------------- ++# Try to link conftest.$ac_ext, and return whether this succeeded. Assumes ++# that executables *can* be run. ++ac_fn_c_try_run () ++{ ++ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack ++ if { { ac_try="$ac_link" ++case "(($ac_try" in ++ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; ++ *) ac_try_echo=$ac_try;; ++esac ++eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" ++$as_echo "$ac_try_echo"; } >&5 ++ (eval "$ac_link") 2>&5 ++ ac_status=$? ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ test $ac_status = 0; } && { ac_try='./conftest$ac_exeext' ++ { { case "(($ac_try" in ++ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; ++ *) ac_try_echo=$ac_try;; ++esac ++eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" ++$as_echo "$ac_try_echo"; } >&5 ++ (eval "$ac_try") 2>&5 ++ ac_status=$? ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 ++ test $ac_status = 0; }; }; then : ++ ac_retval=0 ++else ++ $as_echo "$as_me: program exited with status $ac_status" >&5 ++ $as_echo "$as_me: failed program was:" >&5 ++sed 's/^/| /' conftest.$ac_ext >&5 ++ ++ ac_retval=$ac_status ++fi ++ rm -rf conftest.dSYM conftest_ipa8_conftest.oo ++ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno ++ as_fn_set_status $ac_retval ++ ++} # ac_fn_c_try_run ++ + # ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES + # ------------------------------------------------------- + # Tests whether HEADER exists and can be compiled using the include files in +@@ -1625,28 +1749,26 @@ fi + ac_fn_c_check_header_compile () + { + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +-printf %s "checking for $2... " >&6; } +-if eval test \${$3+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 ++$as_echo_n "checking for $2... " >&6; } ++if eval \${$3+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. 
*/ + $4 + #include <$2> + _ACEOF +-if ac_fn_c_try_compile "$LINENO" +-then : ++if ac_fn_c_try_compile "$LINENO"; then : + eval "$3=yes" +-else $as_nop ++else + eval "$3=no" + fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + fi + eval ac_res=\$$3 +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +-printf "%s\n" "$ac_res" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 ++$as_echo "$ac_res" >&6; } + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + + } # ac_fn_c_check_header_compile +@@ -1657,14 +1779,14 @@ printf "%s\n" "$ac_res" >&6; } + ac_fn_cxx_try_compile () + { + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack +- rm -f conftest.$ac_objext conftest.beam ++ rm -f conftest.$ac_objext + if { { ac_try="$ac_compile" + case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; + esac + eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +-printf "%s\n" "$ac_try_echo"; } >&5 ++$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compile") 2>conftest.err + ac_status=$? + if test -s conftest.err; then +@@ -1672,15 +1794,14 @@ printf "%s\n" "$ac_try_echo"; } >&5 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_cxx_werror_flag" || + test ! 
-s conftest.err +- } && test -s conftest.$ac_objext +-then : ++ } && test -s conftest.$ac_objext; then : + ac_retval=0 +-else $as_nop +- printf "%s\n" "$as_me: failed program was:" >&5 ++else ++ $as_echo "$as_me: failed program was:" >&5 + sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +@@ -1696,14 +1817,14 @@ fi + ac_fn_c_try_link () + { + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack +- rm -f conftest.$ac_objext conftest.beam conftest$ac_exeext ++ rm -f conftest.$ac_objext conftest$ac_exeext + if { { ac_try="$ac_link" + case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; + esac + eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +-printf "%s\n" "$ac_try_echo"; } >&5 ++$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>conftest.err + ac_status=$? + if test -s conftest.err; then +@@ -1711,18 +1832,17 @@ printf "%s\n" "$ac_try_echo"; } >&5 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + test -x conftest$ac_exeext +- } +-then : ++ }; then : + ac_retval=0 +-else $as_nop +- printf "%s\n" "$as_me: failed program was:" >&5 ++else ++ $as_echo "$as_me: failed program was:" >&5 + sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +@@ -1743,12 +1863,11 @@ fi + ac_fn_c_check_func () + { + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +-printf %s "checking for $2... " >&6; } +-if eval test \${$3+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 ++$as_echo_n "checking for $2... 
" >&6; } ++if eval \${$3+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ + /* Define $2 to an innocuous variant, in case declares $2. +@@ -1756,9 +1875,16 @@ else $as_nop + #define $2 innocuous_$2 + + /* System header to define __stub macros and hopefully few prototypes, +- which can conflict with char $2 (); below. */ ++ which can conflict with char $2 (); below. ++ Prefer to if __STDC__ is defined, since ++ exists even on freestanding compilers. */ ++ ++#ifdef __STDC__ ++# include ++#else ++# include ++#endif + +-#include + #undef $2 + + /* Override any GCC internal prototype to avoid an error. +@@ -1776,25 +1902,24 @@ choke me + #endif + + int +-main (void) ++main () + { + return $2 (); + ; + return 0; + } + _ACEOF +-if ac_fn_c_try_link "$LINENO" +-then : ++if ac_fn_c_try_link "$LINENO"; then : + eval "$3=yes" +-else $as_nop ++else + eval "$3=no" + fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam \ ++rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + fi + eval ac_res=\$$3 +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +-printf "%s\n" "$ac_res" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 ++$as_echo "$ac_res" >&6; } + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + + } # ac_fn_c_check_func +@@ -1811,7 +1936,7 @@ case "(($ac_try" in + *) ac_try_echo=$ac_try;; + esac + eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +-printf "%s\n" "$ac_try_echo"; } >&5 ++$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err + ac_status=$? + if test -s conftest.err; then +@@ -1819,15 +1944,14 @@ printf "%s\n" "$ac_try_echo"; } >&5 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; } > conftest.i && { + test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" || + test ! -s conftest.err +- } +-then : ++ }; then : + ac_retval=0 +-else $as_nop +- printf "%s\n" "$as_me: failed program was:" >&5 ++else ++ $as_echo "$as_me: failed program was:" >&5 + sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +@@ -1843,14 +1967,14 @@ fi + ac_fn_cxx_try_link () + { + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack +- rm -f conftest.$ac_objext conftest.beam conftest$ac_exeext ++ rm -f conftest.$ac_objext conftest$ac_exeext + if { { ac_try="$ac_link" + case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; + esac + eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +-printf "%s\n" "$ac_try_echo"; } >&5 ++$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>conftest.err + ac_status=$? + if test -s conftest.err; then +@@ -1858,18 +1982,17 @@ printf "%s\n" "$ac_try_echo"; } >&5 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + test -x conftest$ac_exeext +- } +-then : ++ }; then : + ac_retval=0 +-else $as_nop +- printf "%s\n" "$as_me: failed program was:" >&5 ++else ++ $as_echo "$as_me: failed program was:" >&5 + sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +@@ -1891,12 +2014,11 @@ fi + ac_fn_c_find_intX_t () + { + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for int$2_t" >&5 +-printf %s "checking for int$2_t... 
" >&6; } +-if eval test \${$3+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for int$2_t" >&5 ++$as_echo_n "checking for int$2_t... " >&6; } ++if eval \${$3+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + eval "$3=no" + # Order is important - never check a type that is potentially smaller + # than half of the expected target width. +@@ -1907,7 +2029,7 @@ else $as_nop + $ac_includes_default + enum { N = $2 / 2 - 1 }; + int +-main (void) ++main () + { + static int test_array [1 - 2 * !(0 < ($ac_type) ((((($ac_type) 1 << N) << N) - 1) * 2 + 1))]; + test_array [0] = 0; +@@ -1917,14 +2039,13 @@ return test_array [0]; + return 0; + } + _ACEOF +-if ac_fn_c_try_compile "$LINENO" +-then : ++if ac_fn_c_try_compile "$LINENO"; then : + cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ + $ac_includes_default + enum { N = $2 / 2 - 1 }; + int +-main (void) ++main () + { + static int test_array [1 - 2 * !(($ac_type) ((((($ac_type) 1 << N) << N) - 1) * 2 + 1) + < ($ac_type) ((((($ac_type) 1 << N) << N) - 1) * 2 + 2))]; +@@ -1935,10 +2056,9 @@ return test_array [0]; + return 0; + } + _ACEOF +-if ac_fn_c_try_compile "$LINENO" +-then : ++if ac_fn_c_try_compile "$LINENO"; then : + +-else $as_nop ++else + case $ac_type in #( + int$2_t) : + eval "$3=yes" ;; #( +@@ -1946,20 +2066,19 @@ else $as_nop + eval "$3=\$ac_type" ;; + esac + fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext +- if eval test \"x\$"$3"\" = x"no" +-then : ++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ++ if eval test \"x\$"$3"\" = x"no"; then : + +-else $as_nop ++else + break + fi + done + fi + eval ac_res=\$$3 +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +-printf "%s\n" "$ac_res" >&6; } ++ { $as_echo 
"$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 ++$as_echo "$ac_res" >&6; } + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + + } # ac_fn_c_find_intX_t +@@ -1971,12 +2090,11 @@ printf "%s\n" "$ac_res" >&6; } + ac_fn_c_find_uintX_t () + { + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for uint$2_t" >&5 +-printf %s "checking for uint$2_t... " >&6; } +-if eval test \${$3+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for uint$2_t" >&5 ++$as_echo_n "checking for uint$2_t... " >&6; } ++if eval \${$3+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + eval "$3=no" + # Order is important - never check a type that is potentially smaller + # than half of the expected target width. +@@ -1986,7 +2104,7 @@ else $as_nop + /* end confdefs.h. */ + $ac_includes_default + int +-main (void) ++main () + { + static int test_array [1 - 2 * !((($ac_type) -1 >> ($2 / 2 - 1)) >> ($2 / 2 - 1) == 3)]; + test_array [0] = 0; +@@ -1996,8 +2114,7 @@ return test_array [0]; + return 0; + } + _ACEOF +-if ac_fn_c_try_compile "$LINENO" +-then : ++if ac_fn_c_try_compile "$LINENO"; then : + case $ac_type in #( + uint$2_t) : + eval "$3=yes" ;; #( +@@ -2005,49 +2122,28 @@ then : + eval "$3=\$ac_type" ;; + esac + fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext +- if eval test \"x\$"$3"\" = x"no" +-then : ++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ++ if eval test \"x\$"$3"\" = x"no"; then : + +-else $as_nop ++else + break + fi + done + fi + eval ac_res=\$$3 +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +-printf "%s\n" "$ac_res" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 ++$as_echo "$ac_res" >&6; } + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + + } # ac_fn_c_find_uintX_t +-ac_configure_args_raw= +-for ac_arg +-do 
+- case $ac_arg in +- *\'*) +- ac_arg=`printf "%s\n" "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; +- esac +- as_fn_append ac_configure_args_raw " '$ac_arg'" +-done +- +-case $ac_configure_args_raw in +- *$as_nl*) +- ac_safe_unquote= ;; +- *) +- ac_unsafe_z='|&;<>()$`\\"*?[ '' ' # This string ends in space, tab. +- ac_unsafe_a="$ac_unsafe_z#~" +- ac_safe_unquote="s/ '\\([^$ac_unsafe_a][^$ac_unsafe_z]*\\)'/ \\1/g" +- ac_configure_args_raw=` printf "%s\n" "$ac_configure_args_raw" | sed "$ac_safe_unquote"`;; +-esac +- + cat >config.log <<_ACEOF + This file contains any messages produced by compilers while + running configure, to aid debugging if configure makes a mistake. + + It was created by bolt plugin for ld $as_me 0.1, which was +-generated by GNU Autoconf 2.71. Invocation command line was ++generated by GNU Autoconf 2.69. Invocation command line was + +- $ $0$ac_configure_args_raw ++ $ $0 $@ + + _ACEOF + exec 5>>config.log +@@ -2080,12 +2176,8 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR + for as_dir in $PATH + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac +- printf "%s\n" "PATH: $as_dir" ++ test -z "$as_dir" && as_dir=. ++ $as_echo "PATH: $as_dir" + done + IFS=$as_save_IFS + +@@ -2120,7 +2212,7 @@ do + | -silent | --silent | --silen | --sile | --sil) + continue ;; + *\'*) +- ac_arg=`printf "%s\n" "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; ++ ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; + esac + case $ac_pass in + 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;; +@@ -2155,13 +2247,11 @@ done + # WARNING: Use '\'' to represent an apostrophe within the trap. + # WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. + trap 'exit_status=$? +- # Sanitize IFS. +- IFS=" "" $as_nl" + # Save into config.log some information that might help in debugging. + { + echo + +- printf "%s\n" "## ---------------- ## ++ $as_echo "## ---------------- ## + ## Cache variables. 
## + ## ---------------- ##" + echo +@@ -2172,8 +2262,8 @@ trap 'exit_status=$? + case $ac_val in #( + *${as_nl}*) + case $ac_var in #( +- *_cv_*) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 +-printf "%s\n" "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; ++ *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 ++$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; + esac + case $ac_var in #( + _ | IFS | as_nl) ;; #( +@@ -2197,7 +2287,7 @@ printf "%s\n" "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} + ) + echo + +- printf "%s\n" "## ----------------- ## ++ $as_echo "## ----------------- ## + ## Output variables. ## + ## ----------------- ##" + echo +@@ -2205,14 +2295,14 @@ printf "%s\n" "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} + do + eval ac_val=\$$ac_var + case $ac_val in +- *\'\''*) ac_val=`printf "%s\n" "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; ++ *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; + esac +- printf "%s\n" "$ac_var='\''$ac_val'\''" ++ $as_echo "$ac_var='\''$ac_val'\''" + done | sort + echo + + if test -n "$ac_subst_files"; then +- printf "%s\n" "## ------------------- ## ++ $as_echo "## ------------------- ## + ## File substitutions. ## + ## ------------------- ##" + echo +@@ -2220,15 +2310,15 @@ printf "%s\n" "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} + do + eval ac_val=\$$ac_var + case $ac_val in +- *\'\''*) ac_val=`printf "%s\n" "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; ++ *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; + esac +- printf "%s\n" "$ac_var='\''$ac_val'\''" ++ $as_echo "$ac_var='\''$ac_val'\''" + done | sort + echo + fi + + if test -s confdefs.h; then +- printf "%s\n" "## ----------- ## ++ $as_echo "## ----------- ## + ## confdefs.h. 
## + ## ----------- ##" + echo +@@ -2236,8 +2326,8 @@ printf "%s\n" "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} + echo + fi + test "$ac_signal" != 0 && +- printf "%s\n" "$as_me: caught signal $ac_signal" +- printf "%s\n" "$as_me: exit $exit_status" ++ $as_echo "$as_me: caught signal $ac_signal" ++ $as_echo "$as_me: exit $exit_status" + } >&5 + rm -f core *.core core.conftest.* && + rm -f -r conftest* confdefs* conf$$* $ac_clean_files && +@@ -2251,48 +2341,63 @@ ac_signal=0 + # confdefs.h avoids OS command line length limits that DEFS can exceed. + rm -f -r conftest* confdefs.h + +-printf "%s\n" "/* confdefs.h */" > confdefs.h ++$as_echo "/* confdefs.h */" > confdefs.h + + # Predefined preprocessor variables. + +-printf "%s\n" "#define PACKAGE_NAME \"$PACKAGE_NAME\"" >>confdefs.h ++cat >>confdefs.h <<_ACEOF ++#define PACKAGE_NAME "$PACKAGE_NAME" ++_ACEOF + +-printf "%s\n" "#define PACKAGE_TARNAME \"$PACKAGE_TARNAME\"" >>confdefs.h ++cat >>confdefs.h <<_ACEOF ++#define PACKAGE_TARNAME "$PACKAGE_TARNAME" ++_ACEOF + +-printf "%s\n" "#define PACKAGE_VERSION \"$PACKAGE_VERSION\"" >>confdefs.h ++cat >>confdefs.h <<_ACEOF ++#define PACKAGE_VERSION "$PACKAGE_VERSION" ++_ACEOF + +-printf "%s\n" "#define PACKAGE_STRING \"$PACKAGE_STRING\"" >>confdefs.h ++cat >>confdefs.h <<_ACEOF ++#define PACKAGE_STRING "$PACKAGE_STRING" ++_ACEOF + +-printf "%s\n" "#define PACKAGE_BUGREPORT \"$PACKAGE_BUGREPORT\"" >>confdefs.h ++cat >>confdefs.h <<_ACEOF ++#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" ++_ACEOF + +-printf "%s\n" "#define PACKAGE_URL \"$PACKAGE_URL\"" >>confdefs.h ++cat >>confdefs.h <<_ACEOF ++#define PACKAGE_URL "$PACKAGE_URL" ++_ACEOF + + + # Let the site file select an alternate cache file if it wants to. + # Prefer an explicitly selected file to automatically selected ones. ++ac_site_file1=NONE ++ac_site_file2=NONE + if test -n "$CONFIG_SITE"; then +- ac_site_files="$CONFIG_SITE" ++ # We do not want a PATH search for config.site. 
++ case $CONFIG_SITE in #(( ++ -*) ac_site_file1=./$CONFIG_SITE;; ++ */*) ac_site_file1=$CONFIG_SITE;; ++ *) ac_site_file1=./$CONFIG_SITE;; ++ esac + elif test "x$prefix" != xNONE; then +- ac_site_files="$prefix/share/config.site $prefix/etc/config.site" ++ ac_site_file1=$prefix/share/config.site ++ ac_site_file2=$prefix/etc/config.site + else +- ac_site_files="$ac_default_prefix/share/config.site $ac_default_prefix/etc/config.site" ++ ac_site_file1=$ac_default_prefix/share/config.site ++ ac_site_file2=$ac_default_prefix/etc/config.site + fi +- +-for ac_site_file in $ac_site_files ++for ac_site_file in "$ac_site_file1" "$ac_site_file2" + do +- case $ac_site_file in #( +- */*) : +- ;; #( +- *) : +- ac_site_file=./$ac_site_file ;; +-esac +- if test -f "$ac_site_file" && test -r "$ac_site_file"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 +-printf "%s\n" "$as_me: loading site script $ac_site_file" >&6;} ++ test "x$ac_site_file" = xNONE && continue ++ if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 ++$as_echo "$as_me: loading site script $ac_site_file" >&6;} + sed 's/^/| /' "$ac_site_file" >&5 + . "$ac_site_file" \ +- || { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +-printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} ++ || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 ++$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} + as_fn_error $? "failed to load site script $ac_site_file + See \`config.log' for more details" "$LINENO" 5; } + fi +@@ -2302,745 +2407,139 @@ if test -r "$cache_file"; then + # Some versions of bash will fail to source /dev/null (special files + # actually), so we avoid doing that. DJGPP emulates it as a regular file. 
+ if test /dev/null != "$cache_file" && test -f "$cache_file"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 +-printf "%s\n" "$as_me: loading cache $cache_file" >&6;} ++ { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 ++$as_echo "$as_me: loading cache $cache_file" >&6;} + case $cache_file in + [\\/]* | ?:[\\/]* ) . "$cache_file";; + *) . "./$cache_file";; + esac + fi + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 +-printf "%s\n" "$as_me: creating cache $cache_file" >&6;} ++ { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 ++$as_echo "$as_me: creating cache $cache_file" >&6;} + >$cache_file + fi + +-as_fn_append ac_header_c_list " stdio.h stdio_h HAVE_STDIO_H" +-# Test code for whether the C compiler supports C89 (global declarations) +-ac_c_conftest_c89_globals=' +-/* Does the compiler advertise C89 conformance? +- Do not test the value of __STDC__, because some compilers set it to 0 +- while being otherwise adequately conformant. */ +-#if !defined __STDC__ +-# error "Compiler does not advertise C89 conformance" +-#endif ++# Check that the precious variables saved in the cache have kept the same ++# value. 
++ac_cache_corrupted=false ++for ac_var in $ac_precious_vars; do ++ eval ac_old_set=\$ac_cv_env_${ac_var}_set ++ eval ac_new_set=\$ac_env_${ac_var}_set ++ eval ac_old_val=\$ac_cv_env_${ac_var}_value ++ eval ac_new_val=\$ac_env_${ac_var}_value ++ case $ac_old_set,$ac_new_set in ++ set,) ++ { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 ++$as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} ++ ac_cache_corrupted=: ;; ++ ,set) ++ { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 ++$as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} ++ ac_cache_corrupted=: ;; ++ ,);; ++ *) ++ if test "x$ac_old_val" != "x$ac_new_val"; then ++ # differences in whitespace do not lead to failure. ++ ac_old_val_w=`echo x $ac_old_val` ++ ac_new_val_w=`echo x $ac_new_val` ++ if test "$ac_old_val_w" != "$ac_new_val_w"; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 ++$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} ++ ac_cache_corrupted=: ++ else ++ { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 ++$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} ++ eval $ac_var=\$ac_old_val ++ fi ++ { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 ++$as_echo "$as_me: former value: \`$ac_old_val'" >&2;} ++ { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 ++$as_echo "$as_me: current value: \`$ac_new_val'" >&2;} ++ fi;; ++ esac ++ # Pass precious variables to config.status. 
++ if test "$ac_new_set" = set; then ++ case $ac_new_val in ++ *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; ++ *) ac_arg=$ac_var=$ac_new_val ;; ++ esac ++ case " $ac_configure_args " in ++ *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. ++ *) as_fn_append ac_configure_args " '$ac_arg'" ;; ++ esac ++ fi ++done ++if $ac_cache_corrupted; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 ++$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} ++ { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 ++$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} ++ as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5 ++fi ++## -------------------- ## ++## Main body of script. ## ++## -------------------- ## + +-#include +-#include +-struct stat; +-/* Most of the following tests are stolen from RCS 5.7 src/conf.sh. */ +-struct buf { int x; }; +-struct buf * (*rcsopen) (struct buf *, struct stat *, int); +-static char *e (p, i) +- char **p; +- int i; +-{ +- return p[i]; +-} +-static char *f (char * (*g) (char **, int), char **p, ...) +-{ +- char *s; +- va_list v; +- va_start (v,p); +- s = g (p, va_arg (v,int)); +- va_end (v); +- return s; +-} +- +-/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has +- function prototypes and stuff, but not \xHH hex character constants. +- These do not provoke an error unfortunately, instead are silently treated +- as an "x". The following induces an error, until -std is added to get +- proper ANSI mode. Curiously \x00 != x always comes out true, for an +- array size at least. It is necessary to write \x00 == 0 to get something +- that is true only with -std. */ +-int osf4_cc_array ['\''\x00'\'' == 0 ? 1 : -1]; +- +-/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters +- inside strings and character constants. 
*/ +-#define FOO(x) '\''x'\'' +-int xlc6_cc_array[FOO(a) == '\''x'\'' ? 1 : -1]; +- +-int test (int i, double x); +-struct s1 {int (*f) (int a);}; +-struct s2 {int (*f) (double a);}; +-int pairnames (int, char **, int *(*)(struct buf *, struct stat *, int), +- int, int);' +- +-# Test code for whether the C compiler supports C89 (body of main). +-ac_c_conftest_c89_main=' +-ok |= (argc == 0 || f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]); +-' +- +-# Test code for whether the C compiler supports C99 (global declarations) +-ac_c_conftest_c99_globals=' +-// Does the compiler advertise C99 conformance? +-#if !defined __STDC_VERSION__ || __STDC_VERSION__ < 199901L +-# error "Compiler does not advertise C99 conformance" +-#endif +- +-#include +-extern int puts (const char *); +-extern int printf (const char *, ...); +-extern int dprintf (int, const char *, ...); +-extern void *malloc (size_t); +- +-// Check varargs macros. These examples are taken from C99 6.10.3.5. +-// dprintf is used instead of fprintf to avoid needing to declare +-// FILE and stderr. +-#define debug(...) dprintf (2, __VA_ARGS__) +-#define showlist(...) puts (#__VA_ARGS__) +-#define report(test,...) ((test) ? puts (#test) : printf (__VA_ARGS__)) +-static void +-test_varargs_macros (void) +-{ +- int x = 1234; +- int y = 5678; +- debug ("Flag"); +- debug ("X = %d\n", x); +- showlist (The first, second, and third items.); +- report (x>y, "x is %d but y is %d", x, y); +-} +- +-// Check long long types. 
+-#define BIG64 18446744073709551615ull +-#define BIG32 4294967295ul +-#define BIG_OK (BIG64 / BIG32 == 4294967297ull && BIG64 % BIG32 == 0) +-#if !BIG_OK +- #error "your preprocessor is broken" +-#endif +-#if BIG_OK +-#else +- #error "your preprocessor is broken" +-#endif +-static long long int bignum = -9223372036854775807LL; +-static unsigned long long int ubignum = BIG64; +- +-struct incomplete_array +-{ +- int datasize; +- double data[]; +-}; +- +-struct named_init { +- int number; +- const wchar_t *name; +- double average; +-}; +- +-typedef const char *ccp; +- +-static inline int +-test_restrict (ccp restrict text) +-{ +- // See if C++-style comments work. +- // Iterate through items via the restricted pointer. +- // Also check for declarations in for loops. +- for (unsigned int i = 0; *(text+i) != '\''\0'\''; ++i) +- continue; +- return 0; +-} +- +-// Check varargs and va_copy. +-static bool +-test_varargs (const char *format, ...) +-{ +- va_list args; +- va_start (args, format); +- va_list args_copy; +- va_copy (args_copy, args); +- +- const char *str = ""; +- int number = 0; +- float fnumber = 0; +- +- while (*format) +- { +- switch (*format++) +- { +- case '\''s'\'': // string +- str = va_arg (args_copy, const char *); +- break; +- case '\''d'\'': // int +- number = va_arg (args_copy, int); +- break; +- case '\''f'\'': // float +- fnumber = va_arg (args_copy, double); +- break; +- default: +- break; +- } +- } +- va_end (args_copy); +- va_end (args); +- +- return *str && number && fnumber; +-} +-' +- +-# Test code for whether the C compiler supports C99 (body of main). +-ac_c_conftest_c99_main=' +- // Check bool. +- _Bool success = false; +- success |= (argc != 0); +- +- // Check restrict. +- if (test_restrict ("String literal") == 0) +- success = true; +- char *restrict newvar = "Another string"; +- +- // Check varargs. +- success &= test_varargs ("s, d'\'' f .", "string", 65, 34.234); +- test_varargs_macros (); +- +- // Check flexible array members. 
+- struct incomplete_array *ia = +- malloc (sizeof (struct incomplete_array) + (sizeof (double) * 10)); +- ia->datasize = 10; +- for (int i = 0; i < ia->datasize; ++i) +- ia->data[i] = i * 1.234; +- +- // Check named initializers. +- struct named_init ni = { +- .number = 34, +- .name = L"Test wide string", +- .average = 543.34343, +- }; +- +- ni.number = 58; +- +- int dynamic_array[ni.number]; +- dynamic_array[0] = argv[0][0]; +- dynamic_array[ni.number - 1] = 543; +- +- // work around unused variable warnings +- ok |= (!success || bignum == 0LL || ubignum == 0uLL || newvar[0] == '\''x'\'' +- || dynamic_array[ni.number - 1] != 543); +-' +- +-# Test code for whether the C compiler supports C11 (global declarations) +-ac_c_conftest_c11_globals=' +-// Does the compiler advertise C11 conformance? +-#if !defined __STDC_VERSION__ || __STDC_VERSION__ < 201112L +-# error "Compiler does not advertise C11 conformance" +-#endif +- +-// Check _Alignas. +-char _Alignas (double) aligned_as_double; +-char _Alignas (0) no_special_alignment; +-extern char aligned_as_int; +-char _Alignas (0) _Alignas (int) aligned_as_int; +- +-// Check _Alignof. +-enum +-{ +- int_alignment = _Alignof (int), +- int_array_alignment = _Alignof (int[100]), +- char_alignment = _Alignof (char) +-}; +-_Static_assert (0 < -_Alignof (int), "_Alignof is signed"); +- +-// Check _Noreturn. +-int _Noreturn does_not_return (void) { for (;;) continue; } +- +-// Check _Static_assert. +-struct test_static_assert +-{ +- int x; +- _Static_assert (sizeof (int) <= sizeof (long int), +- "_Static_assert does not work in struct"); +- long int y; +-}; +- +-// Check UTF-8 literals. +-#define u8 syntax error! +-char const utf8_literal[] = u8"happens to be ASCII" "another string"; +- +-// Check duplicate typedefs. +-typedef long *long_ptr; +-typedef long int *long_ptr; +-typedef long_ptr long_ptr; +- +-// Anonymous structures and unions -- taken from C11 6.7.2.1 Example 1. 
+-struct anonymous +-{ +- union { +- struct { int i; int j; }; +- struct { int k; long int l; } w; +- }; +- int m; +-} v1; +-' +- +-# Test code for whether the C compiler supports C11 (body of main). +-ac_c_conftest_c11_main=' +- _Static_assert ((offsetof (struct anonymous, i) +- == offsetof (struct anonymous, w.k)), +- "Anonymous union alignment botch"); +- v1.i = 2; +- v1.w.k = 5; +- ok |= v1.i != 5; +-' +- +-# Test code for whether the C compiler supports C11 (complete). +-ac_c_conftest_c11_program="${ac_c_conftest_c89_globals} +-${ac_c_conftest_c99_globals} +-${ac_c_conftest_c11_globals} +- +-int +-main (int argc, char **argv) +-{ +- int ok = 0; +- ${ac_c_conftest_c89_main} +- ${ac_c_conftest_c99_main} +- ${ac_c_conftest_c11_main} +- return ok; +-} +-" +- +-# Test code for whether the C compiler supports C99 (complete). +-ac_c_conftest_c99_program="${ac_c_conftest_c89_globals} +-${ac_c_conftest_c99_globals} +- +-int +-main (int argc, char **argv) +-{ +- int ok = 0; +- ${ac_c_conftest_c89_main} +- ${ac_c_conftest_c99_main} +- return ok; +-} +-" +- +-# Test code for whether the C compiler supports C89 (complete). 
+-ac_c_conftest_c89_program="${ac_c_conftest_c89_globals} +- +-int +-main (int argc, char **argv) +-{ +- int ok = 0; +- ${ac_c_conftest_c89_main} +- return ok; +-} +-" +- +-as_fn_append ac_header_c_list " stdlib.h stdlib_h HAVE_STDLIB_H" +-as_fn_append ac_header_c_list " string.h string_h HAVE_STRING_H" +-as_fn_append ac_header_c_list " inttypes.h inttypes_h HAVE_INTTYPES_H" +-as_fn_append ac_header_c_list " stdint.h stdint_h HAVE_STDINT_H" +-as_fn_append ac_header_c_list " strings.h strings_h HAVE_STRINGS_H" +-as_fn_append ac_header_c_list " sys/stat.h sys_stat_h HAVE_SYS_STAT_H" +-as_fn_append ac_header_c_list " sys/types.h sys_types_h HAVE_SYS_TYPES_H" +-as_fn_append ac_header_c_list " unistd.h unistd_h HAVE_UNISTD_H" +-as_fn_append ac_header_c_list " wchar.h wchar_h HAVE_WCHAR_H" +-as_fn_append ac_header_c_list " minix/config.h minix_config_h HAVE_MINIX_CONFIG_H" +-# Test code for whether the C++ compiler supports C++98 (global declarations) +-ac_cxx_conftest_cxx98_globals=' +-// Does the compiler advertise C++98 conformance? +-#if !defined __cplusplus || __cplusplus < 199711L +-# error "Compiler does not advertise C++98 conformance" +-#endif +- +-// These inclusions are to reject old compilers that +-// lack the unsuffixed header files. +-#include +-#include +- +-// and are *not* freestanding headers in C++98. +-extern void assert (int); +-namespace std { +- extern int strcmp (const char *, const char *); +-} +- +-// Namespaces, exceptions, and templates were all added after "C++ 2.0". +-using std::exception; +-using std::strcmp; +- +-namespace { +- +-void test_exception_syntax() +-{ +- try { +- throw "test"; +- } catch (const char *s) { +- // Extra parentheses suppress a warning when building autoconf itself, +- // due to lint rules shared with more typical C programs. 
+- assert (!(strcmp) (s, "test")); +- } +-} +- +-template struct test_template +-{ +- T const val; +- explicit test_template(T t) : val(t) {} +- template T add(U u) { return static_cast(u) + val; } +-}; +- +-} // anonymous namespace +-' +- +-# Test code for whether the C++ compiler supports C++98 (body of main) +-ac_cxx_conftest_cxx98_main=' +- assert (argc); +- assert (! argv[0]); +-{ +- test_exception_syntax (); +- test_template tt (2.0); +- assert (tt.add (4) == 6.0); +- assert (true && !false); +-} +-' +- +-# Test code for whether the C++ compiler supports C++11 (global declarations) +-ac_cxx_conftest_cxx11_globals=' +-// Does the compiler advertise C++ 2011 conformance? +-#if !defined __cplusplus || __cplusplus < 201103L +-# error "Compiler does not advertise C++11 conformance" +-#endif +- +-namespace cxx11test +-{ +- constexpr int get_val() { return 20; } +- +- struct testinit +- { +- int i; +- double d; +- }; +- +- class delegate +- { +- public: +- delegate(int n) : n(n) {} +- delegate(): delegate(2354) {} +- +- virtual int getval() { return this->n; }; +- protected: +- int n; +- }; +- +- class overridden : public delegate +- { +- public: +- overridden(int n): delegate(n) {} +- virtual int getval() override final { return this->n * 2; } +- }; +- +- class nocopy +- { +- public: +- nocopy(int i): i(i) {} +- nocopy() = default; +- nocopy(const nocopy&) = delete; +- nocopy & operator=(const nocopy&) = delete; +- private: +- int i; +- }; +- +- // for testing lambda expressions +- template Ret eval(Fn f, Ret v) +- { +- return f(v); +- } +- +- // for testing variadic templates and trailing return types +- template auto sum(V first) -> V +- { +- return first; +- } +- template auto sum(V first, Args... 
rest) -> V +- { +- return first + sum(rest...); +- } +-} +-' +- +-# Test code for whether the C++ compiler supports C++11 (body of main) +-ac_cxx_conftest_cxx11_main=' +-{ +- // Test auto and decltype +- auto a1 = 6538; +- auto a2 = 48573953.4; +- auto a3 = "String literal"; +- +- int total = 0; +- for (auto i = a3; *i; ++i) { total += *i; } +- +- decltype(a2) a4 = 34895.034; +-} +-{ +- // Test constexpr +- short sa[cxx11test::get_val()] = { 0 }; +-} +-{ +- // Test initializer lists +- cxx11test::testinit il = { 4323, 435234.23544 }; +-} +-{ +- // Test range-based for +- int array[] = {9, 7, 13, 15, 4, 18, 12, 10, 5, 3, +- 14, 19, 17, 8, 6, 20, 16, 2, 11, 1}; +- for (auto &x : array) { x += 23; } +-} +-{ +- // Test lambda expressions +- using cxx11test::eval; +- assert (eval ([](int x) { return x*2; }, 21) == 42); +- double d = 2.0; +- assert (eval ([&](double x) { return d += x; }, 3.0) == 5.0); +- assert (d == 5.0); +- assert (eval ([=](double x) mutable { return d += x; }, 4.0) == 9.0); +- assert (d == 5.0); +-} +-{ +- // Test use of variadic templates +- using cxx11test::sum; +- auto a = sum(1); +- auto b = sum(1, 2); +- auto c = sum(1.0, 2.0, 3.0); +-} +-{ +- // Test constructor delegation +- cxx11test::delegate d1; +- cxx11test::delegate d2(); +- cxx11test::delegate d3(45); +-} +-{ +- // Test override and final +- cxx11test::overridden o1(55464); +-} +-{ +- // Test nullptr +- char *c = nullptr; +-} +-{ +- // Test template brackets +- test_template<::test_template> v(test_template(12)); +-} +-{ +- // Unicode literals +- char const *utf8 = u8"UTF-8 string \u2500"; +- char16_t const *utf16 = u"UTF-8 string \u2500"; +- char32_t const *utf32 = U"UTF-32 string \u2500"; +-} +-' +- +-# Test code for whether the C compiler supports C++11 (complete). 
+-ac_cxx_conftest_cxx11_program="${ac_cxx_conftest_cxx98_globals} +-${ac_cxx_conftest_cxx11_globals} +- +-int +-main (int argc, char **argv) +-{ +- int ok = 0; +- ${ac_cxx_conftest_cxx98_main} +- ${ac_cxx_conftest_cxx11_main} +- return ok; +-} +-" +- +-# Test code for whether the C compiler supports C++98 (complete). +-ac_cxx_conftest_cxx98_program="${ac_cxx_conftest_cxx98_globals} +-int +-main (int argc, char **argv) +-{ +- int ok = 0; +- ${ac_cxx_conftest_cxx98_main} +- return ok; +-} +-" ++ac_ext=c ++ac_cpp='$CPP $CPPFLAGS' ++ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ++ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ++ac_compiler_gnu=$ac_cv_c_compiler_gnu + + +-# Auxiliary files required by this configure script. +-ac_aux_files="ltmain.sh compile missing install-sh config.guess config.sub" + +-# Locations in which to look for auxiliary files. +-ac_aux_dir_candidates="${srcdir}${PATH_SEPARATOR}${srcdir}/..${PATH_SEPARATOR}${srcdir}/../.." + +-# Search for a directory containing all of the required auxiliary files, +-# $ac_aux_files, from the $PATH-style list $ac_aux_dir_candidates. +-# If we don't find one directory that contains all the files we need, +-# we report the set of missing files from the *first* directory in +-# $ac_aux_dir_candidates and give up. 
+-ac_missing_aux_files="" +-ac_first_candidate=: +-printf "%s\n" "$as_me:${as_lineno-$LINENO}: looking for aux files: $ac_aux_files" >&5 +-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +-as_found=false +-for as_dir in $ac_aux_dir_candidates +-do +- IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac +- as_found=: + +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: trying $as_dir" >&5 +- ac_aux_dir_found=yes +- ac_install_sh= +- for ac_aux in $ac_aux_files +- do +- # As a special case, if "install-sh" is required, that requirement +- # can be satisfied by any of "install-sh", "install.sh", or "shtool", +- # and $ac_install_sh is set appropriately for whichever one is found. +- if test x"$ac_aux" = x"install-sh" +- then +- if test -f "${as_dir}install-sh"; then +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: ${as_dir}install-sh found" >&5 +- ac_install_sh="${as_dir}install-sh -c" +- elif test -f "${as_dir}install.sh"; then +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: ${as_dir}install.sh found" >&5 +- ac_install_sh="${as_dir}install.sh -c" +- elif test -f "${as_dir}shtool"; then +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: ${as_dir}shtool found" >&5 +- ac_install_sh="${as_dir}shtool install -c" +- else +- ac_aux_dir_found=no +- if $ac_first_candidate; then +- ac_missing_aux_files="${ac_missing_aux_files} install-sh" +- else +- break +- fi +- fi +- else +- if test -f "${as_dir}${ac_aux}"; then +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: ${as_dir}${ac_aux} found" >&5 +- else +- ac_aux_dir_found=no +- if $ac_first_candidate; then +- ac_missing_aux_files="${ac_missing_aux_files} ${ac_aux}" +- else +- break +- fi +- fi +- fi +- done +- if test "$ac_aux_dir_found" = yes; then +- ac_aux_dir="$as_dir" ++ac_aux_dir= ++for ac_dir in "$srcdir" "$srcdir/.." 
"$srcdir/../.."; do ++ if test -f "$ac_dir/install-sh"; then ++ ac_aux_dir=$ac_dir ++ ac_install_sh="$ac_aux_dir/install-sh -c" ++ break ++ elif test -f "$ac_dir/install.sh"; then ++ ac_aux_dir=$ac_dir ++ ac_install_sh="$ac_aux_dir/install.sh -c" ++ break ++ elif test -f "$ac_dir/shtool"; then ++ ac_aux_dir=$ac_dir ++ ac_install_sh="$ac_aux_dir/shtool install -c" + break + fi +- ac_first_candidate=false +- +- as_found=false + done +-IFS=$as_save_IFS +-if $as_found +-then : +- +-else $as_nop +- as_fn_error $? "cannot find required auxiliary files:$ac_missing_aux_files" "$LINENO" 5 ++if test -z "$ac_aux_dir"; then ++ as_fn_error $? "cannot find install-sh, install.sh, or shtool in \"$srcdir\" \"$srcdir/..\" \"$srcdir/../..\"" "$LINENO" 5 + fi + +- + # These three variables are undocumented and unsupported, + # and are intended to be withdrawn in a future Autoconf release. + # They can cause serious problems if a builder's source tree is in a directory + # whose full name contains unusual characters. +-if test -f "${ac_aux_dir}config.guess"; then +- ac_config_guess="$SHELL ${ac_aux_dir}config.guess" +-fi +-if test -f "${ac_aux_dir}config.sub"; then +- ac_config_sub="$SHELL ${ac_aux_dir}config.sub" +-fi +-if test -f "$ac_aux_dir/configure"; then +- ac_configure="$SHELL ${ac_aux_dir}configure" +-fi ++ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var. ++ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var. ++ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var. + +-# Check that the precious variables saved in the cache have kept the same +-# value. 
+-ac_cache_corrupted=false +-for ac_var in $ac_precious_vars; do +- eval ac_old_set=\$ac_cv_env_${ac_var}_set +- eval ac_new_set=\$ac_env_${ac_var}_set +- eval ac_old_val=\$ac_cv_env_${ac_var}_value +- eval ac_new_val=\$ac_env_${ac_var}_value +- case $ac_old_set,$ac_new_set in +- set,) +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 +-printf "%s\n" "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} +- ac_cache_corrupted=: ;; +- ,set) +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 +-printf "%s\n" "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} +- ac_cache_corrupted=: ;; +- ,);; +- *) +- if test "x$ac_old_val" != "x$ac_new_val"; then +- # differences in whitespace do not lead to failure. +- ac_old_val_w=`echo x $ac_old_val` +- ac_new_val_w=`echo x $ac_new_val` +- if test "$ac_old_val_w" != "$ac_new_val_w"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 +-printf "%s\n" "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} +- ac_cache_corrupted=: +- else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 +-printf "%s\n" "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} +- eval $ac_var=\$ac_old_val +- fi +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 +-printf "%s\n" "$as_me: former value: \`$ac_old_val'" >&2;} +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 +-printf "%s\n" "$as_me: current value: \`$ac_new_val'" >&2;} +- fi;; +- esac +- # Pass precious variables to config.status. 
+- if test "$ac_new_set" = set; then +- case $ac_new_val in +- *\'*) ac_arg=$ac_var=`printf "%s\n" "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; +- *) ac_arg=$ac_var=$ac_new_val ;; +- esac +- case " $ac_configure_args " in +- *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. +- *) as_fn_append ac_configure_args " '$ac_arg'" ;; +- esac +- fi +-done +-if $ac_cache_corrupted; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +-printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 +-printf "%s\n" "$as_me: error: changes in the environment can compromise the build" >&2;} +- as_fn_error $? "run \`${MAKE-make} distclean' and/or \`rm $cache_file' +- and start over" "$LINENO" 5 +-fi +-## -------------------- ## +-## Main body of script. ## +-## -------------------- ## +- +-ac_ext=c +-ac_cpp='$CPP $CPPFLAGS' +-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +-ac_compiler_gnu=$ac_cv_c_compiler_gnu + ++# Make sure we can run config.sub. ++$SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 || ++ as_fn_error $? "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5 + +- +- +- +- # Make sure we can run config.sub. +-$SHELL "${ac_aux_dir}config.sub" sun4 >/dev/null 2>&1 || +- as_fn_error $? "cannot run $SHELL ${ac_aux_dir}config.sub" "$LINENO" 5 +- +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking build system type" >&5 +-printf %s "checking build system type... " >&6; } +-if test ${ac_cv_build+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking build system type" >&5 ++$as_echo_n "checking build system type... 
" >&6; } ++if ${ac_cv_build+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + ac_build_alias=$build_alias + test "x$ac_build_alias" = x && +- ac_build_alias=`$SHELL "${ac_aux_dir}config.guess"` ++ ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"` + test "x$ac_build_alias" = x && + as_fn_error $? "cannot guess build type; you must specify one" "$LINENO" 5 +-ac_cv_build=`$SHELL "${ac_aux_dir}config.sub" $ac_build_alias` || +- as_fn_error $? "$SHELL ${ac_aux_dir}config.sub $ac_build_alias failed" "$LINENO" 5 ++ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` || ++ as_fn_error $? "$SHELL $ac_aux_dir/config.sub $ac_build_alias failed" "$LINENO" 5 + + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5 +-printf "%s\n" "$ac_cv_build" >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5 ++$as_echo "$ac_cv_build" >&6; } + case $ac_cv_build in + *-*-*) ;; + *) as_fn_error $? "invalid value of canonical build" "$LINENO" 5;; +@@ -3059,22 +2558,21 @@ IFS=$ac_save_IFS + case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac + + +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking host system type" >&5 +-printf %s "checking host system type... " >&6; } +-if test ${ac_cv_host+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking host system type" >&5 ++$as_echo_n "checking host system type... " >&6; } ++if ${ac_cv_host+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test "x$host_alias" = x; then + ac_cv_host=$ac_cv_build + else +- ac_cv_host=`$SHELL "${ac_aux_dir}config.sub" $host_alias` || +- as_fn_error $? "$SHELL ${ac_aux_dir}config.sub $host_alias failed" "$LINENO" 5 ++ ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` || ++ as_fn_error $? 
"$SHELL $ac_aux_dir/config.sub $host_alias failed" "$LINENO" 5 + fi + + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5 +-printf "%s\n" "$ac_cv_host" >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5 ++$as_echo "$ac_cv_host" >&6; } + case $ac_cv_host in + *-*-*) ;; + *) as_fn_error $? "invalid value of canonical host" "$LINENO" 5;; +@@ -3093,22 +2591,21 @@ IFS=$ac_save_IFS + case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac + + +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking target system type" >&5 +-printf %s "checking target system type... " >&6; } +-if test ${ac_cv_target+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking target system type" >&5 ++$as_echo_n "checking target system type... " >&6; } ++if ${ac_cv_target+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test "x$target_alias" = x; then + ac_cv_target=$ac_cv_host + else +- ac_cv_target=`$SHELL "${ac_aux_dir}config.sub" $target_alias` || +- as_fn_error $? "$SHELL ${ac_aux_dir}config.sub $target_alias failed" "$LINENO" 5 ++ ac_cv_target=`$SHELL "$ac_aux_dir/config.sub" $target_alias` || ++ as_fn_error $? "$SHELL $ac_aux_dir/config.sub $target_alias failed" "$LINENO" 5 + fi + + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_target" >&5 +-printf "%s\n" "$ac_cv_target" >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_target" >&5 ++$as_echo "$ac_cv_target" >&6; } + case $ac_cv_target in + *-*-*) ;; + *) as_fn_error $? 
"invalid value of canonical target" "$LINENO" 5;; +@@ -3133,11 +2630,54 @@ test -n "$target_alias" && + test "$program_prefix$program_suffix$program_transform_name" = \ + NONENONEs,x,x, && + program_prefix=${target_alias}- +-GCC_TOPLEV_SUBDIRS +-am__api_version='1.16' ++ case ${build_alias} in ++ "") build_noncanonical=${build} ;; ++ *) build_noncanonical=${build_alias} ;; ++esac ++ ++ case ${host_alias} in ++ "") host_noncanonical=${build_noncanonical} ;; ++ *) host_noncanonical=${host_alias} ;; ++esac ++ ++ case ${target_alias} in ++ "") target_noncanonical=${host_noncanonical} ;; ++ *) target_noncanonical=${target_alias} ;; ++esac ++ ++ ++# post-stage1 host modules use a different CC_FOR_BUILD so, in order to ++# have matching libraries, they should use host libraries: Makefile.tpl ++# arranges to pass --with-build-libsubdir=$(HOST_SUBDIR). ++# However, they still use the build modules, because the corresponding ++# host modules (e.g. bison) are only built for the host when bootstrap ++# finishes. So: ++# - build_subdir is where we find build modules, and never changes. ++# - build_libsubdir is where we find build libraries, and can be overridden. ++ ++# Prefix 'build-' so this never conflicts with target_subdir. ++build_subdir="build-${build_noncanonical}" ++ ++# Check whether --with-build-libsubdir was given. ++if test "${with_build_libsubdir+set}" = set; then : ++ withval=$with_build_libsubdir; build_libsubdir="$withval" ++else ++ build_libsubdir="$build_subdir" ++fi ++ ++# --srcdir=. covers the toplevel, while "test -d" covers the subdirectories ++if ( test $srcdir = . && test -d gcc ) \ ++ || test -d $srcdir/../host-${host_noncanonical}; then ++ host_subdir="host-${host_noncanonical}" ++else ++ host_subdir=. ++fi ++# No prefix. ++target_subdir=${target_noncanonical} + ++am__api_version='1.16' + +- # Find a good install program. We prefer a C program (faster), ++# Find a good install program. 
We prefer a C program (faster), + # so one script is as good as another. But avoid the broken or + # incompatible versions: + # SysV /etc/install, /usr/sbin/install +@@ -3151,25 +2691,20 @@ am__api_version='1.16' + # OS/2's system install, which has a completely different semantic + # ./install, which can be erroneously created by make from ./install.sh. + # Reject install programs that cannot install multiple files. +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5 +-printf %s "checking for a BSD-compatible install... " >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5 ++$as_echo_n "checking for a BSD-compatible install... " >&6; } + if test -z "$INSTALL"; then +-if test ${ac_cv_path_install+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++if ${ac_cv_path_install+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR + for as_dir in $PATH + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac +- # Account for fact that we put trailing slashes in our PATH walk. +-case $as_dir in #(( +- ./ | /[cC]/* | \ ++ test -z "$as_dir" && as_dir=. ++ # Account for people who put trailing slashes in PATH elements. ++case $as_dir/ in #(( ++ ./ | .// | /[cC]/* | \ + /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ + ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \ + /usr/ucb/* ) ;; +@@ -3179,13 +2714,13 @@ case $as_dir in #(( + # by default. + for ac_prog in ginstall scoinst install; do + for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_prog$ac_exec_ext"; then ++ if as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext"; then + if test $ac_prog = install && +- grep dspmsg "$as_dir$ac_prog$ac_exec_ext" >/dev/null 2>&1; then ++ grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then + # AIX install. 
It has an incompatible calling convention. + : + elif test $ac_prog = install && +- grep pwplus "$as_dir$ac_prog$ac_exec_ext" >/dev/null 2>&1; then ++ grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then + # program-specific install script used by HP pwplus--don't use. + : + else +@@ -3193,12 +2728,12 @@ case $as_dir in #(( + echo one > conftest.one + echo two > conftest.two + mkdir conftest.dir +- if "$as_dir$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir/" && ++ if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" && + test -s conftest.one && test -s conftest.two && + test -s conftest.dir/conftest.one && + test -s conftest.dir/conftest.two + then +- ac_cv_path_install="$as_dir$ac_prog$ac_exec_ext -c" ++ ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c" + break 3 + fi + fi +@@ -3214,7 +2749,7 @@ IFS=$as_save_IFS + rm -rf conftest.one conftest.two conftest.dir + + fi +- if test ${ac_cv_path_install+y}; then ++ if test "${ac_cv_path_install+set}" = set; then + INSTALL=$ac_cv_path_install + else + # As a last resort, use the slow shell script. Don't cache a +@@ -3224,8 +2759,8 @@ fi + INSTALL=$ac_install_sh + fi + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5 +-printf "%s\n" "$INSTALL" >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5 ++$as_echo "$INSTALL" >&6; } + + # Use test -z because SunOS4 sh mishandles braces in ${var-val}. + # It thinks the first close brace ends the variable substitution. +@@ -3235,8 +2770,8 @@ test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' + + test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' + +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether build environment is sane" >&5 +-printf %s "checking whether build environment is sane... " >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether build environment is sane" >&5 ++$as_echo_n "checking whether build environment is sane... 
" >&6; } + # Reject unsafe characters in $srcdir or the absolute working directory + # name. Accept space and tab only in the latter. + am_lf=' +@@ -3290,8 +2825,8 @@ else + as_fn_error $? "newly created file is older than distributed files! + Check your system clock" "$LINENO" 5 + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +-printf "%s\n" "yes" >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 ++$as_echo "yes" >&6; } + # If we didn't sleep, we still need to ensure time stamps of config.status and + # generated files are strictly newer. + am_sleep_pid= +@@ -3310,23 +2845,26 @@ test "$program_suffix" != NONE && + # Double any \ or $. + # By default was `s,x,x', remove it if useless. + ac_script='s/[\\$]/&&/g;s/;s,x,x,$//' +-program_transform_name=`printf "%s\n" "$program_transform_name" | sed "$ac_script"` +- ++program_transform_name=`$as_echo "$program_transform_name" | sed "$ac_script"` + + # Expand $ac_aux_dir to an absolute path. + am_aux_dir=`cd "$ac_aux_dir" && pwd` + +- +- if test x"${MISSING+set}" != xset; then +- MISSING="\${SHELL} '$am_aux_dir/missing'" ++if test x"${MISSING+set}" != xset; then ++ case $am_aux_dir in ++ *\ * | *\ *) ++ MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;; ++ *) ++ MISSING="\${SHELL} $am_aux_dir/missing" ;; ++ esac + fi + # Use eval to expand $SHELL + if eval "$MISSING --is-lightweight"; then + am_missing_run="$MISSING " + else + am_missing_run= +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: 'missing' script is too old or missing" >&5 +-printf "%s\n" "$as_me: WARNING: 'missing' script is too old or missing" >&2;} ++ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: 'missing' script is too old or missing" >&5 ++$as_echo "$as_me: WARNING: 'missing' script is too old or missing" >&2;} + fi + + if test x"${install_sh+set}" != xset; then +@@ -3346,12 +2884,11 @@ if test "$cross_compiling" != no; then + if test -n "$ac_tool_prefix"; then + # Extract the first word of 
"${ac_tool_prefix}strip", so it can be a program name with args. + set dummy ${ac_tool_prefix}strip; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... " >&6; } +-if test ${ac_cv_prog_STRIP+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_STRIP+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test -n "$STRIP"; then + ac_cv_prog_STRIP="$STRIP" # Let the user override the test. + else +@@ -3359,15 +2896,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR + for as_dir in $PATH + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac ++ test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_STRIP="${ac_tool_prefix}strip" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi + done +@@ -3378,11 +2911,11 @@ fi + fi + STRIP=$ac_cv_prog_STRIP + if test -n "$STRIP"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5 +-printf "%s\n" "$STRIP" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5 ++$as_echo "$STRIP" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } + fi + + +@@ -3391,12 +2924,11 @@ if test -z "$ac_cv_prog_STRIP"; then + ac_ct_STRIP=$STRIP + # Extract the first word of "strip", so it can be a program name with args. 
+ set dummy strip; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... " >&6; } +-if test ${ac_cv_prog_ac_ct_STRIP+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_ac_ct_STRIP+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test -n "$ac_ct_STRIP"; then + ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. + else +@@ -3404,15 +2936,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR + for as_dir in $PATH + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac ++ test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_STRIP="strip" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi + done +@@ -3423,11 +2951,11 @@ fi + fi + ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP + if test -n "$ac_ct_STRIP"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5 +-printf "%s\n" "$ac_ct_STRIP" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5 ++$as_echo "$ac_ct_STRIP" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } + fi + + if test "x$ac_ct_STRIP" = x; then +@@ -3435,8 +2963,8 @@ fi + else + case $cross_compiling:$ac_tool_warned in + yes:) +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +-printf "%s\n" "$as_me: WARNING: using cross tools not prefixed 
with host triplet" >&2;} ++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} + ac_tool_warned=yes ;; + esac + STRIP=$ac_ct_STRIP +@@ -3448,31 +2976,25 @@ fi + fi + INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" + +- +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for a race-free mkdir -p" >&5 +-printf %s "checking for a race-free mkdir -p... " >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a thread-safe mkdir -p" >&5 ++$as_echo_n "checking for a thread-safe mkdir -p... " >&6; } + if test -z "$MKDIR_P"; then +- if test ${ac_cv_path_mkdir+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++ if ${ac_cv_path_mkdir+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR + for as_dir in $PATH$PATH_SEPARATOR/opt/sfw/bin + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac ++ test -z "$as_dir" && as_dir=. + for ac_prog in mkdir gmkdir; do + for ac_exec_ext in '' $ac_executable_extensions; do +- as_fn_executable_p "$as_dir$ac_prog$ac_exec_ext" || continue +- case `"$as_dir$ac_prog$ac_exec_ext" --version 2>&1` in #( +- 'mkdir ('*'coreutils) '* | \ +- 'BusyBox '* | \ ++ as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext" || continue ++ case `"$as_dir/$ac_prog$ac_exec_ext" --version 2>&1` in #( ++ 'mkdir (GNU coreutils) '* | \ ++ 'mkdir (coreutils) '* | \ + 'mkdir (fileutils) '4.1*) +- ac_cv_path_mkdir=$as_dir$ac_prog$ac_exec_ext ++ ac_cv_path_mkdir=$as_dir/$ac_prog$ac_exec_ext + break 3;; + esac + done +@@ -3483,7 +3005,7 @@ IFS=$as_save_IFS + fi + + test -d ./--version && rmdir ./--version +- if test ${ac_cv_path_mkdir+y}; then ++ if test "${ac_cv_path_mkdir+set}" = set; then + MKDIR_P="$ac_cv_path_mkdir -p" + else + # As a last resort, use the slow shell script. 
Don't cache a +@@ -3493,19 +3015,18 @@ fi + MKDIR_P="$ac_install_sh -d" + fi + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $MKDIR_P" >&5 +-printf "%s\n" "$MKDIR_P" >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $MKDIR_P" >&5 ++$as_echo "$MKDIR_P" >&6; } + + for ac_prog in gawk mawk nawk awk + do + # Extract the first word of "$ac_prog", so it can be a program name with args. + set dummy $ac_prog; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... " >&6; } +-if test ${ac_cv_prog_AWK+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_AWK+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test -n "$AWK"; then + ac_cv_prog_AWK="$AWK" # Let the user override the test. + else +@@ -3513,15 +3034,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR + for as_dir in $PATH + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac ++ test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_AWK="$ac_prog" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi + done +@@ -3532,25 +3049,24 @@ fi + fi + AWK=$ac_cv_prog_AWK + if test -n "$AWK"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5 +-printf "%s\n" "$AWK" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5 ++$as_echo "$AWK" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } + fi + + + test -n "$AWK" && break + done + +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5 +-printf %s "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5 ++$as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... 
" >&6; } + set x ${MAKE-make} +-ac_make=`printf "%s\n" "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'` +-if eval test \${ac_cv_prog_make_${ac_make}_set+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'` ++if eval \${ac_cv_prog_make_${ac_make}_set+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + cat >conftest.make <<\_ACEOF + SHELL = /bin/sh + all: +@@ -3566,12 +3082,12 @@ esac + rm -f conftest.make + fi + if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +-printf "%s\n" "yes" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 ++$as_echo "yes" >&6; } + SET_MAKE= + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } + SET_MAKE="MAKE=${MAKE-make}" + fi + +@@ -3585,8 +3101,7 @@ fi + rmdir .tst 2>/dev/null + + # Check whether --enable-silent-rules was given. +-if test ${enable_silent_rules+y} +-then : ++if test "${enable_silent_rules+set}" = set; then : + enableval=$enable_silent_rules; + fi + +@@ -3596,13 +3111,12 @@ case $enable_silent_rules in # ((( + *) AM_DEFAULT_VERBOSITY=1;; + esac + am_make=${MAKE-make} +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $am_make supports nested variables" >&5 +-printf %s "checking whether $am_make supports nested variables... " >&6; } +-if test ${am_cv_make_support_nested_variables+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop +- if printf "%s\n" 'TRUE=$(BAR$(V)) ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $am_make supports nested variables" >&5 ++$as_echo_n "checking whether $am_make supports nested variables... 
" >&6; } ++if ${am_cv_make_support_nested_variables+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ if $as_echo 'TRUE=$(BAR$(V)) + BAR0=false + BAR1=true + V=1 +@@ -3614,8 +3128,8 @@ else + am_cv_make_support_nested_variables=no + fi + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $am_cv_make_support_nested_variables" >&5 +-printf "%s\n" "$am_cv_make_support_nested_variables" >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_make_support_nested_variables" >&5 ++$as_echo "$am_cv_make_support_nested_variables" >&6; } + if test $am_cv_make_support_nested_variables = yes; then + AM_V='$(V)' + AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)' +@@ -3650,10 +3164,14 @@ fi + VERSION='0.1' + + +-printf "%s\n" "#define PACKAGE \"$PACKAGE\"" >>confdefs.h ++cat >>confdefs.h <<_ACEOF ++#define PACKAGE "$PACKAGE" ++_ACEOF + + +-printf "%s\n" "#define VERSION \"$VERSION\"" >>confdefs.h ++cat >>confdefs.h <<_ACEOF ++#define VERSION "$VERSION" ++_ACEOF + + # Some tools Automake needs. + +@@ -3693,20 +3211,6 @@ am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -' + + + +-# Variables for tags utilities; see am/tags.am +-if test -z "$CTAGS"; then +- CTAGS=ctags +-fi +- +-if test -z "$ETAGS"; then +- ETAGS=etags +-fi +- +-if test -z "$CSCOPE"; then +- CSCOPE=cscope +-fi +- +- + + # POSIX will say in a future version that running "rm -f" with no argument + # is OK; and we want to be able to make that assumption in our Makefile +@@ -3751,18 +3255,17 @@ END + fi + + +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether to enable maintainer-specific portions of Makefiles" >&5 +-printf %s "checking whether to enable maintainer-specific portions of Makefiles... " >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable maintainer-specific portions of Makefiles" >&5 ++$as_echo_n "checking whether to enable maintainer-specific portions of Makefiles... " >&6; } + # Check whether --enable-maintainer-mode was given. 
+-if test ${enable_maintainer_mode+y} +-then : ++if test "${enable_maintainer_mode+set}" = set; then : + enableval=$enable_maintainer_mode; USE_MAINTAINER_MODE=$enableval +-else $as_nop ++else + USE_MAINTAINER_MODE=no + fi + +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $USE_MAINTAINER_MODE" >&5 +-printf "%s\n" "$USE_MAINTAINER_MODE" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $USE_MAINTAINER_MODE" >&5 ++$as_echo "$USE_MAINTAINER_MODE" >&6; } + if test $USE_MAINTAINER_MODE = yes; then + MAINTAINER_MODE_TRUE= + MAINTAINER_MODE_FALSE='#' +@@ -3776,29 +3279,19 @@ fi + + + # Check whether --with-libiberty was given. +-if test ${with_libiberty+y} +-then : ++if test "${with_libiberty+set}" = set; then : + withval=$with_libiberty; +-else $as_nop ++else + with_libiberty=../libiberty + fi + + +- +- +- +- +- +- +- +- +- + DEPDIR="${am__leading_dot}deps" + + ac_config_commands="$ac_config_commands depfiles" + +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} supports the include directive" >&5 +-printf %s "checking whether ${MAKE-make} supports the include directive... " >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} supports the include directive" >&5 ++$as_echo_n "checking whether ${MAKE-make} supports the include directive... " >&6; } + cat > confinc.mk << 'END' + am__doit: + @echo this is the am__doit target >confinc.out +@@ -3834,12 +3327,11 @@ esac + fi + done + rm -f confinc.* confmf.* +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: ${_am_result}" >&5 +-printf "%s\n" "${_am_result}" >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${_am_result}" >&5 ++$as_echo "${_am_result}" >&6; } + + # Check whether --enable-dependency-tracking was given. 
+-if test ${enable_dependency_tracking+y} +-then : ++if test "${enable_dependency_tracking+set}" = set; then : + enableval=$enable_dependency_tracking; + fi + +@@ -3865,12 +3357,11 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. + set dummy ${ac_tool_prefix}gcc; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... " >&6; } +-if test ${ac_cv_prog_CC+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_CC+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. + else +@@ -3878,15 +3369,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR + for as_dir in $PATH + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac ++ test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="${ac_tool_prefix}gcc" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi + done +@@ -3897,11 +3384,11 @@ fi + fi + CC=$ac_cv_prog_CC + if test -n "$CC"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +-printf "%s\n" "$CC" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 ++$as_echo "$CC" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } + fi + + +@@ -3910,12 +3397,11 @@ if test -z "$ac_cv_prog_CC"; then + ac_ct_CC=$CC + # Extract the first word of "gcc", so it can be a program name with args. + set dummy gcc; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... " >&6; } +-if test ${ac_cv_prog_ac_ct_CC+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_ac_ct_CC+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. + else +@@ -3923,15 +3409,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR + for as_dir in $PATH + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac ++ test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CC="gcc" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi + done +@@ -3942,11 +3424,11 @@ fi + fi + ac_ct_CC=$ac_cv_prog_ac_ct_CC + if test -n "$ac_ct_CC"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 +-printf "%s\n" "$ac_ct_CC" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 ++$as_echo "$ac_ct_CC" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } + fi + + if test "x$ac_ct_CC" = x; then +@@ -3954,8 +3436,8 @@ fi + else + case $cross_compiling:$ac_tool_warned in + yes:) +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +-printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} + ac_tool_warned=yes ;; + esac + CC=$ac_ct_CC +@@ -3968,12 +3450,11 @@ if test -z "$CC"; then + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. + set dummy ${ac_tool_prefix}cc; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... " >&6; } +-if test ${ac_cv_prog_CC+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... 
" >&6; } ++if ${ac_cv_prog_CC+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. + else +@@ -3981,15 +3462,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR + for as_dir in $PATH + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac ++ test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="${ac_tool_prefix}cc" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi + done +@@ -4000,11 +3477,11 @@ fi + fi + CC=$ac_cv_prog_CC + if test -n "$CC"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +-printf "%s\n" "$CC" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 ++$as_echo "$CC" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } + fi + + +@@ -4013,12 +3490,11 @@ fi + if test -z "$CC"; then + # Extract the first word of "cc", so it can be a program name with args. + set dummy cc; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... " >&6; } +-if test ${ac_cv_prog_CC+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_CC+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. 
+ else +@@ -4027,19 +3503,15 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR + for as_dir in $PATH + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac ++ test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then +- if test "$as_dir$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ++ if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then + ac_prog_rejected=yes + continue + fi + ac_cv_prog_CC="cc" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi + done +@@ -4055,18 +3527,18 @@ if test $ac_prog_rejected = yes; then + # However, it has the same basename, so the bogon will be chosen + # first if we set CC to just the basename; use the full file name. + shift +- ac_cv_prog_CC="$as_dir$ac_word${1+' '}$@" ++ ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" + fi + fi + fi + fi + CC=$ac_cv_prog_CC + if test -n "$CC"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +-printf "%s\n" "$CC" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 ++$as_echo "$CC" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } + fi + + +@@ -4077,12 +3549,11 @@ if test -z "$CC"; then + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. + set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... 
" >&6; } +-if test ${ac_cv_prog_CC+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_CC+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. + else +@@ -4090,15 +3561,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR + for as_dir in $PATH + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac ++ test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="$ac_tool_prefix$ac_prog" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi + done +@@ -4109,11 +3576,11 @@ fi + fi + CC=$ac_cv_prog_CC + if test -n "$CC"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +-printf "%s\n" "$CC" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 ++$as_echo "$CC" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } + fi + + +@@ -4126,118 +3593,11 @@ if test -z "$CC"; then + do + # Extract the first word of "$ac_prog", so it can be a program name with args. + set dummy $ac_prog; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... " >&6; } +-if test ${ac_cv_prog_ac_ct_CC+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop +- if test -n "$ac_ct_CC"; then +- ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. 
+-else +-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +-for as_dir in $PATH +-do +- IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac +- for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then +- ac_cv_prog_ac_ct_CC="$ac_prog" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 +- break 2 +- fi +-done +- done +-IFS=$as_save_IFS +- +-fi +-fi +-ac_ct_CC=$ac_cv_prog_ac_ct_CC +-if test -n "$ac_ct_CC"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 +-printf "%s\n" "$ac_ct_CC" >&6; } +-else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } +-fi +- +- +- test -n "$ac_ct_CC" && break +-done +- +- if test "x$ac_ct_CC" = x; then +- CC="" +- else +- case $cross_compiling:$ac_tool_warned in +-yes:) +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +-printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +-ac_tool_warned=yes ;; +-esac +- CC=$ac_ct_CC +- fi +-fi +- +-fi +-if test -z "$CC"; then +- if test -n "$ac_tool_prefix"; then +- # Extract the first word of "${ac_tool_prefix}clang", so it can be a program name with args. +-set dummy ${ac_tool_prefix}clang; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... " >&6; } +-if test ${ac_cv_prog_CC+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop +- if test -n "$CC"; then +- ac_cv_prog_CC="$CC" # Let the user override the test. 
+-else +-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +-for as_dir in $PATH +-do +- IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac +- for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then +- ac_cv_prog_CC="${ac_tool_prefix}clang" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 +- break 2 +- fi +-done +- done +-IFS=$as_save_IFS +- +-fi +-fi +-CC=$ac_cv_prog_CC +-if test -n "$CC"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +-printf "%s\n" "$CC" >&6; } +-else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } +-fi +- +- +-fi +-if test -z "$ac_cv_prog_CC"; then +- ac_ct_CC=$CC +- # Extract the first word of "clang", so it can be a program name with args. +-set dummy clang; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... " >&6; } +-if test ${ac_cv_prog_ac_ct_CC+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_ac_ct_CC+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. + else +@@ -4245,15 +3605,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR + for as_dir in $PATH + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac ++ test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then +- ac_cv_prog_ac_ct_CC="clang" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ++ ac_cv_prog_ac_ct_CC="$ac_prog" ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi + done +@@ -4264,48 +3620,50 @@ fi + fi + ac_ct_CC=$ac_cv_prog_ac_ct_CC + if test -n "$ac_ct_CC"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 +-printf "%s\n" "$ac_ct_CC" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 ++$as_echo "$ac_ct_CC" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } + fi + ++ ++ test -n "$ac_ct_CC" && break ++done ++ + if test "x$ac_ct_CC" = x; then + CC="" + else + case $cross_compiling:$ac_tool_warned in + yes:) +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +-printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} + ac_tool_warned=yes ;; + esac + CC=$ac_ct_CC + fi +-else +- CC="$ac_cv_prog_CC" + fi + + fi + + +-test -z "$CC" && { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +-printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} ++test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 ++$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} + as_fn_error $? 
"no acceptable C compiler found in \$PATH + See \`config.log' for more details" "$LINENO" 5; } + + # Provide some information about the compiler. +-printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 ++$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 + set X $ac_compile + ac_compiler=$2 +-for ac_option in --version -v -V -qversion -version; do ++for ac_option in --version -v -V -qversion; do + { { ac_try="$ac_compiler $ac_option >&5" + case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; + esac + eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +-printf "%s\n" "$ac_try_echo"; } >&5 ++$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compiler $ac_option >&5") 2>conftest.err + ac_status=$? + if test -s conftest.err; then +@@ -4315,7 +3673,7 @@ printf "%s\n" "$ac_try_echo"; } >&5 + cat conftest.er1 >&5 + fi + rm -f conftest.er1 conftest.err +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } + done + +@@ -4323,7 +3681,7 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ + + int +-main (void) ++main () + { + + ; +@@ -4335,9 +3693,9 @@ ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" + # Try to create an executable without -o first, disregard a.out. + # It will help us diagnose broken compilers, and finding out an intuition + # of exeext. +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5 +-printf %s "checking whether the C compiler works... " >&6; } +-ac_link_default=`printf "%s\n" "$ac_link" | sed 's/ -o *conftest[^ ]*//'` ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5 ++$as_echo_n "checking whether the C compiler works... 
" >&6; } ++ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` + + # The possible output files: + ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" +@@ -4358,12 +3716,11 @@ case "(($ac_try" in + *) ac_try_echo=$ac_try;; + esac + eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +-printf "%s\n" "$ac_try_echo"; } >&5 ++$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link_default") 2>&5 + ac_status=$? +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 +- test $ac_status = 0; } +-then : ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ test $ac_status = 0; }; then : + # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. + # So ignore a value of `no', otherwise this would lead to `EXEEXT = no' + # in a Makefile. We should not override ac_cv_exeext if it was cached, +@@ -4380,7 +3737,7 @@ do + # certainly right. + break;; + *.* ) +- if test ${ac_cv_exeext+y} && test "$ac_cv_exeext" != no; ++ if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; + then :; else + ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` + fi +@@ -4396,46 +3753,44 @@ do + done + test "$ac_cv_exeext" = no && ac_cv_exeext= + +-else $as_nop ++else + ac_file='' + fi +-if test -z "$ac_file" +-then : +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } +-printf "%s\n" "$as_me: failed program was:" >&5 ++if test -z "$ac_file"; then : ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } ++$as_echo "$as_me: failed program was:" >&5 + sed 's/^/| /' conftest.$ac_ext >&5 + +-{ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +-printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} ++{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 ++$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} + as_fn_error 77 "C compiler cannot create executables + See \`config.log' for more details" "$LINENO" 5; } +-else 
$as_nop +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +-printf "%s\n" "yes" >&6; } +-fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5 +-printf %s "checking for C compiler default output file name... " >&6; } +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 +-printf "%s\n" "$ac_file" >&6; } ++else ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 ++$as_echo "yes" >&6; } ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5 ++$as_echo_n "checking for C compiler default output file name... " >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 ++$as_echo "$ac_file" >&6; } + ac_exeext=$ac_cv_exeext + + rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out + ac_clean_files=$ac_clean_files_save +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 +-printf %s "checking for suffix of executables... " >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 ++$as_echo_n "checking for suffix of executables... " >&6; } + if { { ac_try="$ac_link" + case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; + esac + eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +-printf "%s\n" "$ac_try_echo"; } >&5 ++$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>&5 + ac_status=$? +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 +- test $ac_status = 0; } +-then : ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ test $ac_status = 0; }; then : + # If both `conftest.exe' and `conftest' are `present' (well, observable) + # catch `conftest.exe'. 
For instance with Cygwin, `ls conftest' will + # work properly (i.e., refer to `conftest.exe'), while it won't with +@@ -4449,15 +3804,15 @@ for ac_file in conftest.exe conftest conftest.*; do + * ) break;; + esac + done +-else $as_nop +- { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +-printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} ++else ++ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 ++$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} + as_fn_error $? "cannot compute suffix of executables: cannot compile and link + See \`config.log' for more details" "$LINENO" 5; } + fi + rm -f conftest conftest$ac_cv_exeext +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 +-printf "%s\n" "$ac_cv_exeext" >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 ++$as_echo "$ac_cv_exeext" >&6; } + + rm -f conftest.$ac_ext + EXEEXT=$ac_cv_exeext +@@ -4466,7 +3821,7 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ + #include + int +-main (void) ++main () + { + FILE *f = fopen ("conftest.out", "w"); + return ferror (f) || fclose (f) != 0; +@@ -4478,8 +3833,8 @@ _ACEOF + ac_clean_files="$ac_clean_files conftest.out" + # Check that the compiler produces executables we can run. If not, either + # the compiler is broken, or we cross compile. +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 +-printf %s "checking whether we are cross compiling... " >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 ++$as_echo_n "checking whether we are cross compiling... 
" >&6; } + if test "$cross_compiling" != yes; then + { { ac_try="$ac_link" + case "(($ac_try" in +@@ -4487,10 +3842,10 @@ case "(($ac_try" in + *) ac_try_echo=$ac_try;; + esac + eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +-printf "%s\n" "$ac_try_echo"; } >&5 ++$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>&5 + ac_status=$? +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } + if { ac_try='./conftest$ac_cv_exeext' + { { case "(($ac_try" in +@@ -4498,40 +3853,39 @@ printf "%s\n" "$ac_try_echo"; } >&5 + *) ac_try_echo=$ac_try;; + esac + eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +-printf "%s\n" "$ac_try_echo"; } >&5 ++$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; }; then + cross_compiling=no + else + if test "$cross_compiling" = maybe; then + cross_compiling=yes + else +- { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +-printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} +-as_fn_error 77 "cannot run C compiled programs. ++ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 ++$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} ++as_fn_error $? "cannot run C compiled programs. + If you meant to cross compile, use \`--host'. 
+ See \`config.log' for more details" "$LINENO" 5; } + fi + fi + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 +-printf "%s\n" "$cross_compiling" >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 ++$as_echo "$cross_compiling" >&6; } + + rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out + ac_clean_files=$ac_clean_files_save +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 +-printf %s "checking for suffix of object files... " >&6; } +-if test ${ac_cv_objext+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 ++$as_echo_n "checking for suffix of object files... " >&6; } ++if ${ac_cv_objext+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ + + int +-main (void) ++main () + { + + ; +@@ -4545,12 +3899,11 @@ case "(($ac_try" in + *) ac_try_echo=$ac_try;; + esac + eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +-printf "%s\n" "$ac_try_echo"; } >&5 ++$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compile") 2>&5 + ac_status=$? +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 +- test $ac_status = 0; } +-then : ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 ++ test $ac_status = 0; }; then : + for ac_file in conftest.o conftest.obj conftest.*; do + test -f "$ac_file" || continue; + case $ac_file in +@@ -4559,32 +3912,31 @@ then : + break;; + esac + done +-else $as_nop +- printf "%s\n" "$as_me: failed program was:" >&5 ++else ++ $as_echo "$as_me: failed program was:" >&5 + sed 's/^/| /' conftest.$ac_ext >&5 + +-{ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +-printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} ++{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 ++$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} + as_fn_error $? "cannot compute suffix of object files: cannot compile + See \`config.log' for more details" "$LINENO" 5; } + fi + rm -f conftest.$ac_cv_objext conftest.$ac_ext + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 +-printf "%s\n" "$ac_cv_objext" >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 ++$as_echo "$ac_cv_objext" >&6; } + OBJEXT=$ac_cv_objext + ac_objext=$OBJEXT +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the compiler supports GNU C" >&5 +-printf %s "checking whether the compiler supports GNU C... " >&6; } +-if test ${ac_cv_c_compiler_gnu+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 ++$as_echo_n "checking whether we are using the GNU C compiler... " >&6; } ++if ${ac_cv_c_compiler_gnu+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. 
*/ + + int +-main (void) ++main () + { + #ifndef __GNUC__ + choke me +@@ -4594,33 +3946,29 @@ main (void) + return 0; + } + _ACEOF +-if ac_fn_c_try_compile "$LINENO" +-then : ++if ac_fn_c_try_compile "$LINENO"; then : + ac_compiler_gnu=yes +-else $as_nop ++else + ac_compiler_gnu=no + fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_cv_c_compiler_gnu=$ac_compiler_gnu + + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 +-printf "%s\n" "$ac_cv_c_compiler_gnu" >&6; } +-ac_compiler_gnu=$ac_cv_c_compiler_gnu +- ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 ++$as_echo "$ac_cv_c_compiler_gnu" >&6; } + if test $ac_compiler_gnu = yes; then + GCC=yes + else + GCC= + fi +-ac_test_CFLAGS=${CFLAGS+y} ++ac_test_CFLAGS=${CFLAGS+set} + ac_save_CFLAGS=$CFLAGS +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 +-printf %s "checking whether $CC accepts -g... " >&6; } +-if test ${ac_cv_prog_cc_g+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 ++$as_echo_n "checking whether $CC accepts -g... " >&6; } ++if ${ac_cv_prog_cc_g+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + ac_save_c_werror_flag=$ac_c_werror_flag + ac_c_werror_flag=yes + ac_cv_prog_cc_g=no +@@ -4629,60 +3977,57 @@ else $as_nop + /* end confdefs.h. */ + + int +-main (void) ++main () + { + + ; + return 0; + } + _ACEOF +-if ac_fn_c_try_compile "$LINENO" +-then : ++if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_prog_cc_g=yes +-else $as_nop ++else + CFLAGS="" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. 
*/ + + int +-main (void) ++main () + { + + ; + return 0; + } + _ACEOF +-if ac_fn_c_try_compile "$LINENO" +-then : ++if ac_fn_c_try_compile "$LINENO"; then : + +-else $as_nop ++else + ac_c_werror_flag=$ac_save_c_werror_flag + CFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ + + int +-main (void) ++main () + { + + ; + return 0; + } + _ACEOF +-if ac_fn_c_try_compile "$LINENO" +-then : ++if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_prog_cc_g=yes + fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_c_werror_flag=$ac_save_c_werror_flag + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 +-printf "%s\n" "$ac_cv_prog_cc_g" >&6; } +-if test $ac_test_CFLAGS; then ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 ++$as_echo "$ac_cv_prog_cc_g" >&6; } ++if test "$ac_test_CFLAGS" = set; then + CFLAGS=$ac_save_CFLAGS + elif test $ac_cv_prog_cc_g = yes; then + if test "$GCC" = yes; then +@@ -4697,144 +4042,94 @@ else + CFLAGS= + fi + fi +-ac_prog_cc_stdc=no +-if test x$ac_prog_cc_stdc = xno +-then : +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CC option to enable C11 features" >&5 +-printf %s "checking for $CC option to enable C11 features... " >&6; } +-if test ${ac_cv_prog_cc_c11+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop +- ac_cv_prog_cc_c11=no ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 ++$as_echo_n "checking for $CC option to accept ISO C89... 
" >&6; } ++if ${ac_cv_prog_cc_c89+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ ac_cv_prog_cc_c89=no + ac_save_CC=$CC + cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ +-$ac_c_conftest_c11_program +-_ACEOF +-for ac_arg in '' -std=gnu11 +-do +- CC="$ac_save_CC $ac_arg" +- if ac_fn_c_try_compile "$LINENO" +-then : +- ac_cv_prog_cc_c11=$ac_arg +-fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam +- test "x$ac_cv_prog_cc_c11" != "xno" && break +-done +-rm -f conftest.$ac_ext +-CC=$ac_save_CC +-fi ++#include ++#include ++struct stat; ++/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */ ++struct buf { int x; }; ++FILE * (*rcsopen) (struct buf *, struct stat *, int); ++static char *e (p, i) ++ char **p; ++ int i; ++{ ++ return p[i]; ++} ++static char *f (char * (*g) (char **, int), char **p, ...) ++{ ++ char *s; ++ va_list v; ++ va_start (v,p); ++ s = g (p, va_arg (v,int)); ++ va_end (v); ++ return s; ++} + +-if test "x$ac_cv_prog_cc_c11" = xno +-then : +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 +-printf "%s\n" "unsupported" >&6; } +-else $as_nop +- if test "x$ac_cv_prog_cc_c11" = x +-then : +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 +-printf "%s\n" "none needed" >&6; } +-else $as_nop +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c11" >&5 +-printf "%s\n" "$ac_cv_prog_cc_c11" >&6; } +- CC="$CC $ac_cv_prog_cc_c11" +-fi +- ac_cv_prog_cc_stdc=$ac_cv_prog_cc_c11 +- ac_prog_cc_stdc=c11 +-fi +-fi +-if test x$ac_prog_cc_stdc = xno +-then : +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CC option to enable C99 features" >&5 +-printf %s "checking for $CC option to enable C99 features... " >&6; } +-if test ${ac_cv_prog_cc_c99+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop +- ac_cv_prog_cc_c99=no +-ac_save_CC=$CC +-cat confdefs.h - <<_ACEOF >conftest.$ac_ext +-/* end confdefs.h. 
*/ +-$ac_c_conftest_c99_program +-_ACEOF +-for ac_arg in '' -std=gnu99 -std=c99 -c99 -qlanglvl=extc1x -qlanglvl=extc99 -AC99 -D_STDC_C99= +-do +- CC="$ac_save_CC $ac_arg" +- if ac_fn_c_try_compile "$LINENO" +-then : +- ac_cv_prog_cc_c99=$ac_arg +-fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam +- test "x$ac_cv_prog_cc_c99" != "xno" && break +-done +-rm -f conftest.$ac_ext +-CC=$ac_save_CC +-fi ++/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has ++ function prototypes and stuff, but not '\xHH' hex character constants. ++ These don't provoke an error unfortunately, instead are silently treated ++ as 'x'. The following induces an error, until -std is added to get ++ proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an ++ array size at least. It's necessary to write '\x00'==0 to get something ++ that's true only with -std. */ ++int osf4_cc_array ['\x00' == 0 ? 1 : -1]; + +-if test "x$ac_cv_prog_cc_c99" = xno +-then : +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 +-printf "%s\n" "unsupported" >&6; } +-else $as_nop +- if test "x$ac_cv_prog_cc_c99" = x +-then : +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 +-printf "%s\n" "none needed" >&6; } +-else $as_nop +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c99" >&5 +-printf "%s\n" "$ac_cv_prog_cc_c99" >&6; } +- CC="$CC $ac_cv_prog_cc_c99" +-fi +- ac_cv_prog_cc_stdc=$ac_cv_prog_cc_c99 +- ac_prog_cc_stdc=c99 +-fi +-fi +-if test x$ac_prog_cc_stdc = xno +-then : +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CC option to enable C89 features" >&5 +-printf %s "checking for $CC option to enable C89 features... " >&6; } +-if test ${ac_cv_prog_cc_c89+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop +- ac_cv_prog_cc_c89=no +-ac_save_CC=$CC +-cat confdefs.h - <<_ACEOF >conftest.$ac_ext +-/* end confdefs.h. 
*/ +-$ac_c_conftest_c89_program ++/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters ++ inside strings and character constants. */ ++#define FOO(x) 'x' ++int xlc6_cc_array[FOO(a) == 'x' ? 1 : -1]; ++ ++int test (int i, double x); ++struct s1 {int (*f) (int a);}; ++struct s2 {int (*f) (double a);}; ++int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); ++int argc; ++char **argv; ++int ++main () ++{ ++return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; ++ ; ++ return 0; ++} + _ACEOF +-for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" ++for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ ++ -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" + do + CC="$ac_save_CC $ac_arg" +- if ac_fn_c_try_compile "$LINENO" +-then : ++ if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_prog_cc_c89=$ac_arg + fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam ++rm -f core conftest.err conftest.$ac_objext + test "x$ac_cv_prog_cc_c89" != "xno" && break + done + rm -f conftest.$ac_ext + CC=$ac_save_CC +-fi + +-if test "x$ac_cv_prog_cc_c89" = xno +-then : +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 +-printf "%s\n" "unsupported" >&6; } +-else $as_nop +- if test "x$ac_cv_prog_cc_c89" = x +-then : +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 +-printf "%s\n" "none needed" >&6; } +-else $as_nop +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 +-printf "%s\n" "$ac_cv_prog_cc_c89" >&6; } +- CC="$CC $ac_cv_prog_cc_c89" +-fi +- ac_cv_prog_cc_stdc=$ac_cv_prog_cc_c89 +- ac_prog_cc_stdc=c89 + fi ++# AC_CACHE_VAL ++case "x$ac_cv_prog_cc_c89" in ++ x) ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 ++$as_echo "none needed" >&6; } ;; ++ xno) ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 ++$as_echo "unsupported" >&6; } ;; ++ *) ++ CC="$CC 
$ac_cv_prog_cc_c89" ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 ++$as_echo "$ac_cv_prog_cc_c89" >&6; } ;; ++esac ++if test "x$ac_cv_prog_cc_c89" != xno; then : ++ + fi + + ac_ext=c +@@ -4843,23 +4138,21 @@ ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' + ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' + ac_compiler_gnu=$ac_cv_c_compiler_gnu + +- +- ac_ext=c ++ac_ext=c + ac_cpp='$CPP $CPPFLAGS' + ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' + ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' + ac_compiler_gnu=$ac_cv_c_compiler_gnu +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $CC understands -c and -o together" >&5 +-printf %s "checking whether $CC understands -c and -o together... " >&6; } +-if test ${am_cv_prog_cc_c_o+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC understands -c and -o together" >&5 ++$as_echo_n "checking whether $CC understands -c and -o together... " >&6; } ++if ${am_cv_prog_cc_c_o+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ + + int +-main (void) ++main () + { + + ; +@@ -4887,8 +4180,8 @@ _ACEOF + rm -f core conftest* + unset am_i + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $am_cv_prog_cc_c_o" >&5 +-printf "%s\n" "$am_cv_prog_cc_c_o" >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_prog_cc_c_o" >&5 ++$as_echo "$am_cv_prog_cc_c_o" >&6; } + if test "$am_cv_prog_cc_c_o" != yes; then + # Losing compiler, so override with the script. + # FIXME: It is wrong to rewrite CC. +@@ -4906,12 +4199,11 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu + + depcc="$CC" am_compiler_list= + +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 +-printf %s "checking dependency style of $depcc... 
" >&6; } +-if test ${am_cv_CC_dependencies_compiler_type+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 ++$as_echo_n "checking dependency style of $depcc... " >&6; } ++if ${am_cv_CC_dependencies_compiler_type+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then + # We make a subdir and do the tests there. Otherwise we can end up + # making bogus files that we don't know about and never remove. For +@@ -5014,195 +4306,483 @@ else $as_nop + cd .. + rm -rf conftest.dir + else +- am_cv_CC_dependencies_compiler_type=none +-fi +- ++ am_cv_CC_dependencies_compiler_type=none ++fi ++ ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CC_dependencies_compiler_type" >&5 ++$as_echo "$am_cv_CC_dependencies_compiler_type" >&6; } ++CCDEPMODE=depmode=$am_cv_CC_dependencies_compiler_type ++ ++ if ++ test "x$enable_dependency_tracking" != xno \ ++ && test "$am_cv_CC_dependencies_compiler_type" = gcc3; then ++ am__fastdepCC_TRUE= ++ am__fastdepCC_FALSE='#' ++else ++ am__fastdepCC_TRUE='#' ++ am__fastdepCC_FALSE= ++fi ++ ++ ++ ++ac_ext=c ++ac_cpp='$CPP $CPPFLAGS' ++ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ++ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ++ac_compiler_gnu=$ac_cv_c_compiler_gnu ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5 ++$as_echo_n "checking how to run the C preprocessor... " >&6; } ++# On Suns, sometimes $CPP names a directory. 
++if test -n "$CPP" && test -d "$CPP"; then ++ CPP= ++fi ++if test -z "$CPP"; then ++ if ${ac_cv_prog_CPP+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ # Double quotes because CPP needs to be expanded ++ for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" ++ do ++ ac_preproc_ok=false ++for ac_c_preproc_warn_flag in '' yes ++do ++ # Use a header file that comes with gcc, so configuring glibc ++ # with a fresh cross-compiler works. ++ # Prefer to if __STDC__ is defined, since ++ # exists even on freestanding compilers. ++ # On the NeXT, cc -E runs the code through the compiler's parser, ++ # not just through cpp. "Syntax error" is here to catch this case. ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++#ifdef __STDC__ ++# include ++#else ++# include ++#endif ++ Syntax error ++_ACEOF ++if ac_fn_c_try_cpp "$LINENO"; then : ++ ++else ++ # Broken: fails on valid input. ++continue ++fi ++rm -f conftest.err conftest.i conftest.$ac_ext ++ ++ # OK, works on sane cases. Now check whether nonexistent headers ++ # can be detected and how. ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++#include ++_ACEOF ++if ac_fn_c_try_cpp "$LINENO"; then : ++ # Broken: success on invalid input. ++continue ++else ++ # Passes both tests. ++ac_preproc_ok=: ++break ++fi ++rm -f conftest.err conftest.i conftest.$ac_ext ++ ++done ++# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. ++rm -f conftest.i conftest.err conftest.$ac_ext ++if $ac_preproc_ok; then : ++ break ++fi ++ ++ done ++ ac_cv_prog_CPP=$CPP ++ ++fi ++ CPP=$ac_cv_prog_CPP ++else ++ ac_cv_prog_CPP=$CPP ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5 ++$as_echo "$CPP" >&6; } ++ac_preproc_ok=false ++for ac_c_preproc_warn_flag in '' yes ++do ++ # Use a header file that comes with gcc, so configuring glibc ++ # with a fresh cross-compiler works. ++ # Prefer to if __STDC__ is defined, since ++ # exists even on freestanding compilers. 
++ # On the NeXT, cc -E runs the code through the compiler's parser, ++ # not just through cpp. "Syntax error" is here to catch this case. ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++#ifdef __STDC__ ++# include ++#else ++# include ++#endif ++ Syntax error ++_ACEOF ++if ac_fn_c_try_cpp "$LINENO"; then : ++ ++else ++ # Broken: fails on valid input. ++continue ++fi ++rm -f conftest.err conftest.i conftest.$ac_ext ++ ++ # OK, works on sane cases. Now check whether nonexistent headers ++ # can be detected and how. ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++#include ++_ACEOF ++if ac_fn_c_try_cpp "$LINENO"; then : ++ # Broken: success on invalid input. ++continue ++else ++ # Passes both tests. ++ac_preproc_ok=: ++break ++fi ++rm -f conftest.err conftest.i conftest.$ac_ext ++ ++done ++# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. ++rm -f conftest.i conftest.err conftest.$ac_ext ++if $ac_preproc_ok; then : ++ ++else ++ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 ++$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} ++as_fn_error $? "C preprocessor \"$CPP\" fails sanity check ++See \`config.log' for more details" "$LINENO" 5; } ++fi ++ ++ac_ext=c ++ac_cpp='$CPP $CPPFLAGS' ++ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ++ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ++ac_compiler_gnu=$ac_cv_c_compiler_gnu ++ ++ ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5 ++$as_echo_n "checking for grep that handles long lines and -e... 
" >&6; } ++if ${ac_cv_path_GREP+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ if test -z "$GREP"; then ++ ac_path_GREP_found=false ++ # Loop through the user's path and test for each of PROGNAME-LIST ++ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin ++do ++ IFS=$as_save_IFS ++ test -z "$as_dir" && as_dir=. ++ for ac_prog in grep ggrep; do ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext" ++ as_fn_executable_p "$ac_path_GREP" || continue ++# Check for GNU ac_path_GREP and select it if it is found. ++ # Check for GNU $ac_path_GREP ++case `"$ac_path_GREP" --version 2>&1` in ++*GNU*) ++ ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; ++*) ++ ac_count=0 ++ $as_echo_n 0123456789 >"conftest.in" ++ while : ++ do ++ cat "conftest.in" "conftest.in" >"conftest.tmp" ++ mv "conftest.tmp" "conftest.in" ++ cp "conftest.in" "conftest.nl" ++ $as_echo 'GREP' >> "conftest.nl" ++ "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break ++ diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break ++ as_fn_arith $ac_count + 1 && ac_count=$as_val ++ if test $ac_count -gt ${ac_path_GREP_max-0}; then ++ # Best one so far, save it but keep looking for a better one ++ ac_cv_path_GREP="$ac_path_GREP" ++ ac_path_GREP_max=$ac_count ++ fi ++ # 10*(2^10) chars as input seems more than enough ++ test $ac_count -gt 10 && break ++ done ++ rm -f conftest.in conftest.tmp conftest.nl conftest.out;; ++esac ++ ++ $ac_path_GREP_found && break 3 ++ done ++ done ++ done ++IFS=$as_save_IFS ++ if test -z "$ac_cv_path_GREP"; then ++ as_fn_error $? 
"no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 ++ fi ++else ++ ac_cv_path_GREP=$GREP + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $am_cv_CC_dependencies_compiler_type" >&5 +-printf "%s\n" "$am_cv_CC_dependencies_compiler_type" >&6; } +-CCDEPMODE=depmode=$am_cv_CC_dependencies_compiler_type + +- if +- test "x$enable_dependency_tracking" != xno \ +- && test "$am_cv_CC_dependencies_compiler_type" = gcc3; then +- am__fastdepCC_TRUE= +- am__fastdepCC_FALSE='#' +-else +- am__fastdepCC_TRUE='#' +- am__fastdepCC_FALSE= + fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5 ++$as_echo "$ac_cv_path_GREP" >&6; } ++ GREP="$ac_cv_path_GREP" + + +- +-ac_header= ac_cache= +-for ac_item in $ac_header_c_list ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 ++$as_echo_n "checking for egrep... " >&6; } ++if ${ac_cv_path_EGREP+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 ++ then ac_cv_path_EGREP="$GREP -E" ++ else ++ if test -z "$EGREP"; then ++ ac_path_EGREP_found=false ++ # Loop through the user's path and test for each of PROGNAME-LIST ++ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin + do +- if test $ac_cache; then +- ac_fn_c_check_header_compile "$LINENO" $ac_header ac_cv_header_$ac_cache "$ac_includes_default" +- if eval test \"x\$ac_cv_header_$ac_cache\" = xyes; then +- printf "%s\n" "#define $ac_item 1" >> confdefs.h ++ IFS=$as_save_IFS ++ test -z "$as_dir" && as_dir=. ++ for ac_prog in egrep; do ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" ++ as_fn_executable_p "$ac_path_EGREP" || continue ++# Check for GNU ac_path_EGREP and select it if it is found. 
++ # Check for GNU $ac_path_EGREP ++case `"$ac_path_EGREP" --version 2>&1` in ++*GNU*) ++ ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; ++*) ++ ac_count=0 ++ $as_echo_n 0123456789 >"conftest.in" ++ while : ++ do ++ cat "conftest.in" "conftest.in" >"conftest.tmp" ++ mv "conftest.tmp" "conftest.in" ++ cp "conftest.in" "conftest.nl" ++ $as_echo 'EGREP' >> "conftest.nl" ++ "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break ++ diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break ++ as_fn_arith $ac_count + 1 && ac_count=$as_val ++ if test $ac_count -gt ${ac_path_EGREP_max-0}; then ++ # Best one so far, save it but keep looking for a better one ++ ac_cv_path_EGREP="$ac_path_EGREP" ++ ac_path_EGREP_max=$ac_count + fi +- ac_header= ac_cache= +- elif test $ac_header; then +- ac_cache=$ac_item +- else +- ac_header=$ac_item +- fi +-done +- +- +- +- +- +- +- +- +-if test $ac_cv_header_stdlib_h = yes && test $ac_cv_header_string_h = yes +-then : +- +-printf "%s\n" "#define STDC_HEADERS 1" >>confdefs.h ++ # 10*(2^10) chars as input seems more than enough ++ test $ac_count -gt 10 && break ++ done ++ rm -f conftest.in conftest.tmp conftest.nl conftest.out;; ++esac + ++ $ac_path_EGREP_found && break 3 ++ done ++ done ++ done ++IFS=$as_save_IFS ++ if test -z "$ac_cv_path_EGREP"; then ++ as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 ++ fi ++else ++ ac_cv_path_EGREP=$EGREP + fi + ++ fi ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5 ++$as_echo "$ac_cv_path_EGREP" >&6; } ++ EGREP="$ac_cv_path_EGREP" + + +- +- +- +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether it is safe to define __EXTENSIONS__" >&5 +-printf %s "checking whether it is safe to define __EXTENSIONS__... 
" >&6; } +-if test ${ac_cv_safe_to_define___extensions__+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 ++$as_echo_n "checking for ANSI C header files... " >&6; } ++if ${ac_cv_header_stdc+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ ++#include ++#include ++#include ++#include + +-# define __EXTENSIONS__ 1 +- $ac_includes_default + int +-main (void) ++main () + { + + ; + return 0; + } + _ACEOF +-if ac_fn_c_try_compile "$LINENO" +-then : +- ac_cv_safe_to_define___extensions__=yes +-else $as_nop +- ac_cv_safe_to_define___extensions__=no +-fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++if ac_fn_c_try_compile "$LINENO"; then : ++ ac_cv_header_stdc=yes ++else ++ ac_cv_header_stdc=no + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_safe_to_define___extensions__" >&5 +-printf "%s\n" "$ac_cv_safe_to_define___extensions__" >&6; } ++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether _XOPEN_SOURCE should be defined" >&5 +-printf %s "checking whether _XOPEN_SOURCE should be defined... " >&6; } +-if test ${ac_cv_should_define__xopen_source+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop +- ac_cv_should_define__xopen_source=no +- if test $ac_cv_header_wchar_h = yes +-then : ++if test $ac_cv_header_stdc = yes; then ++ # SunOS 4.x string.h does not declare mem*, contrary to ANSI. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ ++#include + +- #include +- mbstate_t x; +-int +-main (void) +-{ ++_ACEOF ++if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | ++ $EGREP "memchr" >/dev/null 2>&1; then : ++ ++else ++ ac_cv_header_stdc=no ++fi ++rm -f conftest* ++ ++fi ++ ++if test $ac_cv_header_stdc = yes; then ++ # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. 
++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++#include + +- ; +- return 0; +-} + _ACEOF +-if ac_fn_c_try_compile "$LINENO" +-then : ++if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | ++ $EGREP "free" >/dev/null 2>&1; then : ++ ++else ++ ac_cv_header_stdc=no ++fi ++rm -f conftest* + +-else $as_nop ++fi ++ ++if test $ac_cv_header_stdc = yes; then ++ # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. ++ if test "$cross_compiling" = yes; then : ++ : ++else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ ++#include ++#include ++#if ((' ' & 0x0FF) == 0x020) ++# define ISLOWER(c) ('a' <= (c) && (c) <= 'z') ++# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) ++#else ++# define ISLOWER(c) \ ++ (('a' <= (c) && (c) <= 'i') \ ++ || ('j' <= (c) && (c) <= 'r') \ ++ || ('s' <= (c) && (c) <= 'z')) ++# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) ++#endif + +- #define _XOPEN_SOURCE 500 +- #include +- mbstate_t x; ++#define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) + int +-main (void) ++main () + { +- +- ; ++ int i; ++ for (i = 0; i < 256; i++) ++ if (XOR (islower (i), ISLOWER (i)) ++ || toupper (i) != TOUPPER (i)) ++ return 2; + return 0; + } + _ACEOF +-if ac_fn_c_try_compile "$LINENO" +-then : +- ac_cv_should_define__xopen_source=yes ++if ac_fn_c_try_run "$LINENO"; then : ++ ++else ++ ac_cv_header_stdc=no + fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ ++ conftest.$ac_objext conftest.beam conftest.$ac_ext + fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++ + fi + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_should_define__xopen_source" >&5 +-printf "%s\n" "$ac_cv_should_define__xopen_source" >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 ++$as_echo "$ac_cv_header_stdc" >&6; } ++if test $ac_cv_header_stdc = 
yes; then + +- printf "%s\n" "#define _ALL_SOURCE 1" >>confdefs.h ++$as_echo "#define STDC_HEADERS 1" >>confdefs.h ++ ++fi + +- printf "%s\n" "#define _DARWIN_C_SOURCE 1" >>confdefs.h ++# On IRIX 5.3, sys/types and inttypes.h are conflicting. ++for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ ++ inttypes.h stdint.h unistd.h ++do : ++ as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` ++ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default ++" ++if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : ++ cat >>confdefs.h <<_ACEOF ++#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 ++_ACEOF + +- printf "%s\n" "#define _GNU_SOURCE 1" >>confdefs.h ++fi + +- printf "%s\n" "#define _HPUX_ALT_XOPEN_SOCKET_API 1" >>confdefs.h ++done + +- printf "%s\n" "#define _NETBSD_SOURCE 1" >>confdefs.h + +- printf "%s\n" "#define _OPENBSD_SOURCE 1" >>confdefs.h + +- printf "%s\n" "#define _POSIX_PTHREAD_SEMANTICS 1" >>confdefs.h ++ ac_fn_c_check_header_mongrel "$LINENO" "minix/config.h" "ac_cv_header_minix_config_h" "$ac_includes_default" ++if test "x$ac_cv_header_minix_config_h" = xyes; then : ++ MINIX=yes ++else ++ MINIX= ++fi + +- printf "%s\n" "#define __STDC_WANT_IEC_60559_ATTRIBS_EXT__ 1" >>confdefs.h + +- printf "%s\n" "#define __STDC_WANT_IEC_60559_BFP_EXT__ 1" >>confdefs.h ++ if test "$MINIX" = yes; then + +- printf "%s\n" "#define __STDC_WANT_IEC_60559_DFP_EXT__ 1" >>confdefs.h ++$as_echo "#define _POSIX_SOURCE 1" >>confdefs.h + +- printf "%s\n" "#define __STDC_WANT_IEC_60559_FUNCS_EXT__ 1" >>confdefs.h + +- printf "%s\n" "#define __STDC_WANT_IEC_60559_TYPES_EXT__ 1" >>confdefs.h ++$as_echo "#define _POSIX_1_SOURCE 2" >>confdefs.h + +- printf "%s\n" "#define __STDC_WANT_LIB_EXT2__ 1" >>confdefs.h + +- printf "%s\n" "#define __STDC_WANT_MATH_SPEC_FUNCS__ 1" >>confdefs.h ++$as_echo "#define _MINIX 1" >>confdefs.h + +- printf "%s\n" "#define _TANDEM_SOURCE 1" >>confdefs.h ++ fi + +- if test 
$ac_cv_header_minix_config_h = yes +-then : +- MINIX=yes +- printf "%s\n" "#define _MINIX 1" >>confdefs.h + +- printf "%s\n" "#define _POSIX_SOURCE 1" >>confdefs.h ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether it is safe to define __EXTENSIONS__" >&5 ++$as_echo_n "checking whether it is safe to define __EXTENSIONS__... " >&6; } ++if ${ac_cv_safe_to_define___extensions__+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ + +- printf "%s\n" "#define _POSIX_1_SOURCE 2" >>confdefs.h ++# define __EXTENSIONS__ 1 ++ $ac_includes_default ++int ++main () ++{ + +-else $as_nop +- MINIX= ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_c_try_compile "$LINENO"; then : ++ ac_cv_safe_to_define___extensions__=yes ++else ++ ac_cv_safe_to_define___extensions__=no + fi +- if test $ac_cv_safe_to_define___extensions__ = yes +-then : +- printf "%s\n" "#define __EXTENSIONS__ 1" >>confdefs.h +- ++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + fi +- if test $ac_cv_should_define__xopen_source = yes +-then : +- printf "%s\n" "#define _XOPEN_SOURCE 500" >>confdefs.h ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_safe_to_define___extensions__" >&5 ++$as_echo "$ac_cv_safe_to_define___extensions__" >&6; } ++ test $ac_cv_safe_to_define___extensions__ = yes && ++ $as_echo "#define __EXTENSIONS__ 1" >>confdefs.h ++ ++ $as_echo "#define _ALL_SOURCE 1" >>confdefs.h ++ ++ $as_echo "#define _GNU_SOURCE 1" >>confdefs.h ++ ++ $as_echo "#define _POSIX_PTHREAD_SEMANTICS 1" >>confdefs.h ++ ++ $as_echo "#define _TANDEM_SOURCE 1" >>confdefs.h + +-fi + + ac_ext=c + ac_cpp='$CPP $CPPFLAGS' +@@ -5212,12 +4792,11 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. 
+ set dummy ${ac_tool_prefix}gcc; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... " >&6; } +-if test ${ac_cv_prog_CC+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_CC+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. + else +@@ -5225,15 +4804,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR + for as_dir in $PATH + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac ++ test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="${ac_tool_prefix}gcc" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi + done +@@ -5244,11 +4819,11 @@ fi + fi + CC=$ac_cv_prog_CC + if test -n "$CC"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +-printf "%s\n" "$CC" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 ++$as_echo "$CC" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } + fi + + +@@ -5257,12 +4832,11 @@ if test -z "$ac_cv_prog_CC"; then + ac_ct_CC=$CC + # Extract the first word of "gcc", so it can be a program name with args. + set dummy gcc; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... 
" >&6; } +-if test ${ac_cv_prog_ac_ct_CC+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_ac_ct_CC+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. + else +@@ -5270,15 +4844,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR + for as_dir in $PATH + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac ++ test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CC="gcc" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi + done +@@ -5289,11 +4859,11 @@ fi + fi + ac_ct_CC=$ac_cv_prog_ac_ct_CC + if test -n "$ac_ct_CC"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 +-printf "%s\n" "$ac_ct_CC" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 ++$as_echo "$ac_ct_CC" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } + fi + + if test "x$ac_ct_CC" = x; then +@@ -5301,8 +4871,8 @@ fi + else + case $cross_compiling:$ac_tool_warned in + yes:) +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +-printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++$as_echo "$as_me: WARNING: using cross tools not prefixed 
with host triplet" >&2;} + ac_tool_warned=yes ;; + esac + CC=$ac_ct_CC +@@ -5315,12 +4885,11 @@ if test -z "$CC"; then + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. + set dummy ${ac_tool_prefix}cc; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... " >&6; } +-if test ${ac_cv_prog_CC+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_CC+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. + else +@@ -5328,15 +4897,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR + for as_dir in $PATH + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac ++ test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="${ac_tool_prefix}cc" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi + done +@@ -5347,11 +4912,11 @@ fi + fi + CC=$ac_cv_prog_CC + if test -n "$CC"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +-printf "%s\n" "$CC" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 ++$as_echo "$CC" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } + fi + + +@@ -5360,12 +4925,11 @@ fi + if test -z "$CC"; then + # Extract the first word of "cc", so it can be a program name with args. 
+ set dummy cc; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... " >&6; } +-if test ${ac_cv_prog_CC+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_CC+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. + else +@@ -5374,19 +4938,15 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR + for as_dir in $PATH + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac ++ test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then +- if test "$as_dir$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ++ if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then + ac_prog_rejected=yes + continue + fi + ac_cv_prog_CC="cc" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi + done +@@ -5402,18 +4962,18 @@ if test $ac_prog_rejected = yes; then + # However, it has the same basename, so the bogon will be chosen + # first if we set CC to just the basename; use the full file name. 
+ shift +- ac_cv_prog_CC="$as_dir$ac_word${1+' '}$@" ++ ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" + fi + fi + fi + fi + CC=$ac_cv_prog_CC + if test -n "$CC"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +-printf "%s\n" "$CC" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 ++$as_echo "$CC" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } + fi + + +@@ -5424,12 +4984,11 @@ if test -z "$CC"; then + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. + set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... " >&6; } +-if test ${ac_cv_prog_CC+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_CC+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. + else +@@ -5437,15 +4996,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR + for as_dir in $PATH + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac ++ test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="$ac_tool_prefix$ac_prog" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi + done +@@ -5456,11 +5011,11 @@ fi + fi + CC=$ac_cv_prog_CC + if test -n "$CC"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +-printf "%s\n" "$CC" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 ++$as_echo "$CC" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } + fi + + +@@ -5473,12 +5028,11 @@ if test -z "$CC"; then + do + # Extract the first word of "$ac_prog", so it can be a program name with args. + set dummy $ac_prog; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... " >&6; } +-if test ${ac_cv_prog_ac_ct_CC+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_ac_ct_CC+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. + else +@@ -5486,15 +5040,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR + for as_dir in $PATH + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac ++ test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CC="$ac_prog" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi + done +@@ -5505,11 +5055,11 @@ fi + fi + ac_ct_CC=$ac_cv_prog_ac_ct_CC + if test -n "$ac_ct_CC"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 +-printf "%s\n" "$ac_ct_CC" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 ++$as_echo "$ac_ct_CC" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } + fi + + +@@ -5521,138 +5071,34 @@ done + else + case $cross_compiling:$ac_tool_warned in + yes:) +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +-printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +-ac_tool_warned=yes ;; +-esac +- CC=$ac_ct_CC +- fi +-fi +- +-fi +-if test -z "$CC"; then +- if test -n "$ac_tool_prefix"; then +- # Extract the first word of "${ac_tool_prefix}clang", so it can be a program name with args. +-set dummy ${ac_tool_prefix}clang; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... " >&6; } +-if test ${ac_cv_prog_CC+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop +- if test -n "$CC"; then +- ac_cv_prog_CC="$CC" # Let the user override the test. 
+-else +-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +-for as_dir in $PATH +-do +- IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac +- for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then +- ac_cv_prog_CC="${ac_tool_prefix}clang" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 +- break 2 +- fi +-done +- done +-IFS=$as_save_IFS +- +-fi +-fi +-CC=$ac_cv_prog_CC +-if test -n "$CC"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +-printf "%s\n" "$CC" >&6; } +-else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } +-fi +- +- +-fi +-if test -z "$ac_cv_prog_CC"; then +- ac_ct_CC=$CC +- # Extract the first word of "clang", so it can be a program name with args. +-set dummy clang; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... " >&6; } +-if test ${ac_cv_prog_ac_ct_CC+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop +- if test -n "$ac_ct_CC"; then +- ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. 
+-else +-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +-for as_dir in $PATH +-do +- IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac +- for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then +- ac_cv_prog_ac_ct_CC="clang" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 +- break 2 +- fi +-done +- done +-IFS=$as_save_IFS +- +-fi +-fi +-ac_ct_CC=$ac_cv_prog_ac_ct_CC +-if test -n "$ac_ct_CC"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 +-printf "%s\n" "$ac_ct_CC" >&6; } +-else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } +-fi +- +- if test "x$ac_ct_CC" = x; then +- CC="" +- else +- case $cross_compiling:$ac_tool_warned in +-yes:) +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +-printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} + ac_tool_warned=yes ;; + esac + CC=$ac_ct_CC + fi +-else +- CC="$ac_cv_prog_CC" + fi + + fi + + +-test -z "$CC" && { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +-printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} ++test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 ++$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} + as_fn_error $? "no acceptable C compiler found in \$PATH + See \`config.log' for more details" "$LINENO" 5; } + + # Provide some information about the compiler. 
+-printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 ++$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 + set X $ac_compile + ac_compiler=$2 +-for ac_option in --version -v -V -qversion -version; do ++for ac_option in --version -v -V -qversion; do + { { ac_try="$ac_compiler $ac_option >&5" + case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; + esac + eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +-printf "%s\n" "$ac_try_echo"; } >&5 ++$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compiler $ac_option >&5") 2>conftest.err + ac_status=$? + if test -s conftest.err; then +@@ -5662,21 +5108,20 @@ printf "%s\n" "$ac_try_echo"; } >&5 + cat conftest.er1 >&5 + fi + rm -f conftest.er1 conftest.err +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } + done + +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the compiler supports GNU C" >&5 +-printf %s "checking whether the compiler supports GNU C... " >&6; } +-if test ${ac_cv_c_compiler_gnu+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 ++$as_echo_n "checking whether we are using the GNU C compiler... " >&6; } ++if ${ac_cv_c_compiler_gnu+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. 
*/ + + int +-main (void) ++main () + { + #ifndef __GNUC__ + choke me +@@ -5686,33 +5131,29 @@ main (void) + return 0; + } + _ACEOF +-if ac_fn_c_try_compile "$LINENO" +-then : ++if ac_fn_c_try_compile "$LINENO"; then : + ac_compiler_gnu=yes +-else $as_nop ++else + ac_compiler_gnu=no + fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_cv_c_compiler_gnu=$ac_compiler_gnu + + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 +-printf "%s\n" "$ac_cv_c_compiler_gnu" >&6; } +-ac_compiler_gnu=$ac_cv_c_compiler_gnu +- ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 ++$as_echo "$ac_cv_c_compiler_gnu" >&6; } + if test $ac_compiler_gnu = yes; then + GCC=yes + else + GCC= + fi +-ac_test_CFLAGS=${CFLAGS+y} ++ac_test_CFLAGS=${CFLAGS+set} + ac_save_CFLAGS=$CFLAGS +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 +-printf %s "checking whether $CC accepts -g... " >&6; } +-if test ${ac_cv_prog_cc_g+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 ++$as_echo_n "checking whether $CC accepts -g... " >&6; } ++if ${ac_cv_prog_cc_g+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + ac_save_c_werror_flag=$ac_c_werror_flag + ac_c_werror_flag=yes + ac_cv_prog_cc_g=no +@@ -5721,60 +5162,57 @@ else $as_nop + /* end confdefs.h. */ + + int +-main (void) ++main () + { + + ; + return 0; + } + _ACEOF +-if ac_fn_c_try_compile "$LINENO" +-then : ++if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_prog_cc_g=yes +-else $as_nop ++else + CFLAGS="" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. 
*/ + + int +-main (void) ++main () + { + + ; + return 0; + } + _ACEOF +-if ac_fn_c_try_compile "$LINENO" +-then : ++if ac_fn_c_try_compile "$LINENO"; then : + +-else $as_nop ++else + ac_c_werror_flag=$ac_save_c_werror_flag + CFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ + + int +-main (void) ++main () + { + + ; + return 0; + } + _ACEOF +-if ac_fn_c_try_compile "$LINENO" +-then : ++if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_prog_cc_g=yes + fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_c_werror_flag=$ac_save_c_werror_flag + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 +-printf "%s\n" "$ac_cv_prog_cc_g" >&6; } +-if test $ac_test_CFLAGS; then ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 ++$as_echo "$ac_cv_prog_cc_g" >&6; } ++if test "$ac_test_CFLAGS" = set; then + CFLAGS=$ac_save_CFLAGS + elif test $ac_cv_prog_cc_g = yes; then + if test "$GCC" = yes; then +@@ -5786,147 +5224,97 @@ else + if test "$GCC" = yes; then + CFLAGS="-O2" + else +- CFLAGS= +- fi +-fi +-ac_prog_cc_stdc=no +-if test x$ac_prog_cc_stdc = xno +-then : +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CC option to enable C11 features" >&5 +-printf %s "checking for $CC option to enable C11 features... " >&6; } +-if test ${ac_cv_prog_cc_c11+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop +- ac_cv_prog_cc_c11=no +-ac_save_CC=$CC +-cat confdefs.h - <<_ACEOF >conftest.$ac_ext +-/* end confdefs.h. 
*/ +-$ac_c_conftest_c11_program +-_ACEOF +-for ac_arg in '' -std=gnu11 +-do +- CC="$ac_save_CC $ac_arg" +- if ac_fn_c_try_compile "$LINENO" +-then : +- ac_cv_prog_cc_c11=$ac_arg +-fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam +- test "x$ac_cv_prog_cc_c11" != "xno" && break +-done +-rm -f conftest.$ac_ext +-CC=$ac_save_CC +-fi +- +-if test "x$ac_cv_prog_cc_c11" = xno +-then : +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 +-printf "%s\n" "unsupported" >&6; } +-else $as_nop +- if test "x$ac_cv_prog_cc_c11" = x +-then : +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 +-printf "%s\n" "none needed" >&6; } +-else $as_nop +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c11" >&5 +-printf "%s\n" "$ac_cv_prog_cc_c11" >&6; } +- CC="$CC $ac_cv_prog_cc_c11" +-fi +- ac_cv_prog_cc_stdc=$ac_cv_prog_cc_c11 +- ac_prog_cc_stdc=c11 +-fi +-fi +-if test x$ac_prog_cc_stdc = xno +-then : +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CC option to enable C99 features" >&5 +-printf %s "checking for $CC option to enable C99 features... " >&6; } +-if test ${ac_cv_prog_cc_c99+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop +- ac_cv_prog_cc_c99=no +-ac_save_CC=$CC +-cat confdefs.h - <<_ACEOF >conftest.$ac_ext +-/* end confdefs.h. 
*/ +-$ac_c_conftest_c99_program +-_ACEOF +-for ac_arg in '' -std=gnu99 -std=c99 -c99 -qlanglvl=extc1x -qlanglvl=extc99 -AC99 -D_STDC_C99= +-do +- CC="$ac_save_CC $ac_arg" +- if ac_fn_c_try_compile "$LINENO" +-then : +- ac_cv_prog_cc_c99=$ac_arg +-fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam +- test "x$ac_cv_prog_cc_c99" != "xno" && break +-done +-rm -f conftest.$ac_ext +-CC=$ac_save_CC ++ CFLAGS= ++ fi + fi +- +-if test "x$ac_cv_prog_cc_c99" = xno +-then : +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 +-printf "%s\n" "unsupported" >&6; } +-else $as_nop +- if test "x$ac_cv_prog_cc_c99" = x +-then : +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 +-printf "%s\n" "none needed" >&6; } +-else $as_nop +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c99" >&5 +-printf "%s\n" "$ac_cv_prog_cc_c99" >&6; } +- CC="$CC $ac_cv_prog_cc_c99" +-fi +- ac_cv_prog_cc_stdc=$ac_cv_prog_cc_c99 +- ac_prog_cc_stdc=c99 +-fi +-fi +-if test x$ac_prog_cc_stdc = xno +-then : +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CC option to enable C89 features" >&5 +-printf %s "checking for $CC option to enable C89 features... " >&6; } +-if test ${ac_cv_prog_cc_c89+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 ++$as_echo_n "checking for $CC option to accept ISO C89... " >&6; } ++if ${ac_cv_prog_cc_c89+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + ac_cv_prog_cc_c89=no + ac_save_CC=$CC + cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ +-$ac_c_conftest_c89_program ++#include ++#include ++struct stat; ++/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. 
*/ ++struct buf { int x; }; ++FILE * (*rcsopen) (struct buf *, struct stat *, int); ++static char *e (p, i) ++ char **p; ++ int i; ++{ ++ return p[i]; ++} ++static char *f (char * (*g) (char **, int), char **p, ...) ++{ ++ char *s; ++ va_list v; ++ va_start (v,p); ++ s = g (p, va_arg (v,int)); ++ va_end (v); ++ return s; ++} ++ ++/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has ++ function prototypes and stuff, but not '\xHH' hex character constants. ++ These don't provoke an error unfortunately, instead are silently treated ++ as 'x'. The following induces an error, until -std is added to get ++ proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an ++ array size at least. It's necessary to write '\x00'==0 to get something ++ that's true only with -std. */ ++int osf4_cc_array ['\x00' == 0 ? 1 : -1]; ++ ++/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters ++ inside strings and character constants. */ ++#define FOO(x) 'x' ++int xlc6_cc_array[FOO(a) == 'x' ? 
1 : -1]; ++ ++int test (int i, double x); ++struct s1 {int (*f) (int a);}; ++struct s2 {int (*f) (double a);}; ++int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); ++int argc; ++char **argv; ++int ++main () ++{ ++return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; ++ ; ++ return 0; ++} + _ACEOF +-for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" ++for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ ++ -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" + do + CC="$ac_save_CC $ac_arg" +- if ac_fn_c_try_compile "$LINENO" +-then : ++ if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_prog_cc_c89=$ac_arg + fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam ++rm -f core conftest.err conftest.$ac_objext + test "x$ac_cv_prog_cc_c89" != "xno" && break + done + rm -f conftest.$ac_ext + CC=$ac_save_CC +-fi + +-if test "x$ac_cv_prog_cc_c89" = xno +-then : +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 +-printf "%s\n" "unsupported" >&6; } +-else $as_nop +- if test "x$ac_cv_prog_cc_c89" = x +-then : +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 +-printf "%s\n" "none needed" >&6; } +-else $as_nop +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 +-printf "%s\n" "$ac_cv_prog_cc_c89" >&6; } +- CC="$CC $ac_cv_prog_cc_c89" +-fi +- ac_cv_prog_cc_stdc=$ac_cv_prog_cc_c89 +- ac_prog_cc_stdc=c89 + fi ++# AC_CACHE_VAL ++case "x$ac_cv_prog_cc_c89" in ++ x) ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 ++$as_echo "none needed" >&6; } ;; ++ xno) ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 ++$as_echo "unsupported" >&6; } ;; ++ *) ++ CC="$CC $ac_cv_prog_cc_c89" ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 ++$as_echo "$ac_cv_prog_cc_c89" >&6; } ;; ++esac ++if test "x$ac_cv_prog_cc_c89" != xno; then : ++ + fi + + ac_ext=c +@@ 
-5935,23 +5323,21 @@ ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' + ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' + ac_compiler_gnu=$ac_cv_c_compiler_gnu + +- +- ac_ext=c ++ac_ext=c + ac_cpp='$CPP $CPPFLAGS' + ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' + ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' + ac_compiler_gnu=$ac_cv_c_compiler_gnu +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $CC understands -c and -o together" >&5 +-printf %s "checking whether $CC understands -c and -o together... " >&6; } +-if test ${am_cv_prog_cc_c_o+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC understands -c and -o together" >&5 ++$as_echo_n "checking whether $CC understands -c and -o together... " >&6; } ++if ${am_cv_prog_cc_c_o+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ + + int +-main (void) ++main () + { + + ; +@@ -5979,8 +5365,8 @@ _ACEOF + rm -f core conftest* + unset am_i + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $am_cv_prog_cc_c_o" >&5 +-printf "%s\n" "$am_cv_prog_cc_c_o" >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_prog_cc_c_o" >&5 ++$as_echo "$am_cv_prog_cc_c_o" >&6; } + if test "$am_cv_prog_cc_c_o" != yes; then + # Losing compiler, so override with the script. + # FIXME: It is wrong to rewrite CC. +@@ -5998,12 +5384,11 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu + + depcc="$CC" am_compiler_list= + +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 +-printf %s "checking dependency style of $depcc... 
" >&6; } +-if test ${am_cv_CC_dependencies_compiler_type+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 ++$as_echo_n "checking dependency style of $depcc... " >&6; } ++if ${am_cv_CC_dependencies_compiler_type+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then + # We make a subdir and do the tests there. Otherwise we can end up + # making bogus files that we don't know about and never remove. For +@@ -6110,8 +5495,8 @@ else + fi + + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $am_cv_CC_dependencies_compiler_type" >&5 +-printf "%s\n" "$am_cv_CC_dependencies_compiler_type" >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CC_dependencies_compiler_type" >&5 ++$as_echo "$am_cv_CC_dependencies_compiler_type" >&6; } + CCDEPMODE=depmode=$am_cv_CC_dependencies_compiler_type + + if +@@ -6125,12 +5510,6 @@ else + fi + + +- +- +- +- +- +- + ac_ext=cpp + ac_cpp='$CXXCPP $CPPFLAGS' + ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +@@ -6141,16 +5520,15 @@ if test -z "$CXX"; then + CXX=$CCC + else + if test -n "$ac_tool_prefix"; then +- for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC clang++ ++ for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. + set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... " >&6; } +-if test ${ac_cv_prog_CXX+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... 
" >&6; } ++if ${ac_cv_prog_CXX+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test -n "$CXX"; then + ac_cv_prog_CXX="$CXX" # Let the user override the test. + else +@@ -6158,15 +5536,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR + for as_dir in $PATH + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac ++ test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CXX="$ac_tool_prefix$ac_prog" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi + done +@@ -6177,11 +5551,11 @@ fi + fi + CXX=$ac_cv_prog_CXX + if test -n "$CXX"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CXX" >&5 +-printf "%s\n" "$CXX" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXX" >&5 ++$as_echo "$CXX" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } + fi + + +@@ -6190,16 +5564,15 @@ fi + fi + if test -z "$CXX"; then + ac_ct_CXX=$CXX +- for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC clang++ ++ for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC + do + # Extract the first word of "$ac_prog", so it can be a program name with args. + set dummy $ac_prog; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... " >&6; } +-if test ${ac_cv_prog_ac_ct_CXX+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... 
" >&6; } ++if ${ac_cv_prog_ac_ct_CXX+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test -n "$ac_ct_CXX"; then + ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test. + else +@@ -6207,15 +5580,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR + for as_dir in $PATH + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac ++ test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CXX="$ac_prog" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi + done +@@ -6226,11 +5595,11 @@ fi + fi + ac_ct_CXX=$ac_cv_prog_ac_ct_CXX + if test -n "$ac_ct_CXX"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CXX" >&5 +-printf "%s\n" "$ac_ct_CXX" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CXX" >&5 ++$as_echo "$ac_ct_CXX" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } + fi + + +@@ -6242,8 +5611,8 @@ done + else + case $cross_compiling:$ac_tool_warned in + yes:) +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +-printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} + ac_tool_warned=yes ;; + esac + CXX=$ac_ct_CXX +@@ -6253,7 +5622,7 @@ fi + fi + fi + # Provide some information about the compiler. 
+-printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for C++ compiler version" >&5 ++$as_echo "$as_me:${as_lineno-$LINENO}: checking for C++ compiler version" >&5 + set X $ac_compile + ac_compiler=$2 + for ac_option in --version -v -V -qversion; do +@@ -6263,7 +5632,7 @@ case "(($ac_try" in + *) ac_try_echo=$ac_try;; + esac + eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +-printf "%s\n" "$ac_try_echo"; } >&5 ++$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compiler $ac_option >&5") 2>conftest.err + ac_status=$? + if test -s conftest.err; then +@@ -6273,21 +5642,20 @@ printf "%s\n" "$ac_try_echo"; } >&5 + cat conftest.er1 >&5 + fi + rm -f conftest.er1 conftest.err +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } + done + +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the compiler supports GNU C++" >&5 +-printf %s "checking whether the compiler supports GNU C++... " >&6; } +-if test ${ac_cv_cxx_compiler_gnu+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C++ compiler" >&5 ++$as_echo_n "checking whether we are using the GNU C++ compiler... " >&6; } ++if ${ac_cv_cxx_compiler_gnu+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. 
*/ + + int +-main (void) ++main () + { + #ifndef __GNUC__ + choke me +@@ -6297,33 +5665,29 @@ main (void) + return 0; + } + _ACEOF +-if ac_fn_cxx_try_compile "$LINENO" +-then : ++if ac_fn_cxx_try_compile "$LINENO"; then : + ac_compiler_gnu=yes +-else $as_nop ++else + ac_compiler_gnu=no + fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_cv_cxx_compiler_gnu=$ac_compiler_gnu + + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_cxx_compiler_gnu" >&5 +-printf "%s\n" "$ac_cv_cxx_compiler_gnu" >&6; } +-ac_compiler_gnu=$ac_cv_cxx_compiler_gnu +- ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_cxx_compiler_gnu" >&5 ++$as_echo "$ac_cv_cxx_compiler_gnu" >&6; } + if test $ac_compiler_gnu = yes; then + GXX=yes + else + GXX= + fi +-ac_test_CXXFLAGS=${CXXFLAGS+y} ++ac_test_CXXFLAGS=${CXXFLAGS+set} + ac_save_CXXFLAGS=$CXXFLAGS +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $CXX accepts -g" >&5 +-printf %s "checking whether $CXX accepts -g... " >&6; } +-if test ${ac_cv_prog_cxx_g+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CXX accepts -g" >&5 ++$as_echo_n "checking whether $CXX accepts -g... " >&6; } ++if ${ac_cv_prog_cxx_g+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + ac_save_cxx_werror_flag=$ac_cxx_werror_flag + ac_cxx_werror_flag=yes + ac_cv_prog_cxx_g=no +@@ -6332,60 +5696,57 @@ else $as_nop + /* end confdefs.h. */ + + int +-main (void) ++main () + { + + ; + return 0; + } + _ACEOF +-if ac_fn_cxx_try_compile "$LINENO" +-then : ++if ac_fn_cxx_try_compile "$LINENO"; then : + ac_cv_prog_cxx_g=yes +-else $as_nop ++else + CXXFLAGS="" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. 
*/ + + int +-main (void) ++main () + { + + ; + return 0; + } + _ACEOF +-if ac_fn_cxx_try_compile "$LINENO" +-then : ++if ac_fn_cxx_try_compile "$LINENO"; then : + +-else $as_nop ++else + ac_cxx_werror_flag=$ac_save_cxx_werror_flag + CXXFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ + + int +-main (void) ++main () + { + + ; + return 0; + } + _ACEOF +-if ac_fn_cxx_try_compile "$LINENO" +-then : ++if ac_fn_cxx_try_compile "$LINENO"; then : + ac_cv_prog_cxx_g=yes + fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_cxx_werror_flag=$ac_save_cxx_werror_flag + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_g" >&5 +-printf "%s\n" "$ac_cv_prog_cxx_g" >&6; } +-if test $ac_test_CXXFLAGS; then ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_g" >&5 ++$as_echo "$ac_cv_prog_cxx_g" >&6; } ++if test "$ac_test_CXXFLAGS" = set; then + CXXFLAGS=$ac_save_CXXFLAGS + elif test $ac_cv_prog_cxx_g = yes; then + if test "$GXX" = yes; then +@@ -6400,100 +5761,6 @@ else + CXXFLAGS= + fi + fi +-ac_prog_cxx_stdcxx=no +-if test x$ac_prog_cxx_stdcxx = xno +-then : +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CXX option to enable C++11 features" >&5 +-printf %s "checking for $CXX option to enable C++11 features... " >&6; } +-if test ${ac_cv_prog_cxx_cxx11+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop +- ac_cv_prog_cxx_cxx11=no +-ac_save_CXX=$CXX +-cat confdefs.h - <<_ACEOF >conftest.$ac_ext +-/* end confdefs.h. 
*/ +-$ac_cxx_conftest_cxx11_program +-_ACEOF +-for ac_arg in '' -std=gnu++11 -std=gnu++0x -std=c++11 -std=c++0x -qlanglvl=extended0x -AA +-do +- CXX="$ac_save_CXX $ac_arg" +- if ac_fn_cxx_try_compile "$LINENO" +-then : +- ac_cv_prog_cxx_cxx11=$ac_arg +-fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam +- test "x$ac_cv_prog_cxx_cxx11" != "xno" && break +-done +-rm -f conftest.$ac_ext +-CXX=$ac_save_CXX +-fi +- +-if test "x$ac_cv_prog_cxx_cxx11" = xno +-then : +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 +-printf "%s\n" "unsupported" >&6; } +-else $as_nop +- if test "x$ac_cv_prog_cxx_cxx11" = x +-then : +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 +-printf "%s\n" "none needed" >&6; } +-else $as_nop +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_cxx11" >&5 +-printf "%s\n" "$ac_cv_prog_cxx_cxx11" >&6; } +- CXX="$CXX $ac_cv_prog_cxx_cxx11" +-fi +- ac_cv_prog_cxx_stdcxx=$ac_cv_prog_cxx_cxx11 +- ac_prog_cxx_stdcxx=cxx11 +-fi +-fi +-if test x$ac_prog_cxx_stdcxx = xno +-then : +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CXX option to enable C++98 features" >&5 +-printf %s "checking for $CXX option to enable C++98 features... " >&6; } +-if test ${ac_cv_prog_cxx_cxx98+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop +- ac_cv_prog_cxx_cxx98=no +-ac_save_CXX=$CXX +-cat confdefs.h - <<_ACEOF >conftest.$ac_ext +-/* end confdefs.h. 
*/ +-$ac_cxx_conftest_cxx98_program +-_ACEOF +-for ac_arg in '' -std=gnu++98 -std=c++98 -qlanglvl=extended -AA +-do +- CXX="$ac_save_CXX $ac_arg" +- if ac_fn_cxx_try_compile "$LINENO" +-then : +- ac_cv_prog_cxx_cxx98=$ac_arg +-fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam +- test "x$ac_cv_prog_cxx_cxx98" != "xno" && break +-done +-rm -f conftest.$ac_ext +-CXX=$ac_save_CXX +-fi +- +-if test "x$ac_cv_prog_cxx_cxx98" = xno +-then : +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 +-printf "%s\n" "unsupported" >&6; } +-else $as_nop +- if test "x$ac_cv_prog_cxx_cxx98" = x +-then : +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 +-printf "%s\n" "none needed" >&6; } +-else $as_nop +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_cxx98" >&5 +-printf "%s\n" "$ac_cv_prog_cxx_cxx98" >&6; } +- CXX="$CXX $ac_cv_prog_cxx_cxx98" +-fi +- ac_cv_prog_cxx_stdcxx=$ac_cv_prog_cxx_cxx98 +- ac_prog_cxx_stdcxx=cxx98 +-fi +-fi +- + ac_ext=c + ac_cpp='$CPP $CPPFLAGS' + ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +@@ -6502,12 +5769,11 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu + + depcc="$CXX" am_compiler_list= + +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 +-printf %s "checking dependency style of $depcc... " >&6; } +-if test ${am_cv_CXX_dependencies_compiler_type+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 ++$as_echo_n "checking dependency style of $depcc... " >&6; } ++if ${am_cv_CXX_dependencies_compiler_type+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then + # We make a subdir and do the tests there. Otherwise we can end up + # making bogus files that we don't know about and never remove. 
For +@@ -6614,8 +5880,8 @@ else + fi + + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $am_cv_CXX_dependencies_compiler_type" >&5 +-printf "%s\n" "$am_cv_CXX_dependencies_compiler_type" >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CXX_dependencies_compiler_type" >&5 ++$as_echo "$am_cv_CXX_dependencies_compiler_type" >&6; } + CXXDEPMODE=depmode=$am_cv_CXX_dependencies_compiler_type + + if +@@ -6630,19 +5896,17 @@ fi + + + # Check whether --enable-largefile was given. +-if test ${enable_largefile+y} +-then : ++if test "${enable_largefile+set}" = set; then : + enableval=$enable_largefile; + fi + + if test "$enable_largefile" != no; then + +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for special C compiler options needed for large files" >&5 +-printf %s "checking for special C compiler options needed for large files... " >&6; } +-if test ${ac_cv_sys_largefile_CC+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for special C compiler options needed for large files" >&5 ++$as_echo_n "checking for special C compiler options needed for large files... " >&6; } ++if ${ac_cv_sys_largefile_CC+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + ac_cv_sys_largefile_CC=no + if test "$GCC" != yes; then + ac_save_CC=$CC +@@ -6656,47 +5920,44 @@ else $as_nop + We can't simply define LARGE_OFF_T to be 9223372036854775807, + since some C++ compilers masquerading as C compilers + incorrectly reject 9223372036854775807. */ +-#define LARGE_OFF_T (((off_t) 1 << 31 << 31) - 1 + ((off_t) 1 << 31 << 31)) ++#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) + int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 + && LARGE_OFF_T % 2147483647 == 1) + ? 
1 : -1]; + int +-main (void) ++main () + { + + ; + return 0; + } + _ACEOF +- if ac_fn_c_try_compile "$LINENO" +-then : ++ if ac_fn_c_try_compile "$LINENO"; then : + break + fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam ++rm -f core conftest.err conftest.$ac_objext + CC="$CC -n32" +- if ac_fn_c_try_compile "$LINENO" +-then : ++ if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_sys_largefile_CC=' -n32'; break + fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam ++rm -f core conftest.err conftest.$ac_objext + break + done + CC=$ac_save_CC + rm -f conftest.$ac_ext + fi + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_largefile_CC" >&5 +-printf "%s\n" "$ac_cv_sys_largefile_CC" >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_largefile_CC" >&5 ++$as_echo "$ac_cv_sys_largefile_CC" >&6; } + if test "$ac_cv_sys_largefile_CC" != no; then + CC=$CC$ac_cv_sys_largefile_CC + fi + +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for _FILE_OFFSET_BITS value needed for large files" >&5 +-printf %s "checking for _FILE_OFFSET_BITS value needed for large files... " >&6; } +-if test ${ac_cv_sys_file_offset_bits+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _FILE_OFFSET_BITS value needed for large files" >&5 ++$as_echo_n "checking for _FILE_OFFSET_BITS value needed for large files... " >&6; } ++if ${ac_cv_sys_file_offset_bits+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + while :; do + cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ +@@ -6705,23 +5966,22 @@ else $as_nop + We can't simply define LARGE_OFF_T to be 9223372036854775807, + since some C++ compilers masquerading as C compilers + incorrectly reject 9223372036854775807. 
*/ +-#define LARGE_OFF_T (((off_t) 1 << 31 << 31) - 1 + ((off_t) 1 << 31 << 31)) ++#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) + int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 + && LARGE_OFF_T % 2147483647 == 1) + ? 1 : -1]; + int +-main (void) ++main () + { + + ; + return 0; + } + _ACEOF +-if ac_fn_c_try_compile "$LINENO" +-then : ++if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_sys_file_offset_bits=no; break + fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ + #define _FILE_OFFSET_BITS 64 +@@ -6730,43 +5990,43 @@ rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext + We can't simply define LARGE_OFF_T to be 9223372036854775807, + since some C++ compilers masquerading as C compilers + incorrectly reject 9223372036854775807. */ +-#define LARGE_OFF_T (((off_t) 1 << 31 << 31) - 1 + ((off_t) 1 << 31 << 31)) ++#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) + int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 + && LARGE_OFF_T % 2147483647 == 1) + ? 
1 : -1]; + int +-main (void) ++main () + { + + ; + return 0; + } + _ACEOF +-if ac_fn_c_try_compile "$LINENO" +-then : ++if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_sys_file_offset_bits=64; break + fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_cv_sys_file_offset_bits=unknown + break + done + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_file_offset_bits" >&5 +-printf "%s\n" "$ac_cv_sys_file_offset_bits" >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_file_offset_bits" >&5 ++$as_echo "$ac_cv_sys_file_offset_bits" >&6; } + case $ac_cv_sys_file_offset_bits in #( + no | unknown) ;; + *) +-printf "%s\n" "#define _FILE_OFFSET_BITS $ac_cv_sys_file_offset_bits" >>confdefs.h ++cat >>confdefs.h <<_ACEOF ++#define _FILE_OFFSET_BITS $ac_cv_sys_file_offset_bits ++_ACEOF + ;; + esac + rm -rf conftest* + if test $ac_cv_sys_file_offset_bits = unknown; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for _LARGE_FILES value needed for large files" >&5 +-printf %s "checking for _LARGE_FILES value needed for large files... " >&6; } +-if test ${ac_cv_sys_large_files+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _LARGE_FILES value needed for large files" >&5 ++$as_echo_n "checking for _LARGE_FILES value needed for large files... " >&6; } ++if ${ac_cv_sys_large_files+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + while :; do + cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ +@@ -6775,23 +6035,22 @@ else $as_nop + We can't simply define LARGE_OFF_T to be 9223372036854775807, + since some C++ compilers masquerading as C compilers + incorrectly reject 9223372036854775807. 
*/ +-#define LARGE_OFF_T (((off_t) 1 << 31 << 31) - 1 + ((off_t) 1 << 31 << 31)) ++#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) + int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 + && LARGE_OFF_T % 2147483647 == 1) + ? 1 : -1]; + int +-main (void) ++main () + { + + ; + return 0; + } + _ACEOF +-if ac_fn_c_try_compile "$LINENO" +-then : ++if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_sys_large_files=no; break + fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ + #define _LARGE_FILES 1 +@@ -6800,60 +6059,119 @@ rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext + We can't simply define LARGE_OFF_T to be 9223372036854775807, + since some C++ compilers masquerading as C compilers + incorrectly reject 9223372036854775807. */ +-#define LARGE_OFF_T (((off_t) 1 << 31 << 31) - 1 + ((off_t) 1 << 31 << 31)) ++#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) + int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 + && LARGE_OFF_T % 2147483647 == 1) + ? 
1 : -1]; + int +-main (void) ++main () + { + + ; + return 0; + } + _ACEOF +-if ac_fn_c_try_compile "$LINENO" +-then : ++if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_sys_large_files=1; break + fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_cv_sys_large_files=unknown + break + done + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_large_files" >&5 +-printf "%s\n" "$ac_cv_sys_large_files" >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_large_files" >&5 ++$as_echo "$ac_cv_sys_large_files" >&6; } + case $ac_cv_sys_large_files in #( + no | unknown) ;; + *) +-printf "%s\n" "#define _LARGE_FILES $ac_cv_sys_large_files" >>confdefs.h ++cat >>confdefs.h <<_ACEOF ++#define _LARGE_FILES $ac_cv_sys_large_files ++_ACEOF + ;; + esac + rm -rf conftest* + fi ++ ++ ++fi ++ ++ac_ext=c ++ac_cpp='$CPP $CPPFLAGS' ++ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ++ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ++ac_compiler_gnu=$ac_cv_c_compiler_gnu ++ ++ac_bolt_plugin_warn_cflags= ++save_CFLAGS="$CFLAGS" ++for real_option in -Wall; do ++ # Do the check with the no- prefix removed since gcc silently ++ # accepts any -Wno-* option on purpose ++ case $real_option in ++ -Wno-*) option=-W`expr x$real_option : 'x-Wno-\(.*\)'` ;; ++ *) option=$real_option ;; ++ esac ++ as_acx_Woption=`$as_echo "acx_cv_prog_cc_warning_$option" | $as_tr_sh` ++ ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC supports $option" >&5 ++$as_echo_n "checking whether $CC supports $option... " >&6; } ++if eval \${$as_acx_Woption+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ CFLAGS="$option" ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. 
*/ ++ ++int ++main () ++{ ++ ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_c_try_compile "$LINENO"; then : ++ eval "$as_acx_Woption=yes" ++else ++ eval "$as_acx_Woption=no" ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ++ + fi ++eval ac_res=\$$as_acx_Woption ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 ++$as_echo "$ac_res" >&6; } ++ if test `eval 'as_val=${'$as_acx_Woption'};$as_echo "$as_val"'` = yes; then : ++ ac_bolt_plugin_warn_cflags="$ac_bolt_plugin_warn_cflags${ac_bolt_plugin_warn_cflags:+ }$real_option" ++fi ++ done ++CFLAGS="$save_CFLAGS" ++ac_ext=c ++ac_cpp='$CPP $CPPFLAGS' ++ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ++ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ++ac_compiler_gnu=$ac_cv_c_compiler_gnu ++ + + + # Check whether -static-libgcc is supported. + saved_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS -static-libgcc" +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for -static-libgcc" >&5 +-printf %s "checking for -static-libgcc... " >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for -static-libgcc" >&5 ++$as_echo_n "checking for -static-libgcc... " >&6; } + cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ + + int main() {} + _ACEOF +-if ac_fn_c_try_link "$LINENO" +-then : ++if ac_fn_c_try_link "$LINENO"; then : + have_static_libgcc=yes +-else $as_nop ++else + have_static_libgcc=no + fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam \ ++rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $have_static_libgcc" >&5 +-printf "%s\n" "$have_static_libgcc" >&6; }; ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $have_static_libgcc" >&5 ++$as_echo "$have_static_libgcc" >&6; }; + LDFLAGS="$saved_LDFLAGS" + # Need -Wc to get it through libtool. 
+ if test "x$have_static_libgcc" = xyes; then +@@ -6879,19 +6197,30 @@ fi + + + # Determine what GCC version number to use in filesystem paths. +-GCC_BASE_VER ++ ++ get_gcc_base_ver="cat" ++ ++# Check whether --with-gcc-major-version-only was given. ++if test "${with_gcc_major_version_only+set}" = set; then : ++ withval=$with_gcc_major_version_only; if test x$with_gcc_major_version_only = xyes ; then ++ get_gcc_base_ver="sed -e 's/^\([0-9]*\).*/\1/'" ++ fi ++ ++fi ++ ++ ++ + + case `pwd` in + *\ * | *\ *) +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&5 +-printf "%s\n" "$as_me: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&2;} ;; ++ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&5 ++$as_echo "$as_me: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&2;} ;; + esac + + + +-macro_version='2.4.7' +-macro_revision='2.4.7' +- ++macro_version='2.2.7a' ++macro_revision='1.3134' + + + +@@ -6905,7 +6234,7 @@ macro_revision='2.4.7' + + + +-ltmain=$ac_aux_dir/ltmain.sh ++ltmain="$ac_aux_dir/ltmain.sh" + + # Backslashify metacharacters that are still active within + # double-quoted strings. +@@ -6928,10 +6257,10 @@ ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\ + ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO + ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO + +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to print strings" >&5 +-printf %s "checking how to print strings... " >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to print strings" >&5 ++$as_echo_n "checking how to print strings... " >&6; } + # Test print first, because it will be a builtin if present. 
+-if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ ++if test "X`print -r -- -n 2>/dev/null`" = X-n && \ + test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then + ECHO='print -r --' + elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then +@@ -6954,13 +6283,13 @@ func_echo_all () + $ECHO "" + } + +-case $ECHO in +- printf*) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: printf" >&5 +-printf "%s\n" "printf" >&6; } ;; +- print*) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: print -r" >&5 +-printf "%s\n" "print -r" >&6; } ;; +- *) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: cat" >&5 +-printf "%s\n" "cat" >&6; } ;; ++case "$ECHO" in ++ printf*) { $as_echo "$as_me:${as_lineno-$LINENO}: result: printf" >&5 ++$as_echo "printf" >&6; } ;; ++ print*) { $as_echo "$as_me:${as_lineno-$LINENO}: result: print -r" >&5 ++$as_echo "print -r" >&6; } ;; ++ *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: cat" >&5 ++$as_echo "cat" >&6; } ;; + esac + + +@@ -6976,12 +6305,11 @@ esac + + + +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for a sed that does not truncate output" >&5 +-printf %s "checking for a sed that does not truncate output... " >&6; } +-if test ${ac_cv_path_SED+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a sed that does not truncate output" >&5 ++$as_echo_n "checking for a sed that does not truncate output... " >&6; } ++if ${ac_cv_path_SED+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + ac_script=s/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/ + for ac_i in 1 2 3 4 5 6 7; do + ac_script="$ac_script$as_nl$ac_script" +@@ -6995,15 +6323,10 @@ else $as_nop + for as_dir in $PATH + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac +- for ac_prog in sed gsed +- do ++ test -z "$as_dir" && as_dir=. 
++ for ac_prog in sed gsed; do + for ac_exec_ext in '' $ac_executable_extensions; do +- ac_path_SED="$as_dir$ac_prog$ac_exec_ext" ++ ac_path_SED="$as_dir/$ac_prog$ac_exec_ext" + as_fn_executable_p "$ac_path_SED" || continue + # Check for GNU ac_path_SED and select it if it is found. + # Check for GNU $ac_path_SED +@@ -7012,13 +6335,13 @@ case `"$ac_path_SED" --version 2>&1` in + ac_cv_path_SED="$ac_path_SED" ac_path_SED_found=:;; + *) + ac_count=0 +- printf %s 0123456789 >"conftest.in" ++ $as_echo_n 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" +- printf "%s\n" '' >> "conftest.nl" ++ $as_echo '' >> "conftest.nl" + "$ac_path_SED" -f conftest.sed < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val +@@ -7046,172 +6369,29 @@ else + fi + + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_SED" >&5 +-printf "%s\n" "$ac_cv_path_SED" >&6; } +- SED="$ac_cv_path_SED" +- rm -f conftest.sed +- +-test -z "$SED" && SED=sed +-Xsed="$SED -e 1s/^X//" +- +- +- +- +- +- +- +- +- +- +- +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5 +-printf %s "checking for grep that handles long lines and -e... 
" >&6; } +-if test ${ac_cv_path_GREP+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop +- if test -z "$GREP"; then +- ac_path_GREP_found=false +- # Loop through the user's path and test for each of PROGNAME-LIST +- as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +-for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin +-do +- IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac +- for ac_prog in grep ggrep +- do +- for ac_exec_ext in '' $ac_executable_extensions; do +- ac_path_GREP="$as_dir$ac_prog$ac_exec_ext" +- as_fn_executable_p "$ac_path_GREP" || continue +-# Check for GNU ac_path_GREP and select it if it is found. +- # Check for GNU $ac_path_GREP +-case `"$ac_path_GREP" --version 2>&1` in +-*GNU*) +- ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; +-*) +- ac_count=0 +- printf %s 0123456789 >"conftest.in" +- while : +- do +- cat "conftest.in" "conftest.in" >"conftest.tmp" +- mv "conftest.tmp" "conftest.in" +- cp "conftest.in" "conftest.nl" +- printf "%s\n" 'GREP' >> "conftest.nl" +- "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break +- diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break +- as_fn_arith $ac_count + 1 && ac_count=$as_val +- if test $ac_count -gt ${ac_path_GREP_max-0}; then +- # Best one so far, save it but keep looking for a better one +- ac_cv_path_GREP="$ac_path_GREP" +- ac_path_GREP_max=$ac_count +- fi +- # 10*(2^10) chars as input seems more than enough +- test $ac_count -gt 10 && break +- done +- rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +-esac +- +- $ac_path_GREP_found && break 3 +- done +- done +- done +-IFS=$as_save_IFS +- if test -z "$ac_cv_path_GREP"; then +- as_fn_error $? 
"no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 +- fi +-else +- ac_cv_path_GREP=$GREP +-fi +- +-fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5 +-printf "%s\n" "$ac_cv_path_GREP" >&6; } +- GREP="$ac_cv_path_GREP" +- +- +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 +-printf %s "checking for egrep... " >&6; } +-if test ${ac_cv_path_EGREP+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop +- if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 +- then ac_cv_path_EGREP="$GREP -E" +- else +- if test -z "$EGREP"; then +- ac_path_EGREP_found=false +- # Loop through the user's path and test for each of PROGNAME-LIST +- as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +-for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin +-do +- IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac +- for ac_prog in egrep +- do +- for ac_exec_ext in '' $ac_executable_extensions; do +- ac_path_EGREP="$as_dir$ac_prog$ac_exec_ext" +- as_fn_executable_p "$ac_path_EGREP" || continue +-# Check for GNU ac_path_EGREP and select it if it is found. 
+- # Check for GNU $ac_path_EGREP +-case `"$ac_path_EGREP" --version 2>&1` in +-*GNU*) +- ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; +-*) +- ac_count=0 +- printf %s 0123456789 >"conftest.in" +- while : +- do +- cat "conftest.in" "conftest.in" >"conftest.tmp" +- mv "conftest.tmp" "conftest.in" +- cp "conftest.in" "conftest.nl" +- printf "%s\n" 'EGREP' >> "conftest.nl" +- "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break +- diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break +- as_fn_arith $ac_count + 1 && ac_count=$as_val +- if test $ac_count -gt ${ac_path_EGREP_max-0}; then +- # Best one so far, save it but keep looking for a better one +- ac_cv_path_EGREP="$ac_path_EGREP" +- ac_path_EGREP_max=$ac_count +- fi +- # 10*(2^10) chars as input seems more than enough +- test $ac_count -gt 10 && break +- done +- rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +-esac +- +- $ac_path_EGREP_found && break 3 +- done +- done +- done +-IFS=$as_save_IFS +- if test -z "$ac_cv_path_EGREP"; then +- as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 +- fi +-else +- ac_cv_path_EGREP=$EGREP +-fi +- +- fi +-fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5 +-printf "%s\n" "$ac_cv_path_EGREP" >&6; } +- EGREP="$ac_cv_path_EGREP" ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_SED" >&5 ++$as_echo "$ac_cv_path_SED" >&6; } ++ SED="$ac_cv_path_SED" ++ rm -f conftest.sed ++ ++test -z "$SED" && SED=sed ++Xsed="$SED -e 1s/^X//" ++ ++ ++ ++ ++ ++ ++ ++ ++ + + +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for fgrep" >&5 +-printf %s "checking for fgrep... " >&6; } +-if test ${ac_cv_path_FGREP+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for fgrep" >&5 ++$as_echo_n "checking for fgrep... 
" >&6; } ++if ${ac_cv_path_FGREP+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if echo 'ab*c' | $GREP -F 'ab*c' >/dev/null 2>&1 + then ac_cv_path_FGREP="$GREP -F" + else +@@ -7222,15 +6402,10 @@ else $as_nop + for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac +- for ac_prog in fgrep +- do ++ test -z "$as_dir" && as_dir=. ++ for ac_prog in fgrep; do + for ac_exec_ext in '' $ac_executable_extensions; do +- ac_path_FGREP="$as_dir$ac_prog$ac_exec_ext" ++ ac_path_FGREP="$as_dir/$ac_prog$ac_exec_ext" + as_fn_executable_p "$ac_path_FGREP" || continue + # Check for GNU ac_path_FGREP and select it if it is found. + # Check for GNU $ac_path_FGREP +@@ -7239,13 +6414,13 @@ case `"$ac_path_FGREP" --version 2>&1` in + ac_cv_path_FGREP="$ac_path_FGREP" ac_path_FGREP_found=:;; + *) + ac_count=0 +- printf %s 0123456789 >"conftest.in" ++ $as_echo_n 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" +- printf "%s\n" 'FGREP' >> "conftest.nl" ++ $as_echo 'FGREP' >> "conftest.nl" + "$ac_path_FGREP" FGREP < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val +@@ -7274,8 +6449,8 @@ fi + + fi + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_FGREP" >&5 +-printf "%s\n" "$ac_cv_path_FGREP" >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_FGREP" >&5 ++$as_echo "$ac_cv_path_FGREP" >&6; } + FGREP="$ac_cv_path_FGREP" + + +@@ -7300,21 +6475,20 @@ test -z "$GREP" && GREP=grep + + + # Check whether --with-gnu-ld was given. 
+-if test ${with_gnu_ld+y} +-then : +- withval=$with_gnu_ld; test no = "$withval" || with_gnu_ld=yes +-else $as_nop ++if test "${with_gnu_ld+set}" = set; then : ++ withval=$with_gnu_ld; test "$withval" = no || with_gnu_ld=yes ++else + with_gnu_ld=no + fi + + ac_prog=ld +-if test yes = "$GCC"; then ++if test "$GCC" = yes; then + # Check if gcc -print-prog-name=ld gives a path. +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5 +-printf %s "checking for ld used by $CC... " >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5 ++$as_echo_n "checking for ld used by $CC... " >&6; } + case $host in + *-*-mingw*) +- # gcc leaves a trailing carriage return, which upsets mingw ++ # gcc leaves a trailing carriage return which upsets mingw + ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; + *) + ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; +@@ -7328,7 +6502,7 @@ printf %s "checking for ld used by $CC... " >&6; } + while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do + ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` + done +- test -z "$LD" && LD=$ac_prog ++ test -z "$LD" && LD="$ac_prog" + ;; + "") + # If it fails, then pretend we aren't using GCC. +@@ -7339,58 +6513,56 @@ printf %s "checking for ld used by $CC... " >&6; } + with_gnu_ld=unknown + ;; + esac +-elif test yes = "$with_gnu_ld"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5 +-printf %s "checking for GNU ld... " >&6; } ++elif test "$with_gnu_ld" = yes; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5 ++$as_echo_n "checking for GNU ld... " >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5 +-printf %s "checking for non-GNU ld... " >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5 ++$as_echo_n "checking for non-GNU ld... 
" >&6; } + fi +-if test ${lt_cv_path_LD+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++if ${lt_cv_path_LD+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test -z "$LD"; then +- lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR ++ lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + for ac_dir in $PATH; do +- IFS=$lt_save_ifs ++ IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then +- lt_cv_path_LD=$ac_dir/$ac_prog ++ lt_cv_path_LD="$ac_dir/$ac_prog" + # Check to see if the program is GNU ld. I'd rather use --version, + # but apparently some variants of GNU ld only accept -v. + # Break only if it was the GNU/non-GNU ld that we prefer. + case `"$lt_cv_path_LD" -v 2>&1 &5 +-printf "%s\n" "$LD" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LD" >&5 ++$as_echo "$LD" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } + fi + test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5 +-printf %s "checking if the linker ($LD) is GNU ld... " >&6; } +-if test ${lt_cv_prog_gnu_ld+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5 ++$as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; } ++if ${lt_cv_prog_gnu_ld+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + # I'd rather use --version here, but apparently some GNU lds only accept -v. 
+ case `$LD -v 2>&1 &1 &5 +-printf "%s\n" "$lt_cv_prog_gnu_ld" >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_gnu_ld" >&5 ++$as_echo "$lt_cv_prog_gnu_ld" >&6; } + with_gnu_ld=$lt_cv_prog_gnu_ld + + +@@ -7413,46 +6585,40 @@ with_gnu_ld=$lt_cv_prog_gnu_ld + + + +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for BSD- or MS-compatible name lister (nm)" >&5 +-printf %s "checking for BSD- or MS-compatible name lister (nm)... " >&6; } +-if test ${lt_cv_path_NM+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for BSD- or MS-compatible name lister (nm)" >&5 ++$as_echo_n "checking for BSD- or MS-compatible name lister (nm)... " >&6; } ++if ${lt_cv_path_NM+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test -n "$NM"; then + # Let the user override the test. +- lt_cv_path_NM=$NM ++ lt_cv_path_NM="$NM" + else +- lt_nm_to_check=${ac_tool_prefix}nm ++ lt_nm_to_check="${ac_tool_prefix}nm" + if test -n "$ac_tool_prefix" && test "$build" = "$host"; then + lt_nm_to_check="$lt_nm_to_check nm" + fi + for lt_tmp_nm in $lt_nm_to_check; do +- lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR ++ lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do +- IFS=$lt_save_ifs ++ IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. +- tmp_nm=$ac_dir/$lt_tmp_nm +- if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext"; then ++ tmp_nm="$ac_dir/$lt_tmp_nm" ++ if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext" ; then + # Check to see if the nm accepts a BSD-compat flag. 
+- # Adding the 'sed 1q' prevents false positives on HP-UX, which says: ++ # Adding the `sed 1q' prevents false positives on HP-UX, which says: + # nm: unknown option "B" ignored + # Tru64's nm complains that /dev/null is an invalid object file +- # MSYS converts /dev/null to NUL, MinGW nm treats NUL as empty +- case $build_os in +- mingw*) lt_bad_file=conftest.nm/nofile ;; +- *) lt_bad_file=/dev/null ;; +- esac +- case `"$tmp_nm" -B $lt_bad_file 2>&1 | $SED '1q'` in +- *$lt_bad_file* | *'Invalid file or object type'*) ++ case `"$tmp_nm" -B /dev/null 2>&1 | sed '1q'` in ++ */dev/null* | *'Invalid file or object type'*) + lt_cv_path_NM="$tmp_nm -B" +- break 2 ++ break + ;; + *) +- case `"$tmp_nm" -p /dev/null 2>&1 | $SED '1q'` in ++ case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in + */dev/null*) + lt_cv_path_NM="$tmp_nm -p" +- break 2 ++ break + ;; + *) + lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but +@@ -7463,15 +6629,15 @@ else + esac + fi + done +- IFS=$lt_save_ifs ++ IFS="$lt_save_ifs" + done + : ${lt_cv_path_NM=no} + fi + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_NM" >&5 +-printf "%s\n" "$lt_cv_path_NM" >&6; } +-if test no != "$lt_cv_path_NM"; then +- NM=$lt_cv_path_NM ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_NM" >&5 ++$as_echo "$lt_cv_path_NM" >&6; } ++if test "$lt_cv_path_NM" != "no"; then ++ NM="$lt_cv_path_NM" + else + # Didn't find any BSD compatible name lister, look for dumpbin. + if test -n "$DUMPBIN"; then : +@@ -7482,12 +6648,11 @@ else + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. + set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... 
" >&6; } +-if test ${ac_cv_prog_DUMPBIN+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_DUMPBIN+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test -n "$DUMPBIN"; then + ac_cv_prog_DUMPBIN="$DUMPBIN" # Let the user override the test. + else +@@ -7495,15 +6660,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR + for as_dir in $PATH + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac ++ test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_DUMPBIN="$ac_tool_prefix$ac_prog" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi + done +@@ -7514,11 +6675,11 @@ fi + fi + DUMPBIN=$ac_cv_prog_DUMPBIN + if test -n "$DUMPBIN"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $DUMPBIN" >&5 +-printf "%s\n" "$DUMPBIN" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DUMPBIN" >&5 ++$as_echo "$DUMPBIN" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } + fi + + +@@ -7531,12 +6692,11 @@ if test -z "$DUMPBIN"; then + do + # Extract the first word of "$ac_prog", so it can be a program name with args. + set dummy $ac_prog; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... 
" >&6; } +-if test ${ac_cv_prog_ac_ct_DUMPBIN+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_ac_ct_DUMPBIN+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test -n "$ac_ct_DUMPBIN"; then + ac_cv_prog_ac_ct_DUMPBIN="$ac_ct_DUMPBIN" # Let the user override the test. + else +@@ -7544,15 +6704,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR + for as_dir in $PATH + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac ++ test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_DUMPBIN="$ac_prog" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi + done +@@ -7563,11 +6719,11 @@ fi + fi + ac_ct_DUMPBIN=$ac_cv_prog_ac_ct_DUMPBIN + if test -n "$ac_ct_DUMPBIN"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DUMPBIN" >&5 +-printf "%s\n" "$ac_ct_DUMPBIN" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DUMPBIN" >&5 ++$as_echo "$ac_ct_DUMPBIN" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } + fi + + +@@ -7579,17 +6735,17 @@ done + else + case $cross_compiling:$ac_tool_warned in + yes:) +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +-printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++$as_echo 
"$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} + ac_tool_warned=yes ;; + esac + DUMPBIN=$ac_ct_DUMPBIN + fi + fi + +- case `$DUMPBIN -symbols -headers /dev/null 2>&1 | $SED '1q'` in ++ case `$DUMPBIN -symbols /dev/null 2>&1 | sed '1q'` in + *COFF*) +- DUMPBIN="$DUMPBIN -symbols -headers" ++ DUMPBIN="$DUMPBIN -symbols" + ;; + *) + DUMPBIN=: +@@ -7597,8 +6753,8 @@ fi + esac + fi + +- if test : != "$DUMPBIN"; then +- NM=$DUMPBIN ++ if test "$DUMPBIN" != ":"; then ++ NM="$DUMPBIN" + fi + fi + test -z "$NM" && NM=nm +@@ -7608,12 +6764,11 @@ test -z "$NM" && NM=nm + + + +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking the name lister ($NM) interface" >&5 +-printf %s "checking the name lister ($NM) interface... " >&6; } +-if test ${lt_cv_nm_interface+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking the name lister ($NM) interface" >&5 ++$as_echo_n "checking the name lister ($NM) interface... " >&6; } ++if ${lt_cv_nm_interface+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + lt_cv_nm_interface="BSD nm" + echo "int some_variable = 0;" > conftest.$ac_ext + (eval echo "\"\$as_me:$LINENO: $ac_compile\"" >&5) +@@ -7629,29 +6784,28 @@ else $as_nop + fi + rm -f conftest* + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_nm_interface" >&5 +-printf "%s\n" "$lt_cv_nm_interface" >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_nm_interface" >&5 ++$as_echo "$lt_cv_nm_interface" >&6; } + +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether ln -s works" >&5 +-printf %s "checking whether ln -s works... " >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ln -s works" >&5 ++$as_echo_n "checking whether ln -s works... 
" >&6; } + LN_S=$as_ln_s + if test "$LN_S" = "ln -s"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +-printf "%s\n" "yes" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 ++$as_echo "yes" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no, using $LN_S" >&5 +-printf "%s\n" "no, using $LN_S" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no, using $LN_S" >&5 ++$as_echo "no, using $LN_S" >&6; } + fi + + # find the maximum length of command line arguments +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking the maximum length of command line arguments" >&5 +-printf %s "checking the maximum length of command line arguments... " >&6; } +-if test ${lt_cv_sys_max_cmd_len+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking the maximum length of command line arguments" >&5 ++$as_echo_n "checking the maximum length of command line arguments... " >&6; } ++if ${lt_cv_sys_max_cmd_len+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + i=0 +- teststring=ABCD ++ teststring="ABCD" + + case $build_os in + msdosdjgpp*) +@@ -7691,7 +6845,7 @@ else $as_nop + lt_cv_sys_max_cmd_len=8192; + ;; + +- bitrig* | darwin* | dragonfly* | freebsd* | midnightbsd* | netbsd* | openbsd*) ++ netbsd* | freebsd* | openbsd* | darwin* | dragonfly*) + # This has been around since 386BSD, at least. Likely further. + if test -x /sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` +@@ -7710,11 +6864,6 @@ else $as_nop + lt_cv_sys_max_cmd_len=196608 + ;; + +- os2*) +- # The test takes a long time on OS/2. +- lt_cv_sys_max_cmd_len=8192 +- ;; +- + osf*) + # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure + # due to this test when exec_disable_arg_limit is 1 on Tru64. 
It is not +@@ -7734,30 +6883,29 @@ else $as_nop + sysv5* | sco5v6* | sysv4.2uw2*) + kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null` + if test -n "$kargmax"; then +- lt_cv_sys_max_cmd_len=`echo $kargmax | $SED 's/.*[ ]//'` ++ lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[ ]//'` + else + lt_cv_sys_max_cmd_len=32768 + fi + ;; + *) + lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null` +- if test -n "$lt_cv_sys_max_cmd_len" && \ +- test undefined != "$lt_cv_sys_max_cmd_len"; then ++ if test -n "$lt_cv_sys_max_cmd_len"; then + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` + else + # Make teststring a little bigger before we do anything with it. + # a 1K string should be a reasonable start. +- for i in 1 2 3 4 5 6 7 8; do ++ for i in 1 2 3 4 5 6 7 8 ; do + teststring=$teststring$teststring + done + SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} + # If test is not a shell built-in, we'll probably end up computing a + # maximum length that is only half of the actual maximum length, but + # we can't tell. 
+- while { test X`env echo "$teststring$teststring" 2>/dev/null` \ ++ while { test "X"`func_fallback_echo "$teststring$teststring" 2>/dev/null` \ + = "X$teststring$teststring"; } >/dev/null 2>&1 && +- test 17 != "$i" # 1/2 MB should be enough ++ test $i != 17 # 1/2 MB should be enough + do + i=`expr $i + 1` + teststring=$teststring$teststring +@@ -7775,12 +6923,12 @@ else $as_nop + + fi + +-if test -n "$lt_cv_sys_max_cmd_len"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sys_max_cmd_len" >&5 +-printf "%s\n" "$lt_cv_sys_max_cmd_len" >&6; } ++if test -n $lt_cv_sys_max_cmd_len ; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sys_max_cmd_len" >&5 ++$as_echo "$lt_cv_sys_max_cmd_len" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none" >&5 +-printf "%s\n" "none" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: none" >&5 ++$as_echo "none" >&6; } + fi + max_cmd_len=$lt_cv_sys_max_cmd_len + +@@ -7793,6 +6941,30 @@ max_cmd_len=$lt_cv_sys_max_cmd_len + : ${MV="mv -f"} + : ${RM="rm -f"} + ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the shell understands some XSI constructs" >&5 ++$as_echo_n "checking whether the shell understands some XSI constructs... " >&6; } ++# Try some XSI features ++xsi_shell=no ++( _lt_dummy="a/b/c" ++ test "${_lt_dummy##*/},${_lt_dummy%/*},"${_lt_dummy%"$_lt_dummy"}, \ ++ = c,a/b,, \ ++ && eval 'test $(( 1 + 1 )) -eq 2 \ ++ && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \ ++ && xsi_shell=yes ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $xsi_shell" >&5 ++$as_echo "$xsi_shell" >&6; } ++ ++ ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the shell understands \"+=\"" >&5 ++$as_echo_n "checking whether the shell understands \"+=\"... 
" >&6; } ++lt_shell_append=no ++( foo=bar; set foo baz; eval "$1+=\$2" && test "$foo" = barbaz ) \ ++ >/dev/null 2>&1 \ ++ && lt_shell_append=yes ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_shell_append" >&5 ++$as_echo "$lt_shell_append" >&6; } ++ ++ + if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then + lt_unset=unset + else +@@ -7824,92 +6996,15 @@ esac + + + +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to $host format" >&5 +-printf %s "checking how to convert $build file names to $host format... " >&6; } +-if test ${lt_cv_to_host_file_cmd+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop +- case $host in +- *-*-mingw* ) +- case $build in +- *-*-mingw* ) # actually msys +- lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 +- ;; +- *-*-cygwin* ) +- lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 +- ;; +- * ) # otherwise, assume *nix +- lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 +- ;; +- esac +- ;; +- *-*-cygwin* ) +- case $build in +- *-*-mingw* ) # actually msys +- lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin +- ;; +- *-*-cygwin* ) +- lt_cv_to_host_file_cmd=func_convert_file_noop +- ;; +- * ) # otherwise, assume *nix +- lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin +- ;; +- esac +- ;; +- * ) # unhandled hosts (and "normal" native builds) +- lt_cv_to_host_file_cmd=func_convert_file_noop +- ;; +-esac +- +-fi +- +-to_host_file_cmd=$lt_cv_to_host_file_cmd +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_host_file_cmd" >&5 +-printf "%s\n" "$lt_cv_to_host_file_cmd" >&6; } +- +- +- +- +- +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to toolchain format" >&5 +-printf %s "checking how to convert $build file names to toolchain format... " >&6; } +-if test ${lt_cv_to_tool_file_cmd+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop +- #assume ordinary cross tools, or native build. 
+-lt_cv_to_tool_file_cmd=func_convert_file_noop +-case $host in +- *-*-mingw* ) +- case $build in +- *-*-mingw* ) # actually msys +- lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 +- ;; +- esac +- ;; +-esac +- +-fi +- +-to_tool_file_cmd=$lt_cv_to_tool_file_cmd +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_tool_file_cmd" >&5 +-printf "%s\n" "$lt_cv_to_tool_file_cmd" >&6; } +- +- +- +- +- +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $LD option to reload object files" >&5 +-printf %s "checking for $LD option to reload object files... " >&6; } +-if test ${lt_cv_ld_reload_flag+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $LD option to reload object files" >&5 ++$as_echo_n "checking for $LD option to reload object files... " >&6; } ++if ${lt_cv_ld_reload_flag+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + lt_cv_ld_reload_flag='-r' + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_reload_flag" >&5 +-printf "%s\n" "$lt_cv_ld_reload_flag" >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_reload_flag" >&5 ++$as_echo "$lt_cv_ld_reload_flag" >&6; } + reload_flag=$lt_cv_ld_reload_flag + case $reload_flag in + "" | " "*) ;; +@@ -7917,14 +7012,9 @@ case $reload_flag in + esac + reload_cmds='$LD$reload_flag -o $output$reload_objs' + case $host_os in +- cygwin* | mingw* | pw32* | cegcc*) +- if test yes != "$GCC"; then +- reload_cmds=false +- fi +- ;; + darwin*) +- if test yes = "$GCC"; then +- reload_cmds='$LTCC $LTCFLAGS -nostdlib $wl-r -o $output$reload_objs' ++ if test "$GCC" = yes; then ++ reload_cmds='$LTCC $LTCFLAGS -nostdlib ${wl}-r -o $output$reload_objs' + else + reload_cmds='$LD$reload_flag -o $output$reload_objs' + fi +@@ -7939,123 +7029,14 @@ esac + + + +-if test -n "$ac_tool_prefix"; then +- # Extract the first word of "${ac_tool_prefix}file", so it can be a program name with args. 
+-set dummy ${ac_tool_prefix}file; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... " >&6; } +-if test ${ac_cv_prog_FILECMD+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop +- if test -n "$FILECMD"; then +- ac_cv_prog_FILECMD="$FILECMD" # Let the user override the test. +-else +-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +-for as_dir in $PATH +-do +- IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac +- for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then +- ac_cv_prog_FILECMD="${ac_tool_prefix}file" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 +- break 2 +- fi +-done +- done +-IFS=$as_save_IFS +- +-fi +-fi +-FILECMD=$ac_cv_prog_FILECMD +-if test -n "$FILECMD"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $FILECMD" >&5 +-printf "%s\n" "$FILECMD" >&6; } +-else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } +-fi +- +- +-fi +-if test -z "$ac_cv_prog_FILECMD"; then +- ac_ct_FILECMD=$FILECMD +- # Extract the first word of "file", so it can be a program name with args. +-set dummy file; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... " >&6; } +-if test ${ac_cv_prog_ac_ct_FILECMD+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop +- if test -n "$ac_ct_FILECMD"; then +- ac_cv_prog_ac_ct_FILECMD="$ac_ct_FILECMD" # Let the user override the test. 
+-else +-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +-for as_dir in $PATH +-do +- IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac +- for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then +- ac_cv_prog_ac_ct_FILECMD="file" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 +- break 2 +- fi +-done +- done +-IFS=$as_save_IFS +- +-fi +-fi +-ac_ct_FILECMD=$ac_cv_prog_ac_ct_FILECMD +-if test -n "$ac_ct_FILECMD"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_FILECMD" >&5 +-printf "%s\n" "$ac_ct_FILECMD" >&6; } +-else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } +-fi +- +- if test "x$ac_ct_FILECMD" = x; then +- FILECMD=":" +- else +- case $cross_compiling:$ac_tool_warned in +-yes:) +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +-printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +-ac_tool_warned=yes ;; +-esac +- FILECMD=$ac_ct_FILECMD +- fi +-else +- FILECMD="$ac_cv_prog_FILECMD" +-fi +- +- +- +- +- +- +- + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}objdump", so it can be a program name with args. + set dummy ${ac_tool_prefix}objdump; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... " >&6; } +-if test ${ac_cv_prog_OBJDUMP+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_OBJDUMP+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test -n "$OBJDUMP"; then + ac_cv_prog_OBJDUMP="$OBJDUMP" # Let the user override the test. 
+ else +@@ -8063,15 +7044,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR + for as_dir in $PATH + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac ++ test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_OBJDUMP="${ac_tool_prefix}objdump" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi + done +@@ -8082,11 +7059,11 @@ fi + fi + OBJDUMP=$ac_cv_prog_OBJDUMP + if test -n "$OBJDUMP"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $OBJDUMP" >&5 +-printf "%s\n" "$OBJDUMP" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OBJDUMP" >&5 ++$as_echo "$OBJDUMP" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } + fi + + +@@ -8095,12 +7072,11 @@ if test -z "$ac_cv_prog_OBJDUMP"; then + ac_ct_OBJDUMP=$OBJDUMP + # Extract the first word of "objdump", so it can be a program name with args. + set dummy objdump; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... " >&6; } +-if test ${ac_cv_prog_ac_ct_OBJDUMP+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_ac_ct_OBJDUMP+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test -n "$ac_ct_OBJDUMP"; then + ac_cv_prog_ac_ct_OBJDUMP="$ac_ct_OBJDUMP" # Let the user override the test. 
+ else +@@ -8108,15 +7084,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR + for as_dir in $PATH + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac ++ test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_OBJDUMP="objdump" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi + done +@@ -8127,11 +7099,11 @@ fi + fi + ac_ct_OBJDUMP=$ac_cv_prog_ac_ct_OBJDUMP + if test -n "$ac_ct_OBJDUMP"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OBJDUMP" >&5 +-printf "%s\n" "$ac_ct_OBJDUMP" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OBJDUMP" >&5 ++$as_echo "$ac_ct_OBJDUMP" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } + fi + + if test "x$ac_ct_OBJDUMP" = x; then +@@ -8139,8 +7111,8 @@ fi + else + case $cross_compiling:$ac_tool_warned in + yes:) +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +-printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} + ac_tool_warned=yes ;; + esac + OBJDUMP=$ac_ct_OBJDUMP +@@ -8159,25 +7131,24 @@ test -z "$OBJDUMP" && OBJDUMP=objdump + + + +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to recognize dependent libraries" >&5 +-printf %s "checking how to recognize dependent libraries... 
" >&6; } +-if test ${lt_cv_deplibs_check_method+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to recognize dependent libraries" >&5 ++$as_echo_n "checking how to recognize dependent libraries... " >&6; } ++if ${lt_cv_deplibs_check_method+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + lt_cv_file_magic_cmd='$MAGIC_CMD' + lt_cv_file_magic_test_file= + lt_cv_deplibs_check_method='unknown' + # Need to set the preceding variable on all platforms that support + # interlibrary dependencies. + # 'none' -- dependencies not supported. +-# 'unknown' -- same as none, but documents that we really don't know. ++# `unknown' -- same as none, but documents that we really don't know. + # 'pass_all' -- all dependencies passed with no checks. + # 'test_compile' -- check by making test program. + # 'file_magic [[regex]]' -- check by looking for files in library path +-# that responds to the $file_magic_cmd with a given extended regex. +-# If you have 'file' or equivalent on your system and you're not sure +-# whether 'pass_all' will *always* work, you probably want this one. ++# which responds to the $file_magic_cmd with a given extended regex. ++# If you have `file' or equivalent on your system and you're not sure ++# whether `pass_all' will *always* work, you probably want this one. + + case $host_os in + aix[4-9]*) +@@ -8190,7 +7161,7 @@ beos*) + + bsdi[45]*) + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib)' +- lt_cv_file_magic_cmd='$FILECMD -L' ++ lt_cv_file_magic_cmd='/usr/bin/file -L' + lt_cv_file_magic_test_file=/shlib/libc.so + ;; + +@@ -8204,12 +7175,12 @@ mingw* | pw32*) + # Base MSYS/MinGW do not provide the 'file' command needed by + # func_win32_libid shell function, so use a weaker test based on 'objdump', + # unless we find 'file', for example because we are cross-compiling. 
+- if ( file / ) >/dev/null 2>&1; then ++ # func_win32_libid assumes BSD nm, so disallow it if using MS dumpbin. ++ if ( test "$lt_cv_nm_interface" = "BSD nm" && file / ) >/dev/null 2>&1; then + lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' + lt_cv_file_magic_cmd='func_win32_libid' + else +- # Keep this pattern in sync with the one in func_win32_libid. +- lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' ++ lt_cv_deplibs_check_method='file_magic file format pei*-i386(.*architecture: i386)?' + lt_cv_file_magic_cmd='$OBJDUMP -f' + fi + ;; +@@ -8224,14 +7195,14 @@ darwin* | rhapsody*) + lt_cv_deplibs_check_method=pass_all + ;; + +-freebsd* | dragonfly* | midnightbsd*) ++freebsd* | dragonfly*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then + case $host_cpu in + i*86 ) + # Not sure whether the presence of OpenBSD here was a mistake. + # Let's accept both of them until this is cleared up. + lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[3-9]86 (compact )?demand paged shared library' +- lt_cv_file_magic_cmd=$FILECMD ++ lt_cv_file_magic_cmd=/usr/bin/file + lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` + ;; + esac +@@ -8240,12 +7211,16 @@ freebsd* | dragonfly* | midnightbsd*) + fi + ;; + ++gnu*) ++ lt_cv_deplibs_check_method=pass_all ++ ;; ++ + haiku*) + lt_cv_deplibs_check_method=pass_all + ;; + + hpux10.20* | hpux11*) +- lt_cv_file_magic_cmd=$FILECMD ++ lt_cv_file_magic_cmd=/usr/bin/file + case $host_cpu in + ia64*) + lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - IA64' +@@ -8277,8 +7252,8 @@ irix5* | irix6* | nonstopux*) + lt_cv_deplibs_check_method=pass_all + ;; + +-# This must be glibc/ELF. +-linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) ++# This must be Linux ELF. 
++linux* | k*bsd*-gnu | kopensolaris*-gnu | uclinuxfdpiceabi) + lt_cv_deplibs_check_method=pass_all + ;; + +@@ -8292,7 +7267,7 @@ netbsd*) + + newos6*) + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (executable|dynamic lib)' +- lt_cv_file_magic_cmd=$FILECMD ++ lt_cv_file_magic_cmd=/usr/bin/file + lt_cv_file_magic_test_file=/usr/lib/libnls.so + ;; + +@@ -8300,8 +7275,8 @@ newos6*) + lt_cv_deplibs_check_method=pass_all + ;; + +-openbsd* | bitrig*) +- if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then ++openbsd*) ++ if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|\.so|_pic\.a)$' + else + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' +@@ -8333,220 +7308,40 @@ sysv4 | sysv4.3*) + ncr) + lt_cv_deplibs_check_method=pass_all + ;; +- sequent) +- lt_cv_file_magic_cmd='/bin/file' +- lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [LM]SB (shared object|dynamic lib )' +- ;; +- sni) +- lt_cv_file_magic_cmd='/bin/file' +- lt_cv_deplibs_check_method="file_magic ELF [0-9][0-9]*-bit [LM]SB dynamic lib" +- lt_cv_file_magic_test_file=/lib/libc.so +- ;; +- siemens) +- lt_cv_deplibs_check_method=pass_all +- ;; +- pc) +- lt_cv_deplibs_check_method=pass_all +- ;; +- esac +- ;; +- +-tpf*) +- lt_cv_deplibs_check_method=pass_all +- ;; +-os2*) +- lt_cv_deplibs_check_method=pass_all +- ;; +-esac +- +-fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_deplibs_check_method" >&5 +-printf "%s\n" "$lt_cv_deplibs_check_method" >&6; } +- +-file_magic_glob= +-want_nocaseglob=no +-if test "$build" = "$host"; then +- case $host_os in +- mingw* | pw32*) +- if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then +- want_nocaseglob=yes +- else +- file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[\1]\/[\1]\/g;/g"` +- fi +- ;; +- 
esac +-fi +- +-file_magic_cmd=$lt_cv_file_magic_cmd +-deplibs_check_method=$lt_cv_deplibs_check_method +-test -z "$deplibs_check_method" && deplibs_check_method=unknown +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +-if test -n "$ac_tool_prefix"; then +- # Extract the first word of "${ac_tool_prefix}dlltool", so it can be a program name with args. +-set dummy ${ac_tool_prefix}dlltool; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... " >&6; } +-if test ${ac_cv_prog_DLLTOOL+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop +- if test -n "$DLLTOOL"; then +- ac_cv_prog_DLLTOOL="$DLLTOOL" # Let the user override the test. +-else +-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +-for as_dir in $PATH +-do +- IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac +- for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then +- ac_cv_prog_DLLTOOL="${ac_tool_prefix}dlltool" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 +- break 2 +- fi +-done +- done +-IFS=$as_save_IFS +- +-fi +-fi +-DLLTOOL=$ac_cv_prog_DLLTOOL +-if test -n "$DLLTOOL"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $DLLTOOL" >&5 +-printf "%s\n" "$DLLTOOL" >&6; } +-else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } +-fi +- +- +-fi +-if test -z "$ac_cv_prog_DLLTOOL"; then +- ac_ct_DLLTOOL=$DLLTOOL +- # Extract the first word of "dlltool", so it can be a program name with args. +-set dummy dlltool; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... 
" >&6; } +-if test ${ac_cv_prog_ac_ct_DLLTOOL+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop +- if test -n "$ac_ct_DLLTOOL"; then +- ac_cv_prog_ac_ct_DLLTOOL="$ac_ct_DLLTOOL" # Let the user override the test. +-else +-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +-for as_dir in $PATH +-do +- IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac +- for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then +- ac_cv_prog_ac_ct_DLLTOOL="dlltool" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 +- break 2 +- fi +-done +- done +-IFS=$as_save_IFS +- +-fi +-fi +-ac_ct_DLLTOOL=$ac_cv_prog_ac_ct_DLLTOOL +-if test -n "$ac_ct_DLLTOOL"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DLLTOOL" >&5 +-printf "%s\n" "$ac_ct_DLLTOOL" >&6; } +-else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } +-fi +- +- if test "x$ac_ct_DLLTOOL" = x; then +- DLLTOOL="false" +- else +- case $cross_compiling:$ac_tool_warned in +-yes:) +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +-printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +-ac_tool_warned=yes ;; +-esac +- DLLTOOL=$ac_ct_DLLTOOL +- fi +-else +- DLLTOOL="$ac_cv_prog_DLLTOOL" +-fi +- +-test -z "$DLLTOOL" && DLLTOOL=dlltool +- +- +- +- +- +- +- +- +- +- +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to associate runtime and link libraries" >&5 +-printf %s "checking how to associate runtime and link libraries... 
" >&6; } +-if test ${lt_cv_sharedlib_from_linklib_cmd+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop +- lt_cv_sharedlib_from_linklib_cmd='unknown' +- +-case $host_os in +-cygwin* | mingw* | pw32* | cegcc*) +- # two different shell functions defined in ltmain.sh; +- # decide which one to use based on capabilities of $DLLTOOL +- case `$DLLTOOL --help 2>&1` in +- *--identify-strict*) +- lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib +- ;; +- *) +- lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback ++ sequent) ++ lt_cv_file_magic_cmd='/bin/file' ++ lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [LM]SB (shared object|dynamic lib )' ++ ;; ++ sni) ++ lt_cv_file_magic_cmd='/bin/file' ++ lt_cv_deplibs_check_method="file_magic ELF [0-9][0-9]*-bit [LM]SB dynamic lib" ++ lt_cv_file_magic_test_file=/lib/libc.so ++ ;; ++ siemens) ++ lt_cv_deplibs_check_method=pass_all ++ ;; ++ pc) ++ lt_cv_deplibs_check_method=pass_all + ;; + esac + ;; +-*) +- # fallback: assume linklib IS sharedlib +- lt_cv_sharedlib_from_linklib_cmd=$ECHO ++ ++tpf*) ++ lt_cv_deplibs_check_method=pass_all + ;; + esac + + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sharedlib_from_linklib_cmd" >&5 +-printf "%s\n" "$lt_cv_sharedlib_from_linklib_cmd" >&6; } +-sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd +-test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_deplibs_check_method" >&5 ++$as_echo "$lt_cv_deplibs_check_method" >&6; } ++file_magic_cmd=$lt_cv_file_magic_cmd ++deplibs_check_method=$lt_cv_deplibs_check_method ++test -z "$deplibs_check_method" && deplibs_check_method=unknown ++ ++ ++ ++ ++ + + + +@@ -8555,16 +7350,13 @@ test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO + + + if test -n "$ac_tool_prefix"; then +- for ac_prog in ar +- do +- # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program 
name with args. +-set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... " >&6; } +-if test ${ac_cv_prog_AR+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++ # Extract the first word of "${ac_tool_prefix}ar", so it can be a program name with args. ++set dummy ${ac_tool_prefix}ar; ac_word=$2 ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_AR+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test -n "$AR"; then + ac_cv_prog_AR="$AR" # Let the user override the test. + else +@@ -8572,15 +7364,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR + for as_dir in $PATH + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac ++ test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then +- ac_cv_prog_AR="$ac_tool_prefix$ac_prog" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ++ ac_cv_prog_AR="${ac_tool_prefix}ar" ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi + done +@@ -8591,29 +7379,24 @@ fi + fi + AR=$ac_cv_prog_AR + if test -n "$AR"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $AR" >&5 +-printf "%s\n" "$AR" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AR" >&5 ++$as_echo "$AR" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } + fi + + +- test -n "$AR" && break +- done + fi +-if test -z "$AR"; then ++if test -z "$ac_cv_prog_AR"; then + ac_ct_AR=$AR +- for ac_prog in ar +-do +- # Extract the first word of "$ac_prog", 
so it can be a program name with args. +-set dummy $ac_prog; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... " >&6; } +-if test ${ac_cv_prog_ac_ct_AR+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++ # Extract the first word of "ar", so it can be a program name with args. ++set dummy ar; ac_word=$2 ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_ac_ct_AR+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test -n "$ac_ct_AR"; then + ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test. + else +@@ -8621,15 +7404,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR + for as_dir in $PATH + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac ++ test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then +- ac_cv_prog_ac_ct_AR="$ac_prog" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ++ ac_cv_prog_ac_ct_AR="ar" ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi + done +@@ -8640,112 +7419,34 @@ fi + fi + ac_ct_AR=$ac_cv_prog_ac_ct_AR + if test -n "$ac_ct_AR"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_AR" >&5 +-printf "%s\n" "$ac_ct_AR" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_AR" >&5 ++$as_echo "$ac_ct_AR" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } + fi + +- +- test -n "$ac_ct_AR" && break +-done +- + if test "x$ac_ct_AR" = x; then + AR="false" + else + case $cross_compiling:$ac_tool_warned in + yes:) +-{ printf 
"%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +-printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} + ac_tool_warned=yes ;; + esac + AR=$ac_ct_AR + fi ++else ++ AR="$ac_cv_prog_AR" + fi + +-: ${AR=ar} +- +- +- +- +- +- +-# Use ARFLAGS variable as AR's operation code to sync the variable naming with +-# Automake. If both AR_FLAGS and ARFLAGS are specified, AR_FLAGS should have +-# higher priority because thats what people were doing historically (setting +-# ARFLAGS for automake and AR_FLAGS for libtool). FIXME: Make the AR_FLAGS +-# variable obsoleted/removed. +- +-test ${AR_FLAGS+y} || AR_FLAGS=${ARFLAGS-cr} +-lt_ar_flags=$AR_FLAGS +- +- +- +- +- +- +-# Make AR_FLAGS overridable by 'make ARFLAGS='. Don't try to run-time override +-# by AR_FLAGS because that was never working and AR_FLAGS is about to die. +- +- +- +- +- +- +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for archiver @FILE support" >&5 +-printf %s "checking for archiver @FILE support... " >&6; } +-if test ${lt_cv_ar_at_file+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop +- lt_cv_ar_at_file=no +- cat confdefs.h - <<_ACEOF >conftest.$ac_ext +-/* end confdefs.h. */ +- +-int +-main (void) +-{ ++test -z "$AR" && AR=ar ++test -z "$AR_FLAGS" && AR_FLAGS=cru + +- ; +- return 0; +-} +-_ACEOF +-if ac_fn_c_try_compile "$LINENO" +-then : +- echo conftest.$ac_objext > conftest.lst +- lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&5' +- { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 +- (eval $lt_ar_try) 2>&5 +- ac_status=$? +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 +- test $ac_status = 0; } +- if test 0 -eq "$ac_status"; then +- # Ensure the archiver fails upon bogus file names. +- rm -f conftest.$ac_objext libconftest.a +- { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 +- (eval $lt_ar_try) 2>&5 +- ac_status=$? +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 +- test $ac_status = 0; } +- if test 0 -ne "$ac_status"; then +- lt_cv_ar_at_file=@ +- fi +- fi +- rm -f conftest.* libconftest.a + +-fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext + +-fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ar_at_file" >&5 +-printf "%s\n" "$lt_cv_ar_at_file" >&6; } + +-if test no = "$lt_cv_ar_at_file"; then +- archiver_list_spec= +-else +- archiver_list_spec=$lt_cv_ar_at_file +-fi + + + +@@ -8756,12 +7457,11 @@ fi + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. + set dummy ${ac_tool_prefix}strip; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... " >&6; } +-if test ${ac_cv_prog_STRIP+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_STRIP+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test -n "$STRIP"; then + ac_cv_prog_STRIP="$STRIP" # Let the user override the test. + else +@@ -8769,15 +7469,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR + for as_dir in $PATH + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac ++ test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_STRIP="${ac_tool_prefix}strip" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi + done +@@ -8788,11 +7484,11 @@ fi + fi + STRIP=$ac_cv_prog_STRIP + if test -n "$STRIP"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5 +-printf "%s\n" "$STRIP" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5 ++$as_echo "$STRIP" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } + fi + + +@@ -8801,12 +7497,11 @@ if test -z "$ac_cv_prog_STRIP"; then + ac_ct_STRIP=$STRIP + # Extract the first word of "strip", so it can be a program name with args. + set dummy strip; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... " >&6; } +-if test ${ac_cv_prog_ac_ct_STRIP+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_ac_ct_STRIP+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test -n "$ac_ct_STRIP"; then + ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. + else +@@ -8814,15 +7509,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR + for as_dir in $PATH + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac ++ test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_STRIP="strip" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi + done +@@ -8833,11 +7524,11 @@ fi + fi + ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP + if test -n "$ac_ct_STRIP"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5 +-printf "%s\n" "$ac_ct_STRIP" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5 ++$as_echo "$ac_ct_STRIP" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } + fi + + if test "x$ac_ct_STRIP" = x; then +@@ -8845,8 +7536,8 @@ fi + else + case $cross_compiling:$ac_tool_warned in + yes:) +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +-printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} + ac_tool_warned=yes ;; + esac + STRIP=$ac_ct_STRIP +@@ -8865,12 +7556,11 @@ test -z "$STRIP" && STRIP=: + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args. + set dummy ${ac_tool_prefix}ranlib; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... 
" >&6; } +-if test ${ac_cv_prog_RANLIB+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_RANLIB+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test -n "$RANLIB"; then + ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test. + else +@@ -8878,15 +7568,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR + for as_dir in $PATH + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac ++ test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi + done +@@ -8897,11 +7583,11 @@ fi + fi + RANLIB=$ac_cv_prog_RANLIB + if test -n "$RANLIB"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5 +-printf "%s\n" "$RANLIB" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5 ++$as_echo "$RANLIB" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } + fi + + +@@ -8910,12 +7596,11 @@ if test -z "$ac_cv_prog_RANLIB"; then + ac_ct_RANLIB=$RANLIB + # Extract the first word of "ranlib", so it can be a program name with args. + set dummy ranlib; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... 
" >&6; } +-if test ${ac_cv_prog_ac_ct_RANLIB+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_ac_ct_RANLIB+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test -n "$ac_ct_RANLIB"; then + ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test. + else +@@ -8923,15 +7608,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR + for as_dir in $PATH + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac ++ test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_RANLIB="ranlib" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi + done +@@ -8942,11 +7623,11 @@ fi + fi + ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB + if test -n "$ac_ct_RANLIB"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5 +-printf "%s\n" "$ac_ct_RANLIB" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5 ++$as_echo "$ac_ct_RANLIB" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } + fi + + if test "x$ac_ct_RANLIB" = x; then +@@ -8954,8 +7635,8 @@ fi + else + case $cross_compiling:$ac_tool_warned in + yes:) +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +-printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 
++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} + ac_tool_warned=yes ;; + esac + RANLIB=$ac_ct_RANLIB +@@ -8978,14 +7659,14 @@ old_postuninstall_cmds= + + if test -n "$RANLIB"; then + case $host_os in +- bitrig* | openbsd*) +- old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$tool_oldlib" ++ openbsd*) ++ old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$oldlib" + ;; + *) +- old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$tool_oldlib" ++ old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$oldlib" + ;; + esac +- old_archive_cmds="$old_archive_cmds~\$RANLIB \$tool_oldlib" ++ old_archive_cmds="$old_archive_cmds~\$RANLIB \$oldlib" + fi + + case $host_os in +@@ -9044,12 +7725,11 @@ compiler=$CC + + + # Check for command to grab the raw symbol name followed by C symbol from nm. +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking command to parse $NM output from $compiler object" >&5 +-printf %s "checking command to parse $NM output from $compiler object... " >&6; } +-if test ${lt_cv_sys_global_symbol_pipe+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking command to parse $NM output from $compiler object" >&5 ++$as_echo_n "checking command to parse $NM output from $compiler object... " >&6; } ++if ${lt_cv_sys_global_symbol_pipe+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + + # These are sane defaults that work on at least a few old systems. + # [They come from Ultrix. What could be older than Ultrix?!! ;)] +@@ -9069,7 +7749,7 @@ cygwin* | mingw* | pw32* | cegcc*) + symcode='[ABCDGISTW]' + ;; + hpux*) +- if test ia64 = "$host_cpu"; then ++ if test "$host_cpu" = ia64; then + symcode='[ABCDEGRST]' + fi + ;; +@@ -9102,44 +7782,14 @@ case `$NM -V 2>&1` in + symcode='[ABCDGIRSTW]' ;; + esac + +-if test "$lt_cv_nm_interface" = "MS dumpbin"; then +- # Gets list of data symbols to import. 
+- lt_cv_sys_global_symbol_to_import="$SED -n -e 's/^I .* \(.*\)$/\1/p'" +- # Adjust the below global symbol transforms to fixup imported variables. +- lt_cdecl_hook=" -e 's/^I .* \(.*\)$/extern __declspec(dllimport) char \1;/p'" +- lt_c_name_hook=" -e 's/^I .* \(.*\)$/ {\"\1\", (void *) 0},/p'" +- lt_c_name_lib_hook="\ +- -e 's/^I .* \(lib.*\)$/ {\"\1\", (void *) 0},/p'\ +- -e 's/^I .* \(.*\)$/ {\"lib\1\", (void *) 0},/p'" +-else +- # Disable hooks by default. +- lt_cv_sys_global_symbol_to_import= +- lt_cdecl_hook= +- lt_c_name_hook= +- lt_c_name_lib_hook= +-fi +- + # Transform an extracted symbol line into a proper C declaration. + # Some systems (esp. on ia64) link data and code symbols differently, + # so use this general approach. +-lt_cv_sys_global_symbol_to_cdecl="$SED -n"\ +-$lt_cdecl_hook\ +-" -e 's/^T .* \(.*\)$/extern int \1();/p'"\ +-" -e 's/^$symcode$symcode* .* \(.*\)$/extern char \1;/p'" ++lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" + + # Transform an extracted symbol line into symbol name and symbol address +-lt_cv_sys_global_symbol_to_c_name_address="$SED -n"\ +-$lt_c_name_hook\ +-" -e 's/^: \(.*\) .*$/ {\"\1\", (void *) 0},/p'"\ +-" -e 's/^$symcode$symcode* .* \(.*\)$/ {\"\1\", (void *) \&\1},/p'" +- +-# Transform an extracted symbol line into symbol name with lib prefix and +-# symbol address. 
+-lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="$SED -n"\ +-$lt_c_name_lib_hook\ +-" -e 's/^: \(.*\) .*$/ {\"\1\", (void *) 0},/p'"\ +-" -e 's/^$symcode$symcode* .* \(lib.*\)$/ {\"\1\", (void *) \&\1},/p'"\ +-" -e 's/^$symcode$symcode* .* \(.*\)$/ {\"lib\1\", (void *) \&\1},/p'" ++lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'" ++lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'" + + # Handle CRLF in mingw tool chain + opt_cr= +@@ -9157,29 +7807,24 @@ for ac_symprfx in "" "_"; do + + # Write the raw and C identifiers. + if test "$lt_cv_nm_interface" = "MS dumpbin"; then +- # Fake it for dumpbin and say T for any non-static function, +- # D for any global variable and I for any imported variable. +- # Also find C++ and __fastcall symbols from MSVC++ or ICC, ++ # Fake it for dumpbin and say T for any non-static function ++ # and D for any global variable. ++ # Also find C++ and __fastcall symbols from MSVC++, + # which start with @ or ?. 
+ lt_cv_sys_global_symbol_pipe="$AWK '"\ + " {last_section=section; section=\$ 3};"\ +-" /^COFF SYMBOL TABLE/{for(i in hide) delete hide[i]};"\ + " /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\ +-" /^ *Symbol name *: /{split(\$ 0,sn,\":\"); si=substr(sn[2],2)};"\ +-" /^ *Type *: code/{print \"T\",si,substr(si,length(prfx))};"\ +-" /^ *Type *: data/{print \"I\",si,substr(si,length(prfx))};"\ + " \$ 0!~/External *\|/{next};"\ + " / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\ + " {if(hide[section]) next};"\ +-" {f=\"D\"}; \$ 0~/\(\).*\|/{f=\"T\"};"\ +-" {split(\$ 0,a,/\||\r/); split(a[2],s)};"\ +-" s[1]~/^[@?]/{print f,s[1],s[1]; next};"\ +-" s[1]~prfx {split(s[1],t,\"@\"); print f,t[1],substr(t[1],length(prfx))}"\ ++" {f=0}; \$ 0~/\(\).*\|/{f=1}; {printf f ? \"T \" : \"D \"};"\ ++" {split(\$ 0, a, /\||\r/); split(a[2], s)};"\ ++" s[1]~/^[@?]/{print s[1], s[1]; next};"\ ++" s[1]~prfx {split(s[1],t,\"@\"); print t[1], substr(t[1],length(prfx))}"\ + " ' prfx=^$ac_symprfx" + else +- lt_cv_sys_global_symbol_pipe="$SED -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" ++ lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" + fi +- lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | $SED '/ __gnu_lto/d'" + + # Check to see that the pipe works correctly. + pipe_works=no +@@ -9201,14 +7846,14 @@ _LT_EOF + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + # Now try to grab the symbols. 
+ nlist=conftest.nm + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist\""; } >&5 + (eval $NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist) 2>&5 + ac_status=$? +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && test -s "$nlist"; then + # Try sorting and uniquifying the output. + if sort "$nlist" | uniq > "$nlist"T; then +@@ -9221,18 +7866,6 @@ _LT_EOF + if $GREP ' nm_test_var$' "$nlist" >/dev/null; then + if $GREP ' nm_test_func$' "$nlist" >/dev/null; then + cat <<_LT_EOF > conftest.$ac_ext +-/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ +-#if defined _WIN32 || defined __CYGWIN__ || defined _WIN32_WCE +-/* DATA imports from DLLs on WIN32 can't be const, because runtime +- relocations are performed -- see ld's documentation on pseudo-relocs. */ +-# define LT_DLSYM_CONST +-#elif defined __osf__ +-/* This system does not cope well with relocations in const data. */ +-# define LT_DLSYM_CONST +-#else +-# define LT_DLSYM_CONST const +-#endif +- + #ifdef __cplusplus + extern "C" { + #endif +@@ -9244,7 +7877,7 @@ _LT_EOF + cat <<_LT_EOF >> conftest.$ac_ext + + /* The mapping between symbol names and symbols. */ +-LT_DLSYM_CONST struct { ++const struct { + const char *name; + void *address; + } +@@ -9252,7 +7885,7 @@ lt__PROGRAM__LTX_preloaded_symbols[] = + { + { "@PROGRAM@", (void *) 0 }, + _LT_EOF +- $SED "s/^$symcode$symcode* .* \(.*\)$/ {\"\1\", (void *) \&\1},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext ++ $SED "s/^$symcode$symcode* \(.*\) \(.*\)$/ {\"\2\", (void *) \&\2},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext + cat <<\_LT_EOF >> conftest.$ac_ext + {0, (void *) 0} + }; +@@ -9270,19 +7903,19 @@ static const void *lt_preloaded_setup() { + _LT_EOF + # Now try linking the two files. 
+ mv conftest.$ac_objext conftstm.$ac_objext +- lt_globsym_save_LIBS=$LIBS +- lt_globsym_save_CFLAGS=$CFLAGS +- LIBS=conftstm.$ac_objext ++ lt_save_LIBS="$LIBS" ++ lt_save_CFLAGS="$CFLAGS" ++ LIBS="conftstm.$ac_objext" + CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag" + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 + (eval $ac_link) 2>&5 + ac_status=$? +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 +- test $ac_status = 0; } && test -s conftest$ac_exeext; then ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ test $ac_status = 0; } && test -s conftest${ac_exeext}; then + pipe_works=yes + fi +- LIBS=$lt_globsym_save_LIBS +- CFLAGS=$lt_globsym_save_CFLAGS ++ LIBS="$lt_save_LIBS" ++ CFLAGS="$lt_save_CFLAGS" + else + echo "cannot find nm_test_func in $nlist" >&5 + fi +@@ -9299,7 +7932,7 @@ _LT_EOF + rm -rf conftest* conftst* + + # Do not use the global_symbol_pipe unless it works. +- if test yes = "$pipe_works"; then ++ if test "$pipe_works" = yes; then + break + else + lt_cv_sys_global_symbol_pipe= +@@ -9312,18 +7945,11 @@ if test -z "$lt_cv_sys_global_symbol_pipe"; then + lt_cv_sys_global_symbol_to_cdecl= + fi + if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: failed" >&5 +-printf "%s\n" "failed" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: failed" >&5 ++$as_echo "failed" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: ok" >&5 +-printf "%s\n" "ok" >&6; } +-fi +- +-# Response file support. 
+-if test "$lt_cv_nm_interface" = "MS dumpbin"; then +- nm_file_list_spec='@' +-elif $NM --help 2>/dev/null | grep '[@]FILE' >/dev/null; then +- nm_file_list_spec='@' ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: ok" >&5 ++$as_echo "ok" >&6; } + fi + + +@@ -9346,189 +7972,46 @@ fi + + + +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for sysroot" >&5 +-printf %s "checking for sysroot... " >&6; } +- +-# Check whether --with-sysroot was given. +-if test ${with_sysroot+y} +-then : +- withval=$with_sysroot; +-else $as_nop +- with_sysroot=no +-fi +- +- +-lt_sysroot= +-case $with_sysroot in #( +- yes) +- if test yes = "$GCC"; then +- lt_sysroot=`$CC --print-sysroot 2>/dev/null` +- fi +- ;; #( +- /*) +- lt_sysroot=`echo "$with_sysroot" | $SED -e "$sed_quote_subst"` +- ;; #( +- no|'') +- ;; #( +- *) +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $with_sysroot" >&5 +-printf "%s\n" "$with_sysroot" >&6; } +- as_fn_error $? "The sysroot must be an absolute path." "$LINENO" 5 +- ;; +-esac +- +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: ${lt_sysroot:-no}" >&5 +-printf "%s\n" "${lt_sysroot:-no}" >&6; } +- +- +- +- +- +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for a working dd" >&5 +-printf %s "checking for a working dd... 
" >&6; } +-if test ${ac_cv_path_lt_DD+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop +- printf 0123456789abcdef0123456789abcdef >conftest.i +-cat conftest.i conftest.i >conftest2.i +-: ${lt_DD:=$DD} +-if test -z "$lt_DD"; then +- ac_path_lt_DD_found=false +- # Loop through the user's path and test for each of PROGNAME-LIST +- as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +-for as_dir in $PATH +-do +- IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac +- for ac_prog in dd +- do +- for ac_exec_ext in '' $ac_executable_extensions; do +- ac_path_lt_DD="$as_dir$ac_prog$ac_exec_ext" +- as_fn_executable_p "$ac_path_lt_DD" || continue +-if "$ac_path_lt_DD" bs=32 count=1 conftest.out 2>/dev/null; then +- cmp -s conftest.i conftest.out \ +- && ac_cv_path_lt_DD="$ac_path_lt_DD" ac_path_lt_DD_found=: +-fi +- $ac_path_lt_DD_found && break 3 +- done +- done +- done +-IFS=$as_save_IFS +- if test -z "$ac_cv_path_lt_DD"; then +- : +- fi +-else +- ac_cv_path_lt_DD=$lt_DD +-fi +- +-rm -f conftest.i conftest2.i conftest.out +-fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_lt_DD" >&5 +-printf "%s\n" "$ac_cv_path_lt_DD" >&6; } +- +- +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to truncate binary pipes" >&5 +-printf %s "checking how to truncate binary pipes... 
" >&6; } +-if test ${lt_cv_truncate_bin+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop +- printf 0123456789abcdef0123456789abcdef >conftest.i +-cat conftest.i conftest.i >conftest2.i +-lt_cv_truncate_bin= +-if "$ac_cv_path_lt_DD" bs=32 count=1 conftest.out 2>/dev/null; then +- cmp -s conftest.i conftest.out \ +- && lt_cv_truncate_bin="$ac_cv_path_lt_DD bs=4096 count=1" +-fi +-rm -f conftest.i conftest2.i conftest.out +-test -z "$lt_cv_truncate_bin" && lt_cv_truncate_bin="$SED -e 4q" +-fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_truncate_bin" >&5 +-printf "%s\n" "$lt_cv_truncate_bin" >&6; } +- +- +- +- +- +- +- +-# Calculate cc_basename. Skip known compiler wrappers and cross-prefix. +-func_cc_basename () +-{ +- for cc_temp in $*""; do +- case $cc_temp in +- compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; +- distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; +- \-*) ;; +- *) break;; +- esac +- done +- func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` +-} + + # Check whether --enable-libtool-lock was given. +-if test ${enable_libtool_lock+y} +-then : ++if test "${enable_libtool_lock+set}" = set; then : + enableval=$enable_libtool_lock; + fi + +-test no = "$enable_libtool_lock" || enable_libtool_lock=yes ++test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes + + # Some flags need to be propagated to the compiler or linker for good + # libtool support. + case $host in + ia64-*-hpux*) +- # Find out what ABI is being produced by ac_compile, and set mode +- # options accordingly. ++ # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; }; then +- case `$FILECMD conftest.$ac_objext` in ++ case `/usr/bin/file conftest.$ac_objext` in + *ELF-32*) +- HPUX_IA64_MODE=32 ++ HPUX_IA64_MODE="32" + ;; + *ELF-64*) +- HPUX_IA64_MODE=64 ++ HPUX_IA64_MODE="64" + ;; + esac + fi + rm -rf conftest* + ;; + *-*-irix6*) +- # Find out what ABI is being produced by ac_compile, and set linker +- # options accordingly. ++ # Find out which ABI we are using. + echo '#line '$LINENO' "configure"' > conftest.$ac_ext + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then +- if test yes = "$lt_cv_prog_gnu_ld"; then +- case `$FILECMD conftest.$ac_objext` in ++ if test "$lt_cv_prog_gnu_ld" = yes; then ++ case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -melf32bsmip" + ;; +@@ -9540,7 +8023,7 @@ ia64-*-hpux*) + ;; + esac + else +- case `$FILECMD conftest.$ac_objext` in ++ case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -32" + ;; +@@ -9556,64 +8039,23 @@ ia64-*-hpux*) + rm -rf conftest* + ;; + +-mips64*-*linux*) +- # Find out what ABI is being produced by ac_compile, and set linker +- # options accordingly. +- echo '#line '$LINENO' "configure"' > conftest.$ac_ext +- if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 +- (eval $ac_compile) 2>&5 +- ac_status=$? +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 +- test $ac_status = 0; }; then +- emul=elf +- case `$FILECMD conftest.$ac_objext` in +- *32-bit*) +- emul="${emul}32" +- ;; +- *64-bit*) +- emul="${emul}64" +- ;; +- esac +- case `$FILECMD conftest.$ac_objext` in +- *MSB*) +- emul="${emul}btsmip" +- ;; +- *LSB*) +- emul="${emul}ltsmip" +- ;; +- esac +- case `$FILECMD conftest.$ac_objext` in +- *N32*) +- emul="${emul}n32" +- ;; +- esac +- LD="${LD-ld} -m $emul" +- fi +- rm -rf conftest* +- ;; +- + x86_64-*kfreebsd*-gnu|x86_64-*linux*|powerpc*-*linux*| \ + s390*-*linux*|s390*-*tpf*|sparc*-*linux*) +- # Find out what ABI is being produced by ac_compile, and set linker +- # options accordingly. Note that the listed cases only cover the +- # situations where additional linker options are needed (such as when +- # doing 32-bit compilation for a host where ld defaults to 64-bit, or +- # vice versa); the common cases where no linker options are needed do +- # not appear in the list. ++ # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then +- case `$FILECMD conftest.o` in ++ case `/usr/bin/file conftest.o` in + *32-bit*) + case $host in + x86_64-*kfreebsd*-gnu) + LD="${LD-ld} -m elf_i386_fbsd" + ;; + x86_64-*linux*) +- case `$FILECMD conftest.o` in ++ case `/usr/bin/file conftest.o` in + *x86-64*) + LD="${LD-ld} -m elf32_x86_64" + ;; +@@ -9665,14 +8107,13 @@ s390*-*linux*|s390*-*tpf*|sparc*-*linux*) + + *-*-sco3.2v5*) + # On SCO OpenServer 5, we need -belf to get full-featured binaries. 
+- SAVE_CFLAGS=$CFLAGS ++ SAVE_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS -belf" +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the C compiler needs -belf" >&5 +-printf %s "checking whether the C compiler needs -belf... " >&6; } +-if test ${lt_cv_cc_needs_belf+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler needs -belf" >&5 ++$as_echo_n "checking whether the C compiler needs -belf... " >&6; } ++if ${lt_cv_cc_needs_belf+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + ac_ext=c + ac_cpp='$CPP $CPPFLAGS' + ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +@@ -9683,20 +8124,19 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu + /* end confdefs.h. */ + + int +-main (void) ++main () + { + + ; + return 0; + } + _ACEOF +-if ac_fn_c_try_link "$LINENO" +-then : ++if ac_fn_c_try_link "$LINENO"; then : + lt_cv_cc_needs_belf=yes +-else $as_nop ++else + lt_cv_cc_needs_belf=no + fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam \ ++rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + ac_ext=c + ac_cpp='$CPP $CPPFLAGS' +@@ -9705,39 +8145,25 @@ ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $ + ac_compiler_gnu=$ac_cv_c_compiler_gnu + + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_cc_needs_belf" >&5 +-printf "%s\n" "$lt_cv_cc_needs_belf" >&6; } +- if test yes != "$lt_cv_cc_needs_belf"; then ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_cc_needs_belf" >&5 ++$as_echo "$lt_cv_cc_needs_belf" >&6; } ++ if test x"$lt_cv_cc_needs_belf" != x"yes"; then + # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf +- CFLAGS=$SAVE_CFLAGS ++ CFLAGS="$SAVE_CFLAGS" + fi + ;; +-*-*solaris*) +- # Find out what ABI is being produced by ac_compile, and set linker +- # options accordingly. ++sparc*-*solaris*) ++ # Find out which ABI we are using. 
+ echo 'int i;' > conftest.$ac_ext + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then +- case `$FILECMD conftest.o` in ++ case `/usr/bin/file conftest.o` in + *64-bit*) + case $lt_cv_prog_gnu_ld in +- yes*) +- case $host in +- i?86-*-solaris*|x86_64-*-solaris*) +- LD="${LD-ld} -m elf_x86_64" +- ;; +- sparc*-*-solaris*) +- LD="${LD-ld} -m elf64_sparc" +- ;; +- esac +- # GNU ld 2.21 introduced _sol2 emulations. Use them if available. +- if ${LD-ld} -V | grep _sol2 >/dev/null 2>&1; then +- LD=${LD-ld}_sol2 +- fi +- ;; ++ yes*) LD="${LD-ld} -m elf64_sparc" ;; + *) + if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then + LD="${LD-ld} -64" +@@ -9751,135 +8177,7 @@ printf "%s\n" "$lt_cv_cc_needs_belf" >&6; } + ;; + esac + +-need_locks=$enable_libtool_lock +- +-if test -n "$ac_tool_prefix"; then +- # Extract the first word of "${ac_tool_prefix}mt", so it can be a program name with args. +-set dummy ${ac_tool_prefix}mt; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... " >&6; } +-if test ${ac_cv_prog_MANIFEST_TOOL+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop +- if test -n "$MANIFEST_TOOL"; then +- ac_cv_prog_MANIFEST_TOOL="$MANIFEST_TOOL" # Let the user override the test. 
+-else +-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +-for as_dir in $PATH +-do +- IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac +- for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then +- ac_cv_prog_MANIFEST_TOOL="${ac_tool_prefix}mt" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 +- break 2 +- fi +-done +- done +-IFS=$as_save_IFS +- +-fi +-fi +-MANIFEST_TOOL=$ac_cv_prog_MANIFEST_TOOL +-if test -n "$MANIFEST_TOOL"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $MANIFEST_TOOL" >&5 +-printf "%s\n" "$MANIFEST_TOOL" >&6; } +-else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } +-fi +- +- +-fi +-if test -z "$ac_cv_prog_MANIFEST_TOOL"; then +- ac_ct_MANIFEST_TOOL=$MANIFEST_TOOL +- # Extract the first word of "mt", so it can be a program name with args. +-set dummy mt; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... " >&6; } +-if test ${ac_cv_prog_ac_ct_MANIFEST_TOOL+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop +- if test -n "$ac_ct_MANIFEST_TOOL"; then +- ac_cv_prog_ac_ct_MANIFEST_TOOL="$ac_ct_MANIFEST_TOOL" # Let the user override the test. 
+-else +-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +-for as_dir in $PATH +-do +- IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac +- for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then +- ac_cv_prog_ac_ct_MANIFEST_TOOL="mt" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 +- break 2 +- fi +-done +- done +-IFS=$as_save_IFS +- +-fi +-fi +-ac_ct_MANIFEST_TOOL=$ac_cv_prog_ac_ct_MANIFEST_TOOL +-if test -n "$ac_ct_MANIFEST_TOOL"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_MANIFEST_TOOL" >&5 +-printf "%s\n" "$ac_ct_MANIFEST_TOOL" >&6; } +-else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } +-fi +- +- if test "x$ac_ct_MANIFEST_TOOL" = x; then +- MANIFEST_TOOL=":" +- else +- case $cross_compiling:$ac_tool_warned in +-yes:) +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +-printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +-ac_tool_warned=yes ;; +-esac +- MANIFEST_TOOL=$ac_ct_MANIFEST_TOOL +- fi +-else +- MANIFEST_TOOL="$ac_cv_prog_MANIFEST_TOOL" +-fi +- +-test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $MANIFEST_TOOL is a manifest tool" >&5 +-printf %s "checking if $MANIFEST_TOOL is a manifest tool... " >&6; } +-if test ${lt_cv_path_mainfest_tool+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop +- lt_cv_path_mainfest_tool=no +- echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&5 +- $MANIFEST_TOOL '-?' 
2>conftest.err > conftest.out +- cat conftest.err >&5 +- if $GREP 'Manifest Tool' conftest.out > /dev/null; then +- lt_cv_path_mainfest_tool=yes +- fi +- rm -f conftest* +-fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_mainfest_tool" >&5 +-printf "%s\n" "$lt_cv_path_mainfest_tool" >&6; } +-if test yes != "$lt_cv_path_mainfest_tool"; then +- MANIFEST_TOOL=: +-fi +- +- +- +- ++need_locks="$enable_libtool_lock" + + + case $host_os in +@@ -9887,12 +8185,11 @@ fi + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}dsymutil", so it can be a program name with args. + set dummy ${ac_tool_prefix}dsymutil; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... " >&6; } +-if test ${ac_cv_prog_DSYMUTIL+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_DSYMUTIL+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test -n "$DSYMUTIL"; then + ac_cv_prog_DSYMUTIL="$DSYMUTIL" # Let the user override the test. + else +@@ -9900,15 +8197,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR + for as_dir in $PATH + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac ++ test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_DSYMUTIL="${ac_tool_prefix}dsymutil" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi + done +@@ -9919,11 +8212,11 @@ fi + fi + DSYMUTIL=$ac_cv_prog_DSYMUTIL + if test -n "$DSYMUTIL"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $DSYMUTIL" >&5 +-printf "%s\n" "$DSYMUTIL" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DSYMUTIL" >&5 ++$as_echo "$DSYMUTIL" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } + fi + + +@@ -9932,12 +8225,11 @@ if test -z "$ac_cv_prog_DSYMUTIL"; then + ac_ct_DSYMUTIL=$DSYMUTIL + # Extract the first word of "dsymutil", so it can be a program name with args. + set dummy dsymutil; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... " >&6; } +-if test ${ac_cv_prog_ac_ct_DSYMUTIL+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_ac_ct_DSYMUTIL+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test -n "$ac_ct_DSYMUTIL"; then + ac_cv_prog_ac_ct_DSYMUTIL="$ac_ct_DSYMUTIL" # Let the user override the test. + else +@@ -9945,15 +8237,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR + for as_dir in $PATH + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac ++ test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_DSYMUTIL="dsymutil" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi + done +@@ -9964,11 +8252,11 @@ fi + fi + ac_ct_DSYMUTIL=$ac_cv_prog_ac_ct_DSYMUTIL + if test -n "$ac_ct_DSYMUTIL"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DSYMUTIL" >&5 +-printf "%s\n" "$ac_ct_DSYMUTIL" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DSYMUTIL" >&5 ++$as_echo "$ac_ct_DSYMUTIL" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } + fi + + if test "x$ac_ct_DSYMUTIL" = x; then +@@ -9976,8 +8264,8 @@ fi + else + case $cross_compiling:$ac_tool_warned in + yes:) +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +-printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} + ac_tool_warned=yes ;; + esac + DSYMUTIL=$ac_ct_DSYMUTIL +@@ -9989,12 +8277,11 @@ fi + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}nmedit", so it can be a program name with args. + set dummy ${ac_tool_prefix}nmedit; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... 
" >&6; } +-if test ${ac_cv_prog_NMEDIT+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_NMEDIT+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test -n "$NMEDIT"; then + ac_cv_prog_NMEDIT="$NMEDIT" # Let the user override the test. + else +@@ -10002,15 +8289,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR + for as_dir in $PATH + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac ++ test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_NMEDIT="${ac_tool_prefix}nmedit" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi + done +@@ -10021,11 +8304,11 @@ fi + fi + NMEDIT=$ac_cv_prog_NMEDIT + if test -n "$NMEDIT"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $NMEDIT" >&5 +-printf "%s\n" "$NMEDIT" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $NMEDIT" >&5 ++$as_echo "$NMEDIT" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } + fi + + +@@ -10034,12 +8317,11 @@ if test -z "$ac_cv_prog_NMEDIT"; then + ac_ct_NMEDIT=$NMEDIT + # Extract the first word of "nmedit", so it can be a program name with args. + set dummy nmedit; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... 
" >&6; } +-if test ${ac_cv_prog_ac_ct_NMEDIT+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_ac_ct_NMEDIT+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test -n "$ac_ct_NMEDIT"; then + ac_cv_prog_ac_ct_NMEDIT="$ac_ct_NMEDIT" # Let the user override the test. + else +@@ -10047,15 +8329,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR + for as_dir in $PATH + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac ++ test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_NMEDIT="nmedit" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi + done +@@ -10066,11 +8344,11 @@ fi + fi + ac_ct_NMEDIT=$ac_cv_prog_ac_ct_NMEDIT + if test -n "$ac_ct_NMEDIT"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_NMEDIT" >&5 +-printf "%s\n" "$ac_ct_NMEDIT" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_NMEDIT" >&5 ++$as_echo "$ac_ct_NMEDIT" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } + fi + + if test "x$ac_ct_NMEDIT" = x; then +@@ -10078,8 +8356,8 @@ fi + else + case $cross_compiling:$ac_tool_warned in + yes:) +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +-printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" 
>&5 ++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} + ac_tool_warned=yes ;; + esac + NMEDIT=$ac_ct_NMEDIT +@@ -10091,12 +8369,11 @@ fi + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}lipo", so it can be a program name with args. + set dummy ${ac_tool_prefix}lipo; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... " >&6; } +-if test ${ac_cv_prog_LIPO+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_LIPO+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test -n "$LIPO"; then + ac_cv_prog_LIPO="$LIPO" # Let the user override the test. + else +@@ -10104,15 +8381,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR + for as_dir in $PATH + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac ++ test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_LIPO="${ac_tool_prefix}lipo" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi + done +@@ -10123,11 +8396,11 @@ fi + fi + LIPO=$ac_cv_prog_LIPO + if test -n "$LIPO"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $LIPO" >&5 +-printf "%s\n" "$LIPO" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LIPO" >&5 ++$as_echo "$LIPO" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } + fi + + +@@ -10136,12 +8409,11 @@ if test -z "$ac_cv_prog_LIPO"; then + ac_ct_LIPO=$LIPO + # Extract the first word of "lipo", so it can be a program name with args. + set dummy lipo; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... " >&6; } +-if test ${ac_cv_prog_ac_ct_LIPO+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_ac_ct_LIPO+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test -n "$ac_ct_LIPO"; then + ac_cv_prog_ac_ct_LIPO="$ac_ct_LIPO" # Let the user override the test. + else +@@ -10149,15 +8421,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR + for as_dir in $PATH + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac ++ test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_LIPO="lipo" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi + done +@@ -10168,11 +8436,11 @@ fi + fi + ac_ct_LIPO=$ac_cv_prog_ac_ct_LIPO + if test -n "$ac_ct_LIPO"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_LIPO" >&5 +-printf "%s\n" "$ac_ct_LIPO" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_LIPO" >&5 ++$as_echo "$ac_ct_LIPO" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } + fi + + if test "x$ac_ct_LIPO" = x; then +@@ -10180,8 +8448,8 @@ fi + else + case $cross_compiling:$ac_tool_warned in + yes:) +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +-printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} + ac_tool_warned=yes ;; + esac + LIPO=$ac_ct_LIPO +@@ -10193,12 +8461,11 @@ fi + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}otool", so it can be a program name with args. + set dummy ${ac_tool_prefix}otool; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... " >&6; } +-if test ${ac_cv_prog_OTOOL+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... 
" >&6; } ++if ${ac_cv_prog_OTOOL+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test -n "$OTOOL"; then + ac_cv_prog_OTOOL="$OTOOL" # Let the user override the test. + else +@@ -10206,15 +8473,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR + for as_dir in $PATH + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac ++ test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_OTOOL="${ac_tool_prefix}otool" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi + done +@@ -10225,11 +8488,11 @@ fi + fi + OTOOL=$ac_cv_prog_OTOOL + if test -n "$OTOOL"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $OTOOL" >&5 +-printf "%s\n" "$OTOOL" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OTOOL" >&5 ++$as_echo "$OTOOL" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } + fi + + +@@ -10238,12 +8501,11 @@ if test -z "$ac_cv_prog_OTOOL"; then + ac_ct_OTOOL=$OTOOL + # Extract the first word of "otool", so it can be a program name with args. + set dummy otool; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... " >&6; } +-if test ${ac_cv_prog_ac_ct_OTOOL+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... 
" >&6; } ++if ${ac_cv_prog_ac_ct_OTOOL+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test -n "$ac_ct_OTOOL"; then + ac_cv_prog_ac_ct_OTOOL="$ac_ct_OTOOL" # Let the user override the test. + else +@@ -10251,15 +8513,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR + for as_dir in $PATH + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac ++ test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_OTOOL="otool" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi + done +@@ -10270,11 +8528,11 @@ fi + fi + ac_ct_OTOOL=$ac_cv_prog_ac_ct_OTOOL + if test -n "$ac_ct_OTOOL"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL" >&5 +-printf "%s\n" "$ac_ct_OTOOL" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL" >&5 ++$as_echo "$ac_ct_OTOOL" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } + fi + + if test "x$ac_ct_OTOOL" = x; then +@@ -10282,8 +8540,8 @@ fi + else + case $cross_compiling:$ac_tool_warned in + yes:) +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +-printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} + ac_tool_warned=yes ;; + esac + OTOOL=$ac_ct_OTOOL +@@ -10295,12 +8553,11 @@ fi + if test -n "$ac_tool_prefix"; then + # Extract the 
first word of "${ac_tool_prefix}otool64", so it can be a program name with args. + set dummy ${ac_tool_prefix}otool64; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... " >&6; } +-if test ${ac_cv_prog_OTOOL64+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_OTOOL64+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test -n "$OTOOL64"; then + ac_cv_prog_OTOOL64="$OTOOL64" # Let the user override the test. + else +@@ -10308,15 +8565,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR + for as_dir in $PATH + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac ++ test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_OTOOL64="${ac_tool_prefix}otool64" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi + done +@@ -10327,11 +8580,11 @@ fi + fi + OTOOL64=$ac_cv_prog_OTOOL64 + if test -n "$OTOOL64"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $OTOOL64" >&5 +-printf "%s\n" "$OTOOL64" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OTOOL64" >&5 ++$as_echo "$OTOOL64" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } + fi + + +@@ -10340,12 +8593,11 @@ if test -z "$ac_cv_prog_OTOOL64"; then + ac_ct_OTOOL64=$OTOOL64 + # Extract the first word of "otool64", so it can be a program name with args. 
+ set dummy otool64; ac_word=$2 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +-printf %s "checking for $ac_word... " >&6; } +-if test ${ac_cv_prog_ac_ct_OTOOL64+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_ac_ct_OTOOL64+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test -n "$ac_ct_OTOOL64"; then + ac_cv_prog_ac_ct_OTOOL64="$ac_ct_OTOOL64" # Let the user override the test. + else +@@ -10353,15 +8605,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR + for as_dir in $PATH + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac ++ test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do +- if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_OTOOL64="otool64" +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi + done +@@ -10372,11 +8620,11 @@ fi + fi + ac_ct_OTOOL64=$ac_cv_prog_ac_ct_OTOOL64 + if test -n "$ac_ct_OTOOL64"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL64" >&5 +-printf "%s\n" "$ac_ct_OTOOL64" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL64" >&5 ++$as_echo "$ac_ct_OTOOL64" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } + fi + + if test "x$ac_ct_OTOOL64" = x; then +@@ -10384,8 +8632,8 @@ fi + else + case $cross_compiling:$ac_tool_warned in + yes:) +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +-printf "%s\n" "$as_me: WARNING: 
using cross tools not prefixed with host triplet" >&2;} ++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} + ac_tool_warned=yes ;; + esac + OTOOL64=$ac_ct_OTOOL64 +@@ -10420,14 +8668,13 @@ fi + + + +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for -single_module linker flag" >&5 +-printf %s "checking for -single_module linker flag... " >&6; } +-if test ${lt_cv_apple_cc_single_mod+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -single_module linker flag" >&5 ++$as_echo_n "checking for -single_module linker flag... " >&6; } ++if ${lt_cv_apple_cc_single_mod+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + lt_cv_apple_cc_single_mod=no +- if test -z "$LT_MULTI_MODULE"; then ++ if test -z "${LT_MULTI_MODULE}"; then + # By default we will add the -single_module flag. You can override + # by either setting the environment variable LT_MULTI_MODULE + # non-empty at configure time, or by adding -multi_module to the +@@ -10439,13 +8686,7 @@ else $as_nop + $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ + -dynamiclib -Wl,-single_module conftest.c 2>conftest.err + _lt_result=$? +- # If there is a non-empty error log, and "single_module" +- # appears in it, assume the flag caused a linker warning +- if test -s conftest.err && $GREP single_module conftest.err; then +- cat conftest.err >&5 +- # Otherwise, if the output was created with a 0 exit code from +- # the compiler, it worked. +- elif test -f libconftest.dylib && test 0 = "$_lt_result"; then ++ if test -f libconftest.dylib && test ! 
-s conftest.err && test $_lt_result = 0; then + lt_cv_apple_cc_single_mod=yes + else + cat conftest.err >&5 +@@ -10454,15 +8695,13 @@ else $as_nop + rm -f conftest.* + fi + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_apple_cc_single_mod" >&5 +-printf "%s\n" "$lt_cv_apple_cc_single_mod" >&6; } +- +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for -exported_symbols_list linker flag" >&5 +-printf %s "checking for -exported_symbols_list linker flag... " >&6; } +-if test ${lt_cv_ld_exported_symbols_list+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_apple_cc_single_mod" >&5 ++$as_echo "$lt_cv_apple_cc_single_mod" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -exported_symbols_list linker flag" >&5 ++$as_echo_n "checking for -exported_symbols_list linker flag... " >&6; } ++if ${lt_cv_ld_exported_symbols_list+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + lt_cv_ld_exported_symbols_list=no + save_LDFLAGS=$LDFLAGS + echo "_main" > conftest.sym +@@ -10471,52 +8710,45 @@ else $as_nop + /* end confdefs.h. */ + + int +-main (void) ++main () + { + + ; + return 0; + } + _ACEOF +-if ac_fn_c_try_link "$LINENO" +-then : ++if ac_fn_c_try_link "$LINENO"; then : + lt_cv_ld_exported_symbols_list=yes +-else $as_nop ++else + lt_cv_ld_exported_symbols_list=no + fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam \ ++rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +- LDFLAGS=$save_LDFLAGS ++ LDFLAGS="$save_LDFLAGS" + + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_exported_symbols_list" >&5 +-printf "%s\n" "$lt_cv_ld_exported_symbols_list" >&6; } +- +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for -force_load linker flag" >&5 +-printf %s "checking for -force_load linker flag... 
" >&6; } +-if test ${lt_cv_ld_force_load+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_exported_symbols_list" >&5 ++$as_echo "$lt_cv_ld_exported_symbols_list" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -force_load linker flag" >&5 ++$as_echo_n "checking for -force_load linker flag... " >&6; } ++if ${lt_cv_ld_force_load+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + lt_cv_ld_force_load=no + cat > conftest.c << _LT_EOF + int forced_loaded() { return 2;} + _LT_EOF + echo "$LTCC $LTCFLAGS -c -o conftest.o conftest.c" >&5 + $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&5 +- echo "$AR $AR_FLAGS libconftest.a conftest.o" >&5 +- $AR $AR_FLAGS libconftest.a conftest.o 2>&5 +- echo "$RANLIB libconftest.a" >&5 +- $RANLIB libconftest.a 2>&5 ++ echo "$AR cru libconftest.a conftest.o" >&5 ++ $AR cru libconftest.a conftest.o 2>&5 + cat > conftest.c << _LT_EOF + int main() { return 0;} + _LT_EOF + echo "$LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a" >&5 + $LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a 2>conftest.err + _lt_result=$? +- if test -s conftest.err && $GREP force_load conftest.err; then +- cat conftest.err >&5 +- elif test -f conftest && test 0 = "$_lt_result" && $GREP forced_load conftest >/dev/null 2>&1; then ++ if test -f conftest && test ! 
-s conftest.err && test $_lt_result = 0 && $GREP forced_load conftest 2>&1 >/dev/null; then + lt_cv_ld_force_load=yes + else + cat conftest.err >&5 +@@ -10525,31 +8757,38 @@ _LT_EOF + rm -rf conftest.dSYM + + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_force_load" >&5 +-printf "%s\n" "$lt_cv_ld_force_load" >&6; } +- case $host_os in +- rhapsody* | darwin1.[012]) +- _lt_dar_allow_undefined='$wl-undefined ${wl}suppress' ;; +- darwin1.*) +- _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;; +- darwin*) +- case $MACOSX_DEPLOYMENT_TARGET,$host in +- 10.[012],*|,*powerpc*-darwin[5-8]*) +- _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;; +- *) +- _lt_dar_allow_undefined='$wl-undefined ${wl}dynamic_lookup' ;; +- esac ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_force_load" >&5 ++$as_echo "$lt_cv_ld_force_load" >&6; } ++ # Allow for Darwin 4-7 (macOS 10.0-10.3) although these are not expect to ++ # build without first building modern cctools / linker. ++ case $host_cpu-$host_os in ++ *-rhapsody* | *-darwin1.[012]) ++ _lt_dar_allow_undefined='${wl}-undefined ${wl}suppress' ;; ++ *-darwin1.*) ++ _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; ++ *-darwin*) ++ # darwin 5.x (macOS 10.1) onwards we only need to adjust when the ++ # deployment target is forced to an earlier version. 
++ case ${MACOSX_DEPLOYMENT_TARGET-UNSET},$host in ++ UNSET,*-darwin[89]*|UNSET,*-darwin[12][0123456789]*) ++ ;; ++ 10.[012][,.]*) ++ _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ++ ;; ++ *) ++ ;; ++ esac + ;; + esac +- if test yes = "$lt_cv_apple_cc_single_mod"; then ++ if test "$lt_cv_apple_cc_single_mod" = "yes"; then + _lt_dar_single_mod='$single_module' + fi +- if test yes = "$lt_cv_ld_exported_symbols_list"; then +- _lt_dar_export_syms=' $wl-exported_symbols_list,$output_objdir/$libname-symbols.expsym' ++ if test "$lt_cv_ld_exported_symbols_list" = "yes"; then ++ _lt_dar_export_syms=' ${wl}-exported_symbols_list,$output_objdir/${libname}-symbols.expsym' + else +- _lt_dar_export_syms='~$NMEDIT -s $output_objdir/$libname-symbols.expsym $lib' ++ _lt_dar_export_syms='~$NMEDIT -s $output_objdir/${libname}-symbols.expsym ${lib}' + fi +- if test : != "$DSYMUTIL" && test no = "$lt_cv_ld_force_load"; then ++ if test "$DSYMUTIL" != ":" && test "$lt_cv_ld_force_load" = "no"; then + _lt_dsymutil='~$DSYMUTIL $lib || :' + else + _lt_dsymutil= +@@ -10557,61 +8796,21 @@ printf "%s\n" "$lt_cv_ld_force_load" >&6; } + ;; + esac + +-# func_munge_path_list VARIABLE PATH +-# ----------------------------------- +-# VARIABLE is name of variable containing _space_ separated list of +-# directories to be munged by the contents of PATH, which is string +-# having a format: +-# "DIR[:DIR]:" +-# string "DIR[ DIR]" will be prepended to VARIABLE +-# ":DIR[:DIR]" +-# string "DIR[ DIR]" will be appended to VARIABLE +-# "DIRP[:DIRP]::[DIRA:]DIRA" +-# string "DIRP[ DIRP]" will be prepended to VARIABLE and string +-# "DIRA[ DIRA]" will be appended to VARIABLE +-# "DIR[:DIR]" +-# VARIABLE will be replaced by "DIR[ DIR]" +-func_munge_path_list () +-{ +- case x$2 in +- x) +- ;; +- *:) +- eval $1=\"`$ECHO $2 | $SED 's/:/ /g'` \$$1\" +- ;; +- x:*) +- eval $1=\"\$$1 `$ECHO $2 | $SED 's/:/ /g'`\" +- ;; +- *::*) +- eval $1=\"\$$1\ `$ECHO $2 | $SED -e 's/.*:://' -e 's/:/ 
/g'`\" +- eval $1=\"`$ECHO $2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \$$1\" +- ;; +- *) +- eval $1=\"`$ECHO $2 | $SED 's/:/ /g'`\" +- ;; +- esac +-} +- +-ac_fn_c_check_header_compile "$LINENO" "dlfcn.h" "ac_cv_header_dlfcn_h" "$ac_includes_default ++for ac_header in dlfcn.h ++do : ++ ac_fn_c_check_header_compile "$LINENO" "dlfcn.h" "ac_cv_header_dlfcn_h" "$ac_includes_default + " +-if test "x$ac_cv_header_dlfcn_h" = xyes +-then : +- printf "%s\n" "#define HAVE_DLFCN_H 1" >>confdefs.h ++if test "x$ac_cv_header_dlfcn_h" = xyes; then : ++ cat >>confdefs.h <<_ACEOF ++#define HAVE_DLFCN_H 1 ++_ACEOF + + fi + ++done + + + +-func_stripname_cnf () +-{ +- case $2 in +- .*) func_stripname_result=`$ECHO "$3" | $SED "s%^$1%%; s%\\\\$2\$%%"`;; +- *) func_stripname_result=`$ECHO "$3" | $SED "s%^$1%%; s%$2\$%%"`;; +- esac +-} # func_stripname_cnf +- +- + + + +@@ -10626,8 +8825,7 @@ func_stripname_cnf () + + + # Check whether --enable-shared was given. +-if test ${enable_shared+y} +-then : ++if test "${enable_shared+set}" = set; then : + enableval=$enable_shared; p=${PACKAGE-default} + case $enableval in + yes) enable_shared=yes ;; +@@ -10635,17 +8833,17 @@ then : + *) + enable_shared=no + # Look at the argument we got. We use all the common list separators. +- lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, ++ lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do +- IFS=$lt_save_ifs ++ IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_shared=yes + fi + done +- IFS=$lt_save_ifs ++ IFS="$lt_save_ifs" + ;; + esac +-else $as_nop ++else + enable_shared=yes + fi + +@@ -10658,8 +8856,7 @@ fi + + + # Check whether --enable-static was given. +-if test ${enable_static+y} +-then : ++if test "${enable_static+set}" = set; then : + enableval=$enable_static; p=${PACKAGE-default} + case $enableval in + yes) enable_static=yes ;; +@@ -10667,17 +8864,17 @@ then : + *) + enable_static=no + # Look at the argument we got. We use all the common list separators. 
+- lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, ++ lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do +- IFS=$lt_save_ifs ++ IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_static=yes + fi + done +- IFS=$lt_save_ifs ++ IFS="$lt_save_ifs" + ;; + esac +-else $as_nop ++else + enable_static=yes + fi + +@@ -10691,29 +8888,15 @@ fi + + + # Check whether --with-pic was given. +-if test ${with_pic+y} +-then : +- withval=$with_pic; lt_p=${PACKAGE-default} +- case $withval in +- yes|no) pic_mode=$withval ;; +- *) +- pic_mode=default +- # Look at the argument we got. We use all the common list separators. +- lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, +- for lt_pkg in $withval; do +- IFS=$lt_save_ifs +- if test "X$lt_pkg" = "X$lt_p"; then +- pic_mode=yes +- fi +- done +- IFS=$lt_save_ifs +- ;; +- esac +-else $as_nop ++if test "${with_pic+set}" = set; then : ++ withval=$with_pic; pic_mode="$withval" ++else + pic_mode=default + fi + + ++test -z "$pic_mode" && pic_mode=default ++ + + + +@@ -10721,8 +8904,7 @@ fi + + + # Check whether --enable-fast-install was given. +-if test ${enable_fast_install+y} +-then : ++if test "${enable_fast_install+set}" = set; then : + enableval=$enable_fast_install; p=${PACKAGE-default} + case $enableval in + yes) enable_fast_install=yes ;; +@@ -10730,17 +8912,17 @@ then : + *) + enable_fast_install=no + # Look at the argument we got. We use all the common list separators. 
+- lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, ++ lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do +- IFS=$lt_save_ifs ++ IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_fast_install=yes + fi + done +- IFS=$lt_save_ifs ++ IFS="$lt_save_ifs" + ;; + esac +-else $as_nop ++else + enable_fast_install=yes + fi + +@@ -10751,65 +8933,11 @@ fi + + + +- shared_archive_member_spec= +-case $host,$enable_shared in +-power*-*-aix[5-9]*,yes) +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking which variant of shared library versioning to provide" >&5 +-printf %s "checking which variant of shared library versioning to provide... " >&6; } +- +-# Check whether --with-aix-soname was given. +-if test ${with_aix_soname+y} +-then : +- withval=$with_aix_soname; case $withval in +- aix|svr4|both) +- ;; +- *) +- as_fn_error $? "Unknown argument to --with-aix-soname" "$LINENO" 5 +- ;; +- esac +- lt_cv_with_aix_soname=$with_aix_soname +-else $as_nop +- if test ${lt_cv_with_aix_soname+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop +- lt_cv_with_aix_soname=aix +-fi +- +- with_aix_soname=$lt_cv_with_aix_soname +-fi +- +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $with_aix_soname" >&5 +-printf "%s\n" "$with_aix_soname" >&6; } +- if test aix != "$with_aix_soname"; then +- # For the AIX way of multilib, we name the shared archive member +- # based on the bitwidth used, traditionally 'shr.o' or 'shr_64.o', +- # and 'shr.imp' or 'shr_64.imp', respectively, for the Import File. +- # Even when GNU compilers ignore OBJECT_MODE but need '-maix64' flag, +- # the AIX toolchain works better with OBJECT_MODE set (default 32). 
+- if test 64 = "${OBJECT_MODE-32}"; then +- shared_archive_member_spec=shr_64 +- else +- shared_archive_member_spec=shr +- fi +- fi +- ;; +-*) +- with_aix_soname=aix +- ;; +-esac +- +- +- +- +- +- +- + + + + # This can be used to rebuild libtool when needed +-LIBTOOL_DEPS=$ltmain ++LIBTOOL_DEPS="$ltmain" + + # Always use our own libtool. + LIBTOOL='$(SHELL) $(top_builddir)/libtool' +@@ -10834,10 +8962,6 @@ LIBTOOL='$(SHELL) $(top_builddir)/libtool' + + + +- +- +- +- + + + +@@ -10858,16 +8982,15 @@ test -z "$LN_S" && LN_S="ln -s" + + + +-if test -n "${ZSH_VERSION+set}"; then ++if test -n "${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST + fi + +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for objdir" >&5 +-printf %s "checking for objdir... " >&6; } +-if test ${lt_cv_objdir+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for objdir" >&5 ++$as_echo_n "checking for objdir... " >&6; } ++if ${lt_cv_objdir+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + rm -f .libs 2>/dev/null + mkdir .libs 2>/dev/null + if test -d .libs; then +@@ -10878,15 +9001,17 @@ else + fi + rmdir .libs 2>/dev/null + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_objdir" >&5 +-printf "%s\n" "$lt_cv_objdir" >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_objdir" >&5 ++$as_echo "$lt_cv_objdir" >&6; } + objdir=$lt_cv_objdir + + + + + +-printf "%s\n" "#define LT_OBJDIR \"$lt_cv_objdir/\"" >>confdefs.h ++cat >>confdefs.h <<_ACEOF ++#define LT_OBJDIR "$lt_cv_objdir/" ++_ACEOF + + + +@@ -10896,7 +9021,7 @@ aix3*) + # AIX sometimes has problems with the GCC collect2 program. For some + # reason, if we set the COLLECT_NAMES environment variable, the problems + # vanish in a puff of smoke. 
+- if test set != "${COLLECT_NAMES+set}"; then ++ if test "X${COLLECT_NAMES+set}" != Xset; then + COLLECT_NAMES= + export COLLECT_NAMES + fi +@@ -10907,14 +9032,14 @@ esac + ofile=libtool + can_build_shared=yes + +-# All known linkers require a '.a' archive for static linking (except MSVC and +-# ICC, which need '.lib'). ++# All known linkers require a `.a' archive for static linking (except MSVC, ++# which needs '.lib'). + libext=a + +-with_gnu_ld=$lt_cv_prog_gnu_ld ++with_gnu_ld="$lt_cv_prog_gnu_ld" + +-old_CC=$CC +-old_CFLAGS=$CFLAGS ++old_CC="$CC" ++old_CFLAGS="$CFLAGS" + + # Set sane defaults for various variables + test -z "$CC" && CC=cc +@@ -10923,8 +9048,15 @@ test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS + test -z "$LD" && LD=ld + test -z "$ac_objext" && ac_objext=o + +-func_cc_basename $compiler +-cc_basename=$func_cc_basename_result ++for cc_temp in $compiler""; do ++ case $cc_temp in ++ compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; ++ distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; ++ \-*) ;; ++ *) break;; ++ esac ++done ++cc_basename=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` + + + # Only perform the check for file, if the check method requires it +@@ -10932,30 +9064,29 @@ test -z "$MAGIC_CMD" && MAGIC_CMD=file + case $deplibs_check_method in + file_magic*) + if test "$file_magic_cmd" = '$MAGIC_CMD'; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for ${ac_tool_prefix}file" >&5 +-printf %s "checking for ${ac_tool_prefix}file... " >&6; } +-if test ${lt_cv_path_MAGIC_CMD+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ${ac_tool_prefix}file" >&5 ++$as_echo_n "checking for ${ac_tool_prefix}file... " >&6; } ++if ${lt_cv_path_MAGIC_CMD+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + case $MAGIC_CMD in + [\\/*] | ?:[\\/]*) +- lt_cv_path_MAGIC_CMD=$MAGIC_CMD # Let the user override the test with a path. 
++ lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. + ;; + *) +- lt_save_MAGIC_CMD=$MAGIC_CMD +- lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR ++ lt_save_MAGIC_CMD="$MAGIC_CMD" ++ lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" + for ac_dir in $ac_dummy; do +- IFS=$lt_save_ifs ++ IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. +- if test -f "$ac_dir/${ac_tool_prefix}file"; then +- lt_cv_path_MAGIC_CMD=$ac_dir/"${ac_tool_prefix}file" ++ if test -f $ac_dir/${ac_tool_prefix}file; then ++ lt_cv_path_MAGIC_CMD="$ac_dir/${ac_tool_prefix}file" + if test -n "$file_magic_test_file"; then + case $deplibs_check_method in + "file_magic "*) + file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` +- MAGIC_CMD=$lt_cv_path_MAGIC_CMD ++ MAGIC_CMD="$lt_cv_path_MAGIC_CMD" + if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | + $EGREP "$file_magic_regex" > /dev/null; then + : +@@ -10978,19 +9109,19 @@ _LT_EOF + break + fi + done +- IFS=$lt_save_ifs +- MAGIC_CMD=$lt_save_MAGIC_CMD ++ IFS="$lt_save_ifs" ++ MAGIC_CMD="$lt_save_MAGIC_CMD" + ;; + esac + fi + +-MAGIC_CMD=$lt_cv_path_MAGIC_CMD ++MAGIC_CMD="$lt_cv_path_MAGIC_CMD" + if test -n "$MAGIC_CMD"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5 +-printf "%s\n" "$MAGIC_CMD" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5 ++$as_echo "$MAGIC_CMD" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } + fi + + +@@ -10999,30 +9130,29 @@ fi + + if test -z "$lt_cv_path_MAGIC_CMD"; then + if test -n "$ac_tool_prefix"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for file" >&5 +-printf %s "checking for file... 
" >&6; } +-if test ${lt_cv_path_MAGIC_CMD+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for file" >&5 ++$as_echo_n "checking for file... " >&6; } ++if ${lt_cv_path_MAGIC_CMD+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + case $MAGIC_CMD in + [\\/*] | ?:[\\/]*) +- lt_cv_path_MAGIC_CMD=$MAGIC_CMD # Let the user override the test with a path. ++ lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. + ;; + *) +- lt_save_MAGIC_CMD=$MAGIC_CMD +- lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR ++ lt_save_MAGIC_CMD="$MAGIC_CMD" ++ lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" + for ac_dir in $ac_dummy; do +- IFS=$lt_save_ifs ++ IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. +- if test -f "$ac_dir/file"; then +- lt_cv_path_MAGIC_CMD=$ac_dir/"file" ++ if test -f $ac_dir/file; then ++ lt_cv_path_MAGIC_CMD="$ac_dir/file" + if test -n "$file_magic_test_file"; then + case $deplibs_check_method in + "file_magic "*) + file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` +- MAGIC_CMD=$lt_cv_path_MAGIC_CMD ++ MAGIC_CMD="$lt_cv_path_MAGIC_CMD" + if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | + $EGREP "$file_magic_regex" > /dev/null; then + : +@@ -11045,19 +9175,19 @@ _LT_EOF + break + fi + done +- IFS=$lt_save_ifs +- MAGIC_CMD=$lt_save_MAGIC_CMD ++ IFS="$lt_save_ifs" ++ MAGIC_CMD="$lt_save_MAGIC_CMD" + ;; + esac + fi + +-MAGIC_CMD=$lt_cv_path_MAGIC_CMD ++MAGIC_CMD="$lt_cv_path_MAGIC_CMD" + if test -n "$MAGIC_CMD"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5 +-printf "%s\n" "$MAGIC_CMD" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5 ++$as_echo "$MAGIC_CMD" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } + fi + + +@@ -11072,7 
+9202,7 @@ esac + + # Use C for the default configuration in the libtool script + +-lt_save_CC=$CC ++lt_save_CC="$CC" + ac_ext=c + ac_cpp='$CPP $CPPFLAGS' + ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +@@ -11126,11 +9256,15 @@ _lt_linker_boilerplate=`cat conftest.err` + $RM -r conftest* + + ++## CAVEAT EMPTOR: ++## There is no encapsulation within the following macros, do not change ++## the running order or otherwise move them around unless you know exactly ++## what you are doing... + if test -n "$compiler"; then + + lt_prog_compiler_no_builtin_flag= + +-if test yes = "$GCC"; then ++if test "$GCC" = yes; then + case $cc_basename in + nvcc*) + lt_prog_compiler_no_builtin_flag=' -Xcompiler -fno-builtin' ;; +@@ -11138,16 +9272,15 @@ if test yes = "$GCC"; then + lt_prog_compiler_no_builtin_flag=' -fno-builtin' ;; + esac + +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -fno-rtti -fno-exceptions" >&5 +-printf %s "checking if $compiler supports -fno-rtti -fno-exceptions... " >&6; } +-if test ${lt_cv_prog_compiler_rtti_exceptions+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -fno-rtti -fno-exceptions" >&5 ++$as_echo_n "checking if $compiler supports -fno-rtti -fno-exceptions... " >&6; } ++if ${lt_cv_prog_compiler_rtti_exceptions+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + lt_cv_prog_compiler_rtti_exceptions=no + ac_outfile=conftest.$ac_objext + echo "$lt_simple_compile_test_code" > conftest.$ac_ext +- lt_compiler_flag="-fno-rtti -fno-exceptions" ## exclude from sc_useless_quotes_in_assignment ++ lt_compiler_flag="-fno-rtti -fno-exceptions" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. 
+ # Note that $ac_compile itself does not contain backslashes and begins +@@ -11174,10 +9307,10 @@ else $as_nop + $RM conftest* + + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_rtti_exceptions" >&5 +-printf "%s\n" "$lt_cv_prog_compiler_rtti_exceptions" >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_rtti_exceptions" >&5 ++$as_echo "$lt_cv_prog_compiler_rtti_exceptions" >&6; } + +-if test yes = "$lt_cv_prog_compiler_rtti_exceptions"; then ++if test x"$lt_cv_prog_compiler_rtti_exceptions" = xyes; then + lt_prog_compiler_no_builtin_flag="$lt_prog_compiler_no_builtin_flag -fno-rtti -fno-exceptions" + else + : +@@ -11194,15 +9327,17 @@ fi + lt_prog_compiler_pic= + lt_prog_compiler_static= + ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 ++$as_echo_n "checking for $compiler option to produce PIC... " >&6; } + +- if test yes = "$GCC"; then ++ if test "$GCC" = yes; then + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_static='-static' + + case $host_os in + aix*) + # All AIX code is PIC. +- if test ia64 = "$host_cpu"; then ++ if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static='-Bstatic' + fi +@@ -11217,8 +9352,8 @@ lt_prog_compiler_static= + ;; + m68k) + # FIXME: we need at least 68020 code to build shared libraries, but +- # adding the '-m68020' flag to GCC prevents building anything better, +- # like '-m68040'. ++ # adding the `-m68020' flag to GCC prevents building anything better, ++ # like `-m68040'. 
+ lt_prog_compiler_pic='-m68020 -resident32 -malways-restore-a4' + ;; + esac +@@ -11234,11 +9369,6 @@ lt_prog_compiler_static= + # Although the cygwin gcc ignores -fPIC, still need this for old-style + # (--disable-auto-import) libraries + lt_prog_compiler_pic='-DDLL_EXPORT' +- case $host_os in +- os2*) +- lt_prog_compiler_static='$wl-static' +- ;; +- esac + ;; + + darwin* | rhapsody*) +@@ -11299,9 +9429,7 @@ lt_prog_compiler_static= + case $cc_basename in + nvcc*) # Cuda Compiler Driver 2.2 + lt_prog_compiler_wl='-Xlinker ' +- if test -n "$lt_prog_compiler_pic"; then +- lt_prog_compiler_pic="-Xcompiler $lt_prog_compiler_pic" +- fi ++ lt_prog_compiler_pic='-Xcompiler -fPIC' + ;; + esac + else +@@ -11309,7 +9437,7 @@ lt_prog_compiler_static= + case $host_os in + aix*) + lt_prog_compiler_wl='-Wl,' +- if test ia64 = "$host_cpu"; then ++ if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static='-Bstatic' + else +@@ -11317,29 +9445,10 @@ lt_prog_compiler_static= + fi + ;; + +- darwin* | rhapsody*) +- # PIC is the default on this platform +- # Common symbols not allowed in MH_DYLIB files +- lt_prog_compiler_pic='-fno-common' +- case $cc_basename in +- nagfor*) +- # NAG Fortran compiler +- lt_prog_compiler_wl='-Wl,-Wl,,' +- lt_prog_compiler_pic='-PIC' +- lt_prog_compiler_static='-Bstatic' +- ;; +- esac +- ;; +- + mingw* | cygwin* | pw32* | os2* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + lt_prog_compiler_pic='-DDLL_EXPORT' +- case $host_os in +- os2*) +- lt_prog_compiler_static='$wl-static' +- ;; +- esac + ;; + + hpux9* | hpux10* | hpux11*) +@@ -11355,7 +9464,7 @@ lt_prog_compiler_static= + ;; + esac + # Is there a better lt_prog_compiler_static that works with the bundled CC? 
+- lt_prog_compiler_static='$wl-a ${wl}archive' ++ lt_prog_compiler_static='${wl}-a ${wl}archive' + ;; + + irix5* | irix6* | nonstopux*) +@@ -11364,9 +9473,9 @@ lt_prog_compiler_static= + lt_prog_compiler_static='-non_shared' + ;; + +- linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) ++ linux* | k*bsd*-gnu | kopensolaris*-gnu) + case $cc_basename in +- # old Intel for x86_64, which still supported -KPIC. ++ # old Intel for x86_64 which still supported -KPIC. + ecc*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-KPIC' +@@ -11385,18 +9494,6 @@ lt_prog_compiler_static= + lt_prog_compiler_pic='--shared' + lt_prog_compiler_static='--static' + ;; +- nagfor*) +- # NAG Fortran compiler +- lt_prog_compiler_wl='-Wl,-Wl,,' +- lt_prog_compiler_pic='-PIC' +- lt_prog_compiler_static='-Bstatic' +- ;; +- tcc*) +- # Fabrice Bellard et al's Tiny C Compiler +- lt_prog_compiler_wl='-Wl,' +- lt_prog_compiler_pic='-fPIC' +- lt_prog_compiler_static='-static' +- ;; + pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) + # Portland Group compilers (*not* the Pentium gcc compiler, + # which looks to be a dead project) +@@ -11416,34 +9513,19 @@ lt_prog_compiler_static= + lt_prog_compiler_static='-qstaticlink' + ;; + *) +- case `$CC -V 2>&1 | $SED 5q` in +- *Sun\ Ceres\ Fortran* | *Sun*Fortran*\ [1-7].* | *Sun*Fortran*\ 8.[0-3]*) ++ case `$CC -V 2>&1 | sed 5q` in ++ *Sun\ F* | *Sun*Fortran*) + # Sun Fortran 8.3 passes all unrecognized flags to the linker + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + lt_prog_compiler_wl='' + ;; +- *Sun\ F* | *Sun*Fortran*) +- lt_prog_compiler_pic='-KPIC' +- lt_prog_compiler_static='-Bstatic' +- lt_prog_compiler_wl='-Qoption ld ' +- ;; + *Sun\ C*) + # Sun C 5.9 + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + lt_prog_compiler_wl='-Wl,' + ;; +- *Intel*\ [CF]*Compiler*) +- lt_prog_compiler_wl='-Wl,' +- lt_prog_compiler_pic='-fPIC' +- lt_prog_compiler_static='-static' +- ;; +- *Portland\ Group*) +- 
lt_prog_compiler_wl='-Wl,' +- lt_prog_compiler_pic='-fpic' +- lt_prog_compiler_static='-Bstatic' +- ;; + esac + ;; + esac +@@ -11474,7 +9556,7 @@ lt_prog_compiler_static= + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + case $cc_basename in +- f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) ++ f77* | f90* | f95*) + lt_prog_compiler_wl='-Qoption ld ';; + *) + lt_prog_compiler_wl='-Wl,';; +@@ -11494,7 +9576,7 @@ lt_prog_compiler_static= + ;; + + sysv4*MP*) +- if test -d /usr/nec; then ++ if test -d /usr/nec ;then + lt_prog_compiler_pic='-Kconform_pic' + lt_prog_compiler_static='-Bstatic' + fi +@@ -11523,7 +9605,7 @@ lt_prog_compiler_static= + fi + + case $host_os in +- # For platforms that do not support PIC, -DPIC is meaningless: ++ # For platforms which do not support PIC, -DPIC is meaningless: + *djgpp*) + lt_prog_compiler_pic= + ;; +@@ -11531,33 +9613,27 @@ case $host_os in + lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC" + ;; + esac ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_prog_compiler_pic" >&5 ++$as_echo "$lt_prog_compiler_pic" >&6; } ++ ++ ++ ++ + +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 +-printf %s "checking for $compiler option to produce PIC... " >&6; } +-if test ${lt_cv_prog_compiler_pic+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop +- lt_cv_prog_compiler_pic=$lt_prog_compiler_pic +-fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic" >&5 +-printf "%s\n" "$lt_cv_prog_compiler_pic" >&6; } +-lt_prog_compiler_pic=$lt_cv_prog_compiler_pic + + # + # Check to make sure the PIC flag actually works. + # + if test -n "$lt_prog_compiler_pic"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic works" >&5 +-printf %s "checking if $compiler PIC flag $lt_prog_compiler_pic works... 
" >&6; } +-if test ${lt_cv_prog_compiler_pic_works+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic works" >&5 ++$as_echo_n "checking if $compiler PIC flag $lt_prog_compiler_pic works... " >&6; } ++if ${lt_cv_prog_compiler_pic_works+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + lt_cv_prog_compiler_pic_works=no + ac_outfile=conftest.$ac_objext + echo "$lt_simple_compile_test_code" > conftest.$ac_ext +- lt_compiler_flag="$lt_prog_compiler_pic -DPIC" ## exclude from sc_useless_quotes_in_assignment ++ lt_compiler_flag="$lt_prog_compiler_pic -DPIC" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins +@@ -11584,10 +9660,10 @@ else $as_nop + $RM conftest* + + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works" >&5 +-printf "%s\n" "$lt_cv_prog_compiler_pic_works" >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works" >&5 ++$as_echo "$lt_cv_prog_compiler_pic_works" >&6; } + +-if test yes = "$lt_cv_prog_compiler_pic_works"; then ++if test x"$lt_cv_prog_compiler_pic_works" = xyes; then + case $lt_prog_compiler_pic in + "" | " "*) ;; + *) lt_prog_compiler_pic=" $lt_prog_compiler_pic" ;; +@@ -11604,23 +9680,17 @@ fi + + + +- +- +- +- +- + # + # Check to make sure the static flag actually works. + # + wl=$lt_prog_compiler_wl eval lt_tmp_static_flag=\"$lt_prog_compiler_static\" +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5 +-printf %s "checking if $compiler static flag $lt_tmp_static_flag works... 
" >&6; } +-if test ${lt_cv_prog_compiler_static_works+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5 ++$as_echo_n "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; } ++if ${lt_cv_prog_compiler_static_works+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + lt_cv_prog_compiler_static_works=no +- save_LDFLAGS=$LDFLAGS ++ save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS $lt_tmp_static_flag" + echo "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then +@@ -11639,13 +9709,13 @@ else $as_nop + fi + fi + $RM -r conftest* +- LDFLAGS=$save_LDFLAGS ++ LDFLAGS="$save_LDFLAGS" + + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works" >&5 +-printf "%s\n" "$lt_cv_prog_compiler_static_works" >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works" >&5 ++$as_echo "$lt_cv_prog_compiler_static_works" >&6; } + +-if test yes = "$lt_cv_prog_compiler_static_works"; then ++if test x"$lt_cv_prog_compiler_static_works" = xyes; then + : + else + lt_prog_compiler_static= +@@ -11657,12 +9727,11 @@ fi + + + +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 +-printf %s "checking if $compiler supports -c -o file.$ac_objext... " >&6; } +-if test ${lt_cv_prog_compiler_c_o+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 ++$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... 
" >&6; } ++if ${lt_cv_prog_compiler_c_o+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + lt_cv_prog_compiler_c_o=no + $RM -r conftest 2>/dev/null + mkdir conftest +@@ -11705,20 +9774,19 @@ else $as_nop + $RM conftest* + + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5 +-printf "%s\n" "$lt_cv_prog_compiler_c_o" >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5 ++$as_echo "$lt_cv_prog_compiler_c_o" >&6; } + + + + + + +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 +-printf %s "checking if $compiler supports -c -o file.$ac_objext... " >&6; } +-if test ${lt_cv_prog_compiler_c_o+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 ++$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } ++if ${lt_cv_prog_compiler_c_o+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + lt_cv_prog_compiler_c_o=no + $RM -r conftest 2>/dev/null + mkdir conftest +@@ -11761,28 +9829,28 @@ else $as_nop + $RM conftest* + + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5 +-printf "%s\n" "$lt_cv_prog_compiler_c_o" >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5 ++$as_echo "$lt_cv_prog_compiler_c_o" >&6; } + + + + +-hard_links=nottested +-if test no = "$lt_cv_prog_compiler_c_o" && test no != "$need_locks"; then ++hard_links="nottested" ++if test "$lt_cv_prog_compiler_c_o" = no && test "$need_locks" != no; then + # do not overwrite the value of need_locks provided by the user +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5 +-printf %s "checking if we can lock with hard links... 
" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5 ++$as_echo_n "checking if we can lock with hard links... " >&6; } + hard_links=yes + $RM conftest* + ln conftest.a conftest.b 2>/dev/null && hard_links=no + touch conftest.a + ln conftest.a conftest.b 2>&5 || hard_links=no + ln conftest.a conftest.b 2>/dev/null && hard_links=no +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5 +-printf "%s\n" "$hard_links" >&6; } +- if test no = "$hard_links"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&5 +-printf "%s\n" "$as_me: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&2;} ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5 ++$as_echo "$hard_links" >&6; } ++ if test "$hard_links" = no; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5 ++$as_echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;} + need_locks=warn + fi + else +@@ -11794,8 +9862,8 @@ fi + + + +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 +-printf %s "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 ++$as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... 
" >&6; } + + runpath_var= + allow_undefined_flag= +@@ -11810,6 +9878,7 @@ printf %s "checking whether the $compiler linker ($LD) supports shared libraries + hardcode_direct=no + hardcode_direct_absolute=no + hardcode_libdir_flag_spec= ++ hardcode_libdir_flag_spec_ld= + hardcode_libdir_separator= + hardcode_minus_L=no + hardcode_shlibpath_var=unsupported +@@ -11825,9 +9894,9 @@ printf %s "checking whether the $compiler linker ($LD) supports shared libraries + # included in the symbol list + include_expsyms= + # exclude_expsyms can be an extended regexp of symbols to exclude +- # it will be wrapped by ' (' and ')$', so one must not match beginning or +- # end of line. Example: 'a|bc|.*d.*' will exclude the symbols 'a' and 'bc', +- # as well as any symbol that contains 'd'. ++ # it will be wrapped by ` (' and `)$', so one must not match beginning or ++ # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc', ++ # as well as any symbol that contains `d'. + exclude_expsyms='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' + # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out + # platforms (ab)use it in PIC code, but their linkers get confused if +@@ -11839,18 +9908,18 @@ printf %s "checking whether the $compiler linker ($LD) supports shared libraries + + case $host_os in + cygwin* | mingw* | pw32* | cegcc*) +- # FIXME: the MSVC++ and ICC port hasn't been tested in a loooong time ++ # FIXME: the MSVC++ port hasn't been tested in a loooong time + # When not using gcc, we currently assume that we are using +- # Microsoft Visual C++ or Intel C++ Compiler. +- if test yes != "$GCC"; then ++ # Microsoft Visual C++. 
++ if test "$GCC" != yes; then + with_gnu_ld=no + fi + ;; + interix*) +- # we just hope/assume this is gcc and not c89 (= MSVC++ or ICC) ++ # we just hope/assume this is gcc and not c89 (= MSVC++) + with_gnu_ld=yes + ;; +- openbsd* | bitrig*) ++ openbsd*) + with_gnu_ld=no + ;; + esac +@@ -11860,7 +9929,7 @@ printf %s "checking whether the $compiler linker ($LD) supports shared libraries + # On some targets, GNU ld is compatible enough with the native linker + # that we're better off using the native interface for both. + lt_use_gnu_ld_interface=no +- if test yes = "$with_gnu_ld"; then ++ if test "$with_gnu_ld" = yes; then + case $host_os in + aix*) + # The AIX port of GNU ld has always aspired to compatibility +@@ -11882,24 +9951,24 @@ printf %s "checking whether the $compiler linker ($LD) supports shared libraries + esac + fi + +- if test yes = "$lt_use_gnu_ld_interface"; then ++ if test "$lt_use_gnu_ld_interface" = yes; then + # If archive_cmds runs LD, not CC, wlarc should be empty +- wlarc='$wl' ++ wlarc='${wl}' + + # Set some defaults for GNU ld with shared library support. These + # are reset later if shared libraries are not supported. Putting them + # here allows them to be overridden if necessary. + runpath_var=LD_RUN_PATH +- hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' +- export_dynamic_flag_spec='$wl--export-dynamic' ++ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' ++ export_dynamic_flag_spec='${wl}--export-dynamic' + # ancient GNU ld didn't support --whole-archive et. al. 
+ if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then +- whole_archive_flag_spec=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive' ++ whole_archive_flag_spec="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + else + whole_archive_flag_spec= + fi + supports_anon_versioning=no +- case `$LD -v | $SED -e 's/([^)]\+)\s\+//' 2>&1` in ++ case `$LD -v 2>&1` in + *GNU\ gold*) supports_anon_versioning=yes ;; + *\ [01].* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11 + *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... +@@ -11912,7 +9981,7 @@ printf %s "checking whether the $compiler linker ($LD) supports shared libraries + case $host_os in + aix[3-9]*) + # On AIX/PPC, the GNU linker is very broken +- if test ia64 != "$host_cpu"; then ++ if test "$host_cpu" != ia64; then + ld_shlibs=no + cat <<_LT_EOF 1>&2 + +@@ -11931,7 +10000,7 @@ _LT_EOF + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support +- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' ++ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds='' + ;; + m68k) +@@ -11947,7 +10016,7 @@ _LT_EOF + allow_undefined_flag=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation. FIXME +- archive_cmds='$CC -nostart $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' ++ archive_cmds='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + else + ld_shlibs=no + fi +@@ -11957,98 +10026,68 @@ _LT_EOF + # _LT_TAGVAR(hardcode_libdir_flag_spec, ) is actually meaningless, + # as there is no search path for DLLs. 
+ hardcode_libdir_flag_spec='-L$libdir' +- export_dynamic_flag_spec='$wl--export-all-symbols' ++ export_dynamic_flag_spec='${wl}--export-all-symbols' + allow_undefined_flag=unsupported + always_export_symbols=no + enable_shared_with_static_runtimes=yes +- export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' +- exclude_expsyms='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' ++ export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' + + if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then +- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' +- # If the export-symbols file already is a .def file, use it as +- # is; otherwise, prepend EXPORTS... +- archive_expsym_cmds='if test DEF = "`$SED -n -e '\''s/^[ ]*//'\'' -e '\''/^\(;.*\)*$/d'\'' -e '\''s/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p'\'' -e q $export_symbols`" ; then +- cp $export_symbols $output_objdir/$soname.def; +- else +- echo EXPORTS > $output_objdir/$soname.def; +- cat $export_symbols >> $output_objdir/$soname.def; +- fi~ +- $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' ++ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' ++ # If the export-symbols file already is a .def file (1st line ++ # is EXPORTS), use it as is; otherwise, prepend... 
++ archive_expsym_cmds='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then ++ cp $export_symbols $output_objdir/$soname.def; ++ else ++ echo EXPORTS > $output_objdir/$soname.def; ++ cat $export_symbols >> $output_objdir/$soname.def; ++ fi~ ++ $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + else + ld_shlibs=no + fi + ;; + + haiku*) +- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' ++ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + link_all_deplibs=yes + ;; + +- os2*) +- hardcode_libdir_flag_spec='-L$libdir' +- hardcode_minus_L=yes +- allow_undefined_flag=unsupported +- shrext_cmds=.dll +- archive_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ +- $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ +- $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ +- $ECHO EXPORTS >> $output_objdir/$libname.def~ +- emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ +- $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ +- emximp -o $lib $output_objdir/$libname.def' +- archive_expsym_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ +- $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ +- $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ +- $ECHO EXPORTS >> $output_objdir/$libname.def~ +- prefix_cmds="$SED"~ +- if test EXPORTS = "`$SED 1q $export_symbols`"; then +- prefix_cmds="$prefix_cmds -e 1d"; +- fi~ +- prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ +- cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ +- $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ 
+- emximp -o $lib $output_objdir/$libname.def' +- old_archive_From_new_cmds='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' +- enable_shared_with_static_runtimes=yes +- file_list_spec='@' +- ;; +- + interix[3-9]*) + hardcode_direct=no + hardcode_shlibpath_var=no +- hardcode_libdir_flag_spec='$wl-rpath,$libdir' +- export_dynamic_flag_spec='$wl-E' ++ hardcode_libdir_flag_spec='${wl}-rpath,$libdir' ++ export_dynamic_flag_spec='${wl}-E' + # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. + # Instead, shared libraries are loaded at an image base (0x10000000 by + # default) and relocated if they conflict, which is a slow very memory + # consuming and fragmenting process. To avoid this, we pick a random, + # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link + # time. Moving up from 0x10000000 also allows more sbrk(2) space. +- archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' +- archive_expsym_cmds='$SED "s|^|_|" $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--retain-symbols-file,$output_objdir/$soname.expsym $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' ++ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' ++ archive_expsym_cmds='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + ;; + +- gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu) ++ gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu | uclinuxfdpiceabi) + tmp_diet=no +- if test linux-dietlibc = 
"$host_os"; then ++ if test "$host_os" = linux-dietlibc; then + case $cc_basename in + diet\ *) tmp_diet=yes;; # linux-dietlibc with static linking (!diet-dyn) + esac + fi + if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \ +- && test no = "$tmp_diet" ++ && test "$tmp_diet" = no + then + tmp_addflag=' $pic_flag' + tmp_sharedflag='-shared' + case $cc_basename,$host_cpu in + pgcc*) # Portland Group C compiler +- whole_archive_flag_spec='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' ++ whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + tmp_addflag=' $pic_flag' + ;; + pgf77* | pgf90* | pgf95* | pgfortran*) + # Portland Group f77 and f90 compilers +- whole_archive_flag_spec='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' ++ whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + tmp_addflag=' $pic_flag -Mnomain' ;; + ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 + tmp_addflag=' -i_dynamic' ;; +@@ -12059,47 +10098,43 @@ _LT_EOF + lf95*) # Lahey Fortran 8.1 + whole_archive_flag_spec= + tmp_sharedflag='--shared' ;; +- nagfor*) # NAGFOR 5.3 +- tmp_sharedflag='-Wl,-shared' ;; + xl[cC]* | bgxl[cC]* | mpixl[cC]*) # IBM XL C 8.0 on PPC (deal with xlf below) + tmp_sharedflag='-qmkshrobj' + tmp_addflag= ;; + nvcc*) # Cuda Compiler Driver 2.2 +- whole_archive_flag_spec='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && 
new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' ++ whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + compiler_needs_object=yes + ;; + esac +- case `$CC -V 2>&1 | $SED 5q` in ++ case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) # Sun C 5.9 +- whole_archive_flag_spec='$wl--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' ++ whole_archive_flag_spec='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + compiler_needs_object=yes + tmp_sharedflag='-G' ;; + *Sun\ F*) # Sun Fortran 8.3 + tmp_sharedflag='-G' ;; + esac +- archive_cmds='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' ++ archive_cmds='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + +- if test yes = "$supports_anon_versioning"; then ++ if test "x$supports_anon_versioning" = xyes; then + archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ +- cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ +- echo "local: *; };" >> $output_objdir/$libname.ver~ +- $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-version-script $wl$output_objdir/$libname.ver -o $lib' ++ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ ++ echo "local: *; };" >> $output_objdir/$libname.ver~ ++ $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script 
${wl}$output_objdir/$libname.ver -o $lib' + fi + + case $cc_basename in +- tcc*) +- export_dynamic_flag_spec='-rdynamic' +- ;; + xlf* | bgf* | bgxlf* | mpixlf*) + # IBM XL Fortran 10.1 on PPC cannot create shared libs itself + whole_archive_flag_spec='--whole-archive$convenience --no-whole-archive' +- hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' +- archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib' +- if test yes = "$supports_anon_versioning"; then ++ hardcode_libdir_flag_spec= ++ hardcode_libdir_flag_spec_ld='-rpath $libdir' ++ archive_cmds='$LD -shared $libobjs $deplibs $compiler_flags -soname $soname -o $lib' ++ if test "x$supports_anon_versioning" = xyes; then + archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ +- cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ +- echo "local: *; };" >> $output_objdir/$libname.ver~ +- $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' ++ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ ++ echo "local: *; };" >> $output_objdir/$libname.ver~ ++ $LD -shared $libobjs $deplibs $compiler_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' + fi + ;; + esac +@@ -12113,8 +10148,8 @@ _LT_EOF + archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' + wlarc= + else +- archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' +- archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ++ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' ++ archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + fi + ;; + +@@ -12132,8 +10167,8 @@ _LT_EOF + + _LT_EOF + elif $LD 
--help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then +- archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' +- archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ++ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' ++ archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi +@@ -12145,7 +10180,7 @@ _LT_EOF + ld_shlibs=no + cat <<_LT_EOF 1>&2 + +-*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 cannot ++*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 can not + *** reliably create shared libraries on SCO systems. Therefore, libtool + *** is disabling shared libraries support. We urge you to upgrade GNU + *** binutils to release 2.16.91.0.3 or newer. Another option is to modify +@@ -12160,9 +10195,9 @@ _LT_EOF + # DT_RUNPATH tag from executables and libraries. But doing so + # requires that you compile everything twice, which is a pain. 
+ if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then +- hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' +- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' +- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ++ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' ++ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' ++ archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi +@@ -12179,15 +10214,15 @@ _LT_EOF + + *) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then +- archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' +- archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ++ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' ++ archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi + ;; + esac + +- if test no = "$ld_shlibs"; then ++ if test "$ld_shlibs" = no; then + runpath_var= + hardcode_libdir_flag_spec= + export_dynamic_flag_spec= +@@ -12203,7 +10238,7 @@ _LT_EOF + # Note: this linker hardcodes the directories in LIBPATH if there + # are no directories specified by -L. + hardcode_minus_L=yes +- if test yes = "$GCC" && test -z "$lt_prog_compiler_static"; then ++ if test "$GCC" = yes && test -z "$lt_prog_compiler_static"; then + # Neither direct hardcoding nor static linking is supported with a + # broken collect2. 
+ hardcode_direct=unsupported +@@ -12211,57 +10246,34 @@ _LT_EOF + ;; + + aix[4-9]*) +- if test ia64 = "$host_cpu"; then ++ if test "$host_cpu" = ia64; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' +- no_entry_flag= ++ no_entry_flag="" + else + # If we're using GNU nm, then we don't want the "-C" option. +- # -C means demangle to GNU nm, but means don't demangle to AIX nm. +- # Without the "-l" option, or with the "-B" option, AIX nm treats +- # weak defined symbols like other global defined symbols, whereas +- # GNU nm marks them as "W". +- # While the 'weak' keyword is ignored in the Export File, we need +- # it in the Import File for the 'aix-soname' feature, so we have +- # to replace the "-B" option with "-P" for AIX nm. ++ # -C means demangle to AIX nm, but means don't demangle with GNU nm ++ # Also, AIX nm treats weak defined symbols like other global ++ # defined symbols, whereas GNU nm marks them as "W". 
+ if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then +- export_symbols_cmds='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { if (\$ 2 == "W") { print \$ 3 " weak" } else { print \$ 3 } } }'\'' | sort -u > $export_symbols' ++ export_symbols_cmds='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + else +- export_symbols_cmds='`func_echo_all $NM | $SED -e '\''s/B\([^B]*\)$/P\1/'\''` -PCpgl $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "L") || (\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) && (substr(\$ 1,1,1) != ".")) { if ((\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) { print \$ 1 " weak" } else { print \$ 1 } } }'\'' | sort -u > $export_symbols' ++ export_symbols_cmds='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "L")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + fi + aix_use_runtimelinking=no + + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we +- # have runtime linking enabled, and use it for executables. 
+- # For shared libraries, we enable/disable runtime linking +- # depending on the kind of the shared library created - +- # when "with_aix_soname,aix_use_runtimelinking" is: +- # "aix,no" lib.a(lib.so.V) shared, rtl:no, for executables +- # "aix,yes" lib.so shared, rtl:yes, for executables +- # lib.a static archive +- # "both,no" lib.so.V(shr.o) shared, rtl:yes +- # lib.a(lib.so.V) shared, rtl:no, for executables +- # "both,yes" lib.so.V(shr.o) shared, rtl:yes, for executables +- # lib.a(lib.so.V) shared, rtl:no +- # "svr4,*" lib.so.V(shr.o) shared, rtl:yes, for executables +- # lib.a static archive ++ # need to do runtime linking. + case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*) + for ld_flag in $LDFLAGS; do +- if (test x-brtl = "x$ld_flag" || test x-Wl,-brtl = "x$ld_flag"); then ++ if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then + aix_use_runtimelinking=yes + break + fi + done +- if test svr4,no = "$with_aix_soname,$aix_use_runtimelinking"; then +- # With aix-soname=svr4, we create the lib.so.V shared archives only, +- # so we don't have lib.a shared libs to link our executables. +- # We have to force runtime linking in this case. +- aix_use_runtimelinking=yes +- LDFLAGS="$LDFLAGS -Wl,-brtl" +- fi + ;; + esac + +@@ -12280,21 +10292,13 @@ _LT_EOF + hardcode_direct_absolute=yes + hardcode_libdir_separator=':' + link_all_deplibs=yes +- file_list_spec='$wl-f,' +- case $with_aix_soname,$aix_use_runtimelinking in +- aix,*) ;; # traditional, no import file +- svr4,* | *,yes) # use import file +- # The Import File defines what to hardcode. 
+- hardcode_direct=no +- hardcode_direct_absolute=no +- ;; +- esac ++ file_list_spec='${wl}-f,' + +- if test yes = "$GCC"; then ++ if test "$GCC" = yes; then + case $host_os in aix4.[012]|aix4.[012].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ +- collect2name=`$CC -print-prog-name=collect2` ++ collect2name=`${CC} -print-prog-name=collect2` + if test -f "$collect2name" && + strings "$collect2name" | $GREP resolve_lib_name >/dev/null + then +@@ -12313,168 +10317,119 @@ _LT_EOF + ;; + esac + shared_flag='-shared' +- if test yes = "$aix_use_runtimelinking"; then +- shared_flag="$shared_flag "'$wl-G' ++ if test "$aix_use_runtimelinking" = yes; then ++ shared_flag="$shared_flag "'${wl}-G' + fi +- # Need to ensure runtime linking is disabled for the traditional +- # shared library, or the linker may eventually find shared libraries +- # /with/ Import File - we do not want to mix them. +- shared_flag_aix='-shared' +- shared_flag_svr4='-shared $wl-G' + else + # not using gcc +- if test ia64 = "$host_cpu"; then ++ if test "$host_cpu" = ia64; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. The following line is correct: + shared_flag='-G' + else +- if test yes = "$aix_use_runtimelinking"; then +- shared_flag='$wl-G' ++ if test "$aix_use_runtimelinking" = yes; then ++ shared_flag='${wl}-G' + else +- shared_flag='$wl-bM:SRE' ++ shared_flag='${wl}-bM:SRE' + fi +- shared_flag_aix='$wl-bM:SRE' +- shared_flag_svr4='$wl-G' + fi + fi + +- export_dynamic_flag_spec='$wl-bexpall' ++ export_dynamic_flag_spec='${wl}-bexpall' + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to export. 
+ always_export_symbols=yes +- if test aix,yes = "$with_aix_soname,$aix_use_runtimelinking"; then ++ if test "$aix_use_runtimelinking" = yes; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. + allow_undefined_flag='-berok' + # Determine the default libpath from the value encoded in an + # empty executable. +- if test set = "${lt_cv_aix_libpath+set}"; then +- aix_libpath=$lt_cv_aix_libpath +-else +- if test ${lt_cv_aix_libpath_+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop +- cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ + + int +-main (void) ++main () + { + + ; + return 0; + } + _ACEOF +-if ac_fn_c_try_link "$LINENO" +-then : +- +- lt_aix_libpath_sed=' +- /Import File Strings/,/^$/ { +- /^0/ { +- s/^0 *\([^ ]*\) *$/\1/ +- p +- } +- }' +- lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +- # Check for a 64-bit object if we didn't find anything. +- if test -z "$lt_cv_aix_libpath_"; then +- lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +- fi +-fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam \ +- conftest$ac_exeext conftest.$ac_ext +- if test -z "$lt_cv_aix_libpath_"; then +- lt_cv_aix_libpath_=/usr/lib:/lib +- fi ++if ac_fn_c_try_link "$LINENO"; then : + ++lt_aix_libpath_sed=' ++ /Import File Strings/,/^$/ { ++ /^0/ { ++ s/^0 *\(.*\)$/\1/ ++ p ++ } ++ }' ++aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++# Check for a 64-bit object if we didn't find anything. 
++if test -z "$aix_libpath"; then ++ aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + fi +- +- aix_libpath=$lt_cv_aix_libpath_ + fi ++rm -f core conftest.err conftest.$ac_objext \ ++ conftest$ac_exeext conftest.$ac_ext ++if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + +- hardcode_libdir_flag_spec='$wl-blibpath:$libdir:'"$aix_libpath" +- archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs $wl'$no_entry_flag' $compiler_flags `if test -n "$allow_undefined_flag"; then func_echo_all "$wl$allow_undefined_flag"; else :; fi` $wl'$exp_sym_flag:\$export_symbols' '$shared_flag ++ hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" ++ archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" + else +- if test ia64 = "$host_cpu"; then +- hardcode_libdir_flag_spec='$wl-R $libdir:/usr/lib:/lib' ++ if test "$host_cpu" = ia64; then ++ hardcode_libdir_flag_spec='${wl}-R $libdir:/usr/lib:/lib' + allow_undefined_flag="-z nodefs" +- archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\$wl$no_entry_flag"' $compiler_flags $wl$allow_undefined_flag '"\$wl$exp_sym_flag:\$export_symbols" ++ archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an + # empty executable. 
+- if test set = "${lt_cv_aix_libpath+set}"; then +- aix_libpath=$lt_cv_aix_libpath +-else +- if test ${lt_cv_aix_libpath_+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop +- cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ + + int +-main (void) ++main () + { + + ; + return 0; + } + _ACEOF +-if ac_fn_c_try_link "$LINENO" +-then : +- +- lt_aix_libpath_sed=' +- /Import File Strings/,/^$/ { +- /^0/ { +- s/^0 *\([^ ]*\) *$/\1/ +- p +- } +- }' +- lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +- # Check for a 64-bit object if we didn't find anything. +- if test -z "$lt_cv_aix_libpath_"; then +- lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +- fi +-fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam \ +- conftest$ac_exeext conftest.$ac_ext +- if test -z "$lt_cv_aix_libpath_"; then +- lt_cv_aix_libpath_=/usr/lib:/lib +- fi ++if ac_fn_c_try_link "$LINENO"; then : + ++lt_aix_libpath_sed=' ++ /Import File Strings/,/^$/ { ++ /^0/ { ++ s/^0 *\(.*\)$/\1/ ++ p ++ } ++ }' ++aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++# Check for a 64-bit object if we didn't find anything. ++if test -z "$aix_libpath"; then ++ aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + fi +- +- aix_libpath=$lt_cv_aix_libpath_ + fi ++rm -f core conftest.err conftest.$ac_objext \ ++ conftest$ac_exeext conftest.$ac_ext ++if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + +- hardcode_libdir_flag_spec='$wl-blibpath:$libdir:'"$aix_libpath" ++ hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. 
+- no_undefined_flag=' $wl-bernotok' +- allow_undefined_flag=' $wl-berok' +- if test yes = "$with_gnu_ld"; then ++ no_undefined_flag=' ${wl}-bernotok' ++ allow_undefined_flag=' ${wl}-berok' ++ if test "$with_gnu_ld" = yes; then + # We only use this code for GNU lds that support --whole-archive. +- whole_archive_flag_spec='$wl--whole-archive$convenience $wl--no-whole-archive' ++ whole_archive_flag_spec='${wl}--whole-archive$convenience ${wl}--no-whole-archive' + else + # Exported symbols can be pulled into shared objects from archives + whole_archive_flag_spec='$convenience' + fi + archive_cmds_need_lc=yes +- archive_expsym_cmds='$RM -r $output_objdir/$realname.d~$MKDIR $output_objdir/$realname.d' +- # -brtl affects multiple linker settings, -berok does not and is overridden later +- compiler_flags_filtered='`func_echo_all "$compiler_flags " | $SED -e "s%-brtl\\([, ]\\)%-berok\\1%g"`' +- if test svr4 != "$with_aix_soname"; then +- # This is similar to how AIX traditionally builds its shared libraries. +- archive_expsym_cmds="$archive_expsym_cmds"'~$CC '$shared_flag_aix' -o $output_objdir/$realname.d/$soname $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$realname.d/$soname' +- fi +- if test aix != "$with_aix_soname"; then +- archive_expsym_cmds="$archive_expsym_cmds"'~$CC '$shared_flag_svr4' -o $output_objdir/$realname.d/$shared_archive_member_spec.o $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$STRIP -e $output_objdir/$realname.d/$shared_archive_member_spec.o~( func_echo_all "#! 
$soname($shared_archive_member_spec.o)"; if test shr_64 = "$shared_archive_member_spec"; then func_echo_all "# 64"; else func_echo_all "# 32"; fi; cat $export_symbols ) > $output_objdir/$realname.d/$shared_archive_member_spec.imp~$AR $AR_FLAGS $output_objdir/$soname $output_objdir/$realname.d/$shared_archive_member_spec.o $output_objdir/$realname.d/$shared_archive_member_spec.imp' +- else +- # used by -dlpreopen to get the symbols +- archive_expsym_cmds="$archive_expsym_cmds"'~$MV $output_objdir/$realname.d/$soname $output_objdir' +- fi +- archive_expsym_cmds="$archive_expsym_cmds"'~$RM -r $output_objdir/$realname.d' ++ # This is similar to how AIX traditionally builds its shared libraries. ++ archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' + fi + fi + ;; +@@ -12483,7 +10438,7 @@ fi + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support +- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' ++ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds='' + ;; + m68k) +@@ -12500,68 +10455,23 @@ fi + + cygwin* | mingw* | pw32* | cegcc*) + # When not using gcc, we currently assume that we are using +- # Microsoft Visual C++ or Intel C++ Compiler. ++ # Microsoft Visual C++. + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. +- case $cc_basename in +- cl* | icl*) +- # Native MSVC or ICC +- hardcode_libdir_flag_spec=' ' +- allow_undefined_flag=unsupported +- always_export_symbols=yes +- file_list_spec='@' +- # Tell ltmain to make .lib files, not .a files. +- libext=lib +- # Tell ltmain to make .dll files, not .so files. +- shrext_cmds=.dll +- # FIXME: Setting linknames here is a bad hack. 
+- archive_cmds='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~linknames=' +- archive_expsym_cmds='if test DEF = "`$SED -n -e '\''s/^[ ]*//'\'' -e '\''/^\(;.*\)*$/d'\'' -e '\''s/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p'\'' -e q $export_symbols`" ; then +- cp "$export_symbols" "$output_objdir/$soname.def"; +- echo "$tool_output_objdir$soname.def" > "$output_objdir/$soname.exp"; +- else +- $SED -e '\''s/^/-link -EXPORT:/'\'' < $export_symbols > $output_objdir/$soname.exp; +- fi~ +- $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ +- linknames=' +- # The linker will not automatically build a static lib if we build a DLL. +- # _LT_TAGVAR(old_archive_from_new_cmds, )='true' +- enable_shared_with_static_runtimes=yes +- exclude_expsyms='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' +- export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1,DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' +- # Don't use ranlib +- old_postinstall_cmds='chmod 644 $oldlib' +- postlink_cmds='lt_outputfile="@OUTPUT@"~ +- lt_tool_outputfile="@TOOL_OUTPUT@"~ +- case $lt_outputfile in +- *.exe|*.EXE) ;; +- *) +- lt_outputfile=$lt_outputfile.exe +- lt_tool_outputfile=$lt_tool_outputfile.exe +- ;; +- esac~ +- if test : != "$MANIFEST_TOOL" && test -f "$lt_outputfile.manifest"; then +- $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; +- $RM "$lt_outputfile.manifest"; +- fi' +- ;; +- *) +- # Assume MSVC and ICC wrapper +- hardcode_libdir_flag_spec=' ' +- allow_undefined_flag=unsupported +- # Tell ltmain to make .lib files, not .a files. +- libext=lib +- # Tell ltmain to make .dll files, not .so files. +- shrext_cmds=.dll +- # FIXME: Setting linknames here is a bad hack. 
+- archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' +- # The linker will automatically build a .lib file if we build a DLL. +- old_archive_from_new_cmds='true' +- # FIXME: Should let the user specify the lib program. +- old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' +- enable_shared_with_static_runtimes=yes +- ;; +- esac ++ hardcode_libdir_flag_spec=' ' ++ allow_undefined_flag=unsupported ++ # Tell ltmain to make .lib files, not .a files. ++ libext=lib ++ # Tell ltmain to make .dll files, not .so files. ++ shrext_cmds=".dll" ++ # FIXME: Setting linknames here is a bad hack. ++ archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' ++ # The linker will automatically build a .lib file if we build a DLL. ++ old_archive_from_new_cmds='true' ++ # FIXME: Should let the user specify the lib program. ++ old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' ++ fix_srcfile_path='`cygpath -w "$srcfile"`' ++ enable_shared_with_static_runtimes=yes + ;; + + darwin* | rhapsody*) +@@ -12571,24 +10481,23 @@ fi + hardcode_direct=no + hardcode_automatic=yes + hardcode_shlibpath_var=unsupported +- if test yes = "$lt_cv_ld_force_load"; then +- whole_archive_flag_spec='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience $wl-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' +- ++ if test "$lt_cv_ld_force_load" = "yes"; then ++ whole_archive_flag_spec='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience ${wl}-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' + else + whole_archive_flag_spec='' + fi + link_all_deplibs=yes +- allow_undefined_flag=$_lt_dar_allow_undefined ++ allow_undefined_flag="$_lt_dar_allow_undefined" + case $cc_basename in +- ifort*|nagfor*) _lt_dar_can_shared=yes ;; ++ ifort*) _lt_dar_can_shared=yes ;; + *) 
_lt_dar_can_shared=$GCC ;; + esac +- if test yes = "$_lt_dar_can_shared"; then ++ if test "$_lt_dar_can_shared" = "yes"; then + output_verbose_link_cmd=func_echo_all +- archive_cmds="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dsymutil" +- module_cmds="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dsymutil" +- archive_expsym_cmds="$SED 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dar_export_syms$_lt_dsymutil" +- module_expsym_cmds="$SED -e 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dar_export_syms$_lt_dsymutil" ++ archive_cmds="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}" ++ module_cmds="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}" ++ archive_expsym_cmds="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}" ++ module_expsym_cmds="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}" + + else + ld_shlibs=no +@@ -12622,41 +10531,42 @@ fi + ;; + + # FreeBSD 3 and greater uses gcc -shared to do shared libraries. 
+- freebsd* | dragonfly* | midnightbsd*) +- archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' ++ freebsd* | dragonfly*) ++ archive_cmds='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + hpux9*) +- if test yes = "$GCC"; then +- archive_cmds='$RM $output_objdir/$soname~$CC -shared $pic_flag $wl+b $wl$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' ++ if test "$GCC" = yes; then ++ archive_cmds='$RM $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else +- archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' ++ archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + fi +- hardcode_libdir_flag_spec='$wl+b $wl$libdir' ++ hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' + hardcode_libdir_separator=: + hardcode_direct=yes + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. 
+ hardcode_minus_L=yes +- export_dynamic_flag_spec='$wl-E' ++ export_dynamic_flag_spec='${wl}-E' + ;; + + hpux10*) +- if test yes,no = "$GCC,$with_gnu_ld"; then +- archive_cmds='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ++ if test "$GCC" = yes && test "$with_gnu_ld" = no; then ++ archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' + fi +- if test no = "$with_gnu_ld"; then +- hardcode_libdir_flag_spec='$wl+b $wl$libdir' ++ if test "$with_gnu_ld" = no; then ++ hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' ++ hardcode_libdir_flag_spec_ld='+b $libdir' + hardcode_libdir_separator=: + hardcode_direct=yes + hardcode_direct_absolute=yes +- export_dynamic_flag_spec='$wl-E' ++ export_dynamic_flag_spec='${wl}-E' + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. 
+ hardcode_minus_L=yes +@@ -12664,38 +10574,37 @@ fi + ;; + + hpux11*) +- if test yes,no = "$GCC,$with_gnu_ld"; then ++ if test "$GCC" = yes && test "$with_gnu_ld" = no; then + case $host_cpu in + hppa*64*) +- archive_cmds='$CC -shared $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -shared ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) +- archive_cmds='$CC -shared $pic_flag $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) +- archive_cmds='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + else + case $host_cpu in + hppa*64*) +- archive_cmds='$CC -b $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) +- archive_cmds='$CC -b $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + + # Older versions of the 11.00 compiler do not understand -b yet + # (HP92453-01 A.11.01.20 doesn't, HP92453-01 B.11.X.35175-35176.GP does) +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $CC understands -b" >&5 +-printf %s "checking if $CC understands -b... " >&6; } +-if test ${lt_cv_prog_compiler__b+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CC understands -b" >&5 ++$as_echo_n "checking if $CC understands -b... 
" >&6; } ++if ${lt_cv_prog_compiler__b+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + lt_cv_prog_compiler__b=no +- save_LDFLAGS=$LDFLAGS ++ save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS -b" + echo "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then +@@ -12714,14 +10623,14 @@ else $as_nop + fi + fi + $RM -r conftest* +- LDFLAGS=$save_LDFLAGS ++ LDFLAGS="$save_LDFLAGS" + + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler__b" >&5 +-printf "%s\n" "$lt_cv_prog_compiler__b" >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler__b" >&5 ++$as_echo "$lt_cv_prog_compiler__b" >&6; } + +-if test yes = "$lt_cv_prog_compiler__b"; then +- archive_cmds='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ++if test x"$lt_cv_prog_compiler__b" = xyes; then ++ archive_cmds='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' + fi +@@ -12729,8 +10638,8 @@ fi + ;; + esac + fi +- if test no = "$with_gnu_ld"; then +- hardcode_libdir_flag_spec='$wl+b $wl$libdir' ++ if test "$with_gnu_ld" = no; then ++ hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' + hardcode_libdir_separator=: + + case $host_cpu in +@@ -12741,7 +10650,7 @@ fi + *) + hardcode_direct=yes + hardcode_direct_absolute=yes +- export_dynamic_flag_spec='$wl-E' ++ export_dynamic_flag_spec='${wl}-E' + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. 
+@@ -12752,60 +10661,35 @@ fi + ;; + + irix5* | irix6* | nonstopux*) +- if test yes = "$GCC"; then +- archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' ++ if test "$GCC" = yes; then ++ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + # Try to use the -exported_symbol ld option, if it does not + # work, assume that -exports_file does not work either and + # implicitly export all symbols. +- # This should be the same for all languages, so no per-tag cache variable. +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the $host_os linker accepts -exported_symbol" >&5 +-printf %s "checking whether the $host_os linker accepts -exported_symbol... " >&6; } +-if test ${lt_cv_irix_exported_symbol+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop +- save_LDFLAGS=$LDFLAGS +- LDFLAGS="$LDFLAGS -shared $wl-exported_symbol ${wl}foo $wl-update_registry $wl/dev/null" +- cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++ save_LDFLAGS="$LDFLAGS" ++ LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. 
*/ +-int foo (void) { return 0; } ++int foo(void) {} + _ACEOF +-if ac_fn_c_try_link "$LINENO" +-then : +- lt_cv_irix_exported_symbol=yes +-else $as_nop +- lt_cv_irix_exported_symbol=no ++if ac_fn_c_try_link "$LINENO"; then : ++ archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' ++ + fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam \ ++rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +- LDFLAGS=$save_LDFLAGS +-fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_irix_exported_symbol" >&5 +-printf "%s\n" "$lt_cv_irix_exported_symbol" >&6; } +- if test yes = "$lt_cv_irix_exported_symbol"; then +- archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations $wl-exports_file $wl$export_symbols -o $lib' +- fi ++ LDFLAGS="$save_LDFLAGS" + else +- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' +- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -exports_file $export_symbols -o $lib' ++ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' ++ archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry 
${output_objdir}/so_locations -exports_file $export_symbols -o $lib' + fi + archive_cmds_need_lc='no' +- hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' ++ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator=: + inherit_rpath=yes + link_all_deplibs=yes + ;; + +- linux*) +- case $cc_basename in +- tcc*) +- # Fabrice Bellard et al's Tiny C Compiler +- ld_shlibs=yes +- archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' +- ;; +- esac +- ;; +- + netbsd*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out +@@ -12820,7 +10704,7 @@ printf "%s\n" "$lt_cv_irix_exported_symbol" >&6; } + newsos6) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=yes +- hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' ++ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator=: + hardcode_shlibpath_var=no + ;; +@@ -12828,19 +10712,27 @@ printf "%s\n" "$lt_cv_irix_exported_symbol" >&6; } + *nto* | *qnx*) + ;; + +- openbsd* | bitrig*) ++ openbsd*) + if test -f /usr/libexec/ld.so; then + hardcode_direct=yes + hardcode_shlibpath_var=no + hardcode_direct_absolute=yes +- if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then ++ if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' +- archive_expsym_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags $wl-retain-symbols-file,$export_symbols' +- hardcode_libdir_flag_spec='$wl-rpath,$libdir' +- export_dynamic_flag_spec='$wl-E' ++ archive_expsym_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols' ++ hardcode_libdir_flag_spec='${wl}-rpath,$libdir' ++ export_dynamic_flag_spec='${wl}-E' + else +- archive_cmds='$CC -shared 
$pic_flag -o $lib $libobjs $deplibs $compiler_flags' +- hardcode_libdir_flag_spec='$wl-rpath,$libdir' ++ case $host_os in ++ openbsd[01].* | openbsd2.[0-7] | openbsd2.[0-7].*) ++ archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' ++ hardcode_libdir_flag_spec='-R$libdir' ++ ;; ++ *) ++ archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' ++ hardcode_libdir_flag_spec='${wl}-rpath,$libdir' ++ ;; ++ esac + fi + else + ld_shlibs=no +@@ -12851,54 +10743,33 @@ printf "%s\n" "$lt_cv_irix_exported_symbol" >&6; } + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + allow_undefined_flag=unsupported +- shrext_cmds=.dll +- archive_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ +- $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ +- $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ +- $ECHO EXPORTS >> $output_objdir/$libname.def~ +- emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ +- $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ +- emximp -o $lib $output_objdir/$libname.def' +- archive_expsym_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ +- $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ +- $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ +- $ECHO EXPORTS >> $output_objdir/$libname.def~ +- prefix_cmds="$SED"~ +- if test EXPORTS = "`$SED 1q $export_symbols`"; then +- prefix_cmds="$prefix_cmds -e 1d"; +- fi~ +- prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ +- cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ +- $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ +- emximp -o $lib $output_objdir/$libname.def' +- old_archive_From_new_cmds='emximp -o $output_objdir/${libname}_dll.a 
$output_objdir/$libname.def' +- enable_shared_with_static_runtimes=yes +- file_list_spec='@' ++ archive_cmds='$ECHO "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~echo DATA >> $output_objdir/$libname.def~echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def' ++ old_archive_from_new_cmds='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def' + ;; + + osf3*) +- if test yes = "$GCC"; then +- allow_undefined_flag=' $wl-expect_unresolved $wl\*' +- archive_cmds='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' ++ if test "$GCC" = yes; then ++ allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' ++ archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + allow_undefined_flag=' -expect_unresolved \*' +- archive_cmds='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' ++ archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + fi + archive_cmds_need_lc='no' +- hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' ++ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator=: + ;; + + osf4* | osf5*) # as 
osf3* with the addition of -msym flag +- if test yes = "$GCC"; then +- allow_undefined_flag=' $wl-expect_unresolved $wl\*' +- archive_cmds='$CC -shared$allow_undefined_flag $pic_flag $libobjs $deplibs $compiler_flags $wl-msym $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' +- hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' ++ if test "$GCC" = yes; then ++ allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' ++ archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' ++ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + else + allow_undefined_flag=' -expect_unresolved \*' +- archive_cmds='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' ++ archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + archive_expsym_cmds='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~ +- $CC -shared$allow_undefined_flag $wl-input $wl$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib~$RM $lib.exp' ++ $CC -shared${allow_undefined_flag} ${wl}-input ${wl}$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib~$RM $lib.exp' + + # Both c and 
cxx compiler support -rpath directly + hardcode_libdir_flag_spec='-rpath $libdir' +@@ -12909,24 +10780,24 @@ printf "%s\n" "$lt_cv_irix_exported_symbol" >&6; } + + solaris*) + no_undefined_flag=' -z defs' +- if test yes = "$GCC"; then +- wlarc='$wl' +- archive_cmds='$CC -shared $pic_flag $wl-z ${wl}text $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' ++ if test "$GCC" = yes; then ++ wlarc='${wl}' ++ archive_cmds='$CC -shared ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ +- $CC -shared $pic_flag $wl-z ${wl}text $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' ++ $CC -shared ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' + else + case `$CC -V 2>&1` in + *"Compilers 5.0"*) + wlarc='' +- archive_cmds='$LD -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $linker_flags' ++ archive_cmds='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' + archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ +- $LD -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp' ++ $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp' + ;; + *) +- wlarc='$wl' +- archive_cmds='$CC -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $compiler_flags' ++ wlarc='${wl}' ++ archive_cmds='$CC -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ +- $CC -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib 
$libobjs $deplibs $compiler_flags~$RM $lib.exp' ++ $CC -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' + ;; + esac + fi +@@ -12936,11 +10807,11 @@ printf "%s\n" "$lt_cv_irix_exported_symbol" >&6; } + solaris2.[0-5] | solaris2.[0-5].*) ;; + *) + # The compiler driver will combine and reorder linker options, +- # but understands '-z linker_flag'. GCC discards it without '$wl', ++ # but understands `-z linker_flag'. GCC discards it without `$wl', + # but is careful enough not to reorder. + # Supported since Solaris 2.6 (maybe 2.5.1?) +- if test yes = "$GCC"; then +- whole_archive_flag_spec='$wl-z ${wl}allextract$convenience $wl-z ${wl}defaultextract' ++ if test "$GCC" = yes; then ++ whole_archive_flag_spec='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' + else + whole_archive_flag_spec='-z allextract$convenience -z defaultextract' + fi +@@ -12950,10 +10821,10 @@ printf "%s\n" "$lt_cv_irix_exported_symbol" >&6; } + ;; + + sunos4*) +- if test sequent = "$host_vendor"; then ++ if test "x$host_vendor" = xsequent; then + # Use $CC to link under sequent, because it throws in some extra .o + # files that make .init and .fini sections work. 
+- archive_cmds='$CC -G $wl-h $soname -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' + fi +@@ -13002,43 +10873,43 @@ printf "%s\n" "$lt_cv_irix_exported_symbol" >&6; } + ;; + + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) +- no_undefined_flag='$wl-z,text' ++ no_undefined_flag='${wl}-z,text' + archive_cmds_need_lc=no + hardcode_shlibpath_var=no + runpath_var='LD_RUN_PATH' + +- if test yes = "$GCC"; then +- archive_cmds='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' +- archive_expsym_cmds='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ if test "$GCC" = yes; then ++ archive_cmds='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ archive_expsym_cmds='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + else +- archive_cmds='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' +- archive_expsym_cmds='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ archive_expsym_cmds='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + sysv5* | sco3.2v5* | sco5v6*) +- # Note: We CANNOT use -z defs as we might desire, because we do not ++ # Note: We can NOT use -z defs as we might desire, because we do not + # link with -lc, and that would cause any symbols used from libc to + # always be unresolved, which means just about no library would + # ever link correctly. If we're not using GNU ld we use -z text + # though, which does catch some bad symbols but isn't as heavy-handed + # as -z defs. 
+- no_undefined_flag='$wl-z,text' +- allow_undefined_flag='$wl-z,nodefs' ++ no_undefined_flag='${wl}-z,text' ++ allow_undefined_flag='${wl}-z,nodefs' + archive_cmds_need_lc=no + hardcode_shlibpath_var=no +- hardcode_libdir_flag_spec='$wl-R,$libdir' ++ hardcode_libdir_flag_spec='${wl}-R,$libdir' + hardcode_libdir_separator=':' + link_all_deplibs=yes +- export_dynamic_flag_spec='$wl-Bexport' ++ export_dynamic_flag_spec='${wl}-Bexport' + runpath_var='LD_RUN_PATH' + +- if test yes = "$GCC"; then +- archive_cmds='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' +- archive_expsym_cmds='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ if test "$GCC" = yes; then ++ archive_cmds='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ archive_expsym_cmds='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + else +- archive_cmds='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' +- archive_expsym_cmds='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ archive_expsym_cmds='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + +@@ -13053,18 +10924,18 @@ printf "%s\n" "$lt_cv_irix_exported_symbol" >&6; } + ;; + esac + +- if test sni = "$host_vendor"; then ++ if test x$host_vendor = xsni; then + case $host in + sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) +- export_dynamic_flag_spec='$wl-Blargedynsym' ++ export_dynamic_flag_spec='${wl}-Blargedynsym' + ;; + esac + fi + fi + +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs" >&5 +-printf "%s\n" "$ld_shlibs" >&6; } +-test no = "$ld_shlibs" && can_build_shared=no ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs" >&5 ++$as_echo "$ld_shlibs" >&6; } ++test "$ld_shlibs" = no && 
can_build_shared=no + + with_gnu_ld=$with_gnu_ld + +@@ -13090,7 +10961,7 @@ x|xyes) + # Assume -lc should be added + archive_cmds_need_lc=yes + +- if test yes,yes = "$GCC,$enable_shared"; then ++ if test "$enable_shared" = yes && test "$GCC" = yes; then + case $archive_cmds in + *'~'*) + # FIXME: we may have to deal with multi-command sequences. +@@ -13099,19 +10970,18 @@ x|xyes) + # Test whether the compiler implicitly links with -lc since on some + # systems, -lgcc has to come before -lc. If gcc already passes -lc + # to ld, don't add -lc before -lgcc. +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5 +-printf %s "checking whether -lc should be explicitly linked in... " >&6; } +-if test ${lt_cv_archive_cmds_need_lc+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5 ++$as_echo_n "checking whether -lc should be explicitly linked in... " >&6; } ++if ${lt_cv_archive_cmds_need_lc+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + $RM conftest* + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } 2>conftest.err; then + soname=conftest + lib=conftest +@@ -13129,7 +10999,7 @@ else $as_nop + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5 + (eval $archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5 + ac_status=$? +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; } + then + lt_cv_archive_cmds_need_lc=no +@@ -13143,8 +11013,8 @@ else $as_nop + $RM conftest* + + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc" >&5 +-printf "%s\n" "$lt_cv_archive_cmds_need_lc" >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc" >&5 ++$as_echo "$lt_cv_archive_cmds_need_lc" >&6; } + archive_cmds_need_lc=$lt_cv_archive_cmds_need_lc + ;; + esac +@@ -13303,17 +11173,22 @@ esac + + + +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5 +-printf %s "checking dynamic linker characteristics... " >&6; } + +-if test yes = "$GCC"; then ++ ++ ++ ++ ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5 ++$as_echo_n "checking dynamic linker characteristics... " >&6; } ++ ++if test "$GCC" = yes; then + case $host_os in +- darwin*) lt_awk_arg='/^libraries:/,/LR/' ;; +- *) lt_awk_arg='/^libraries:/' ;; ++ darwin*) lt_awk_arg="/^libraries:/,/LR/" ;; ++ *) lt_awk_arg="/^libraries:/" ;; + esac + case $host_os in +- mingw* | cegcc*) lt_sed_strip_eq='s|=\([A-Za-z]:\)|\1|g' ;; +- *) lt_sed_strip_eq='s|=/|/|g' ;; ++ mingw* | cegcc*) lt_sed_strip_eq="s,=\([A-Za-z]:\),\1,g" ;; ++ *) lt_sed_strip_eq="s,=/,/,g" ;; + esac + lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e $lt_sed_strip_eq` + case $lt_search_path_spec in +@@ -13329,35 +11204,28 @@ if test yes = "$GCC"; then + ;; + esac + # Ok, now we have the path, separated by spaces, we can step through it +- # and add multilib dir if necessary... ++ # and add multilib dir if necessary. + lt_tmp_lt_search_path_spec= +- lt_multi_os_dir=/`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null` +- # ...but if some path component already ends with the multilib dir we assume +- # that all is fine and trust -print-search-dirs as is (GCC 4.2? or newer). 
+- case "$lt_multi_os_dir; $lt_search_path_spec " in +- "/; "* | "/.; "* | "/./; "* | *"$lt_multi_os_dir "* | *"$lt_multi_os_dir/ "*) +- lt_multi_os_dir= +- ;; +- esac ++ lt_multi_os_dir=`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null` + for lt_sys_path in $lt_search_path_spec; do +- if test -d "$lt_sys_path$lt_multi_os_dir"; then +- lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path$lt_multi_os_dir" +- elif test -n "$lt_multi_os_dir"; then ++ if test -d "$lt_sys_path/$lt_multi_os_dir"; then ++ lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path/$lt_multi_os_dir" ++ else + test -d "$lt_sys_path" && \ + lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path" + fi + done + lt_search_path_spec=`$ECHO "$lt_tmp_lt_search_path_spec" | awk ' +-BEGIN {RS = " "; FS = "/|\n";} { +- lt_foo = ""; +- lt_count = 0; ++BEGIN {RS=" "; FS="/|\n";} { ++ lt_foo=""; ++ lt_count=0; + for (lt_i = NF; lt_i > 0; lt_i--) { + if ($lt_i != "" && $lt_i != ".") { + if ($lt_i == "..") { + lt_count++; + } else { + if (lt_count == 0) { +- lt_foo = "/" $lt_i lt_foo; ++ lt_foo="/" $lt_i lt_foo; + } else { + lt_count--; + } +@@ -13371,7 +11239,7 @@ BEGIN {RS = " "; FS = "/|\n";} { + # for these hosts. 
+ case $host_os in + mingw* | cegcc*) lt_search_path_spec=`$ECHO "$lt_search_path_spec" |\ +- $SED 's|/\([A-Za-z]:\)|\1|g'` ;; ++ $SED 's,/\([A-Za-z]:\),\1,g'` ;; + esac + sys_lib_search_path_spec=`$ECHO "$lt_search_path_spec" | $lt_NL2SP` + else +@@ -13380,7 +11248,7 @@ fi + library_names_spec= + libname_spec='lib$name' + soname_spec= +-shrext_cmds=.so ++shrext_cmds=".so" + postinstall_cmds= + postuninstall_cmds= + finish_cmds= +@@ -13397,108 +11265,56 @@ hardcode_into_libs=no + # flags to be left without arguments + need_version=unknown + +- +- + case $host_os in + aix3*) +- version_type=linux # correct to gnu/linux during the next big refactor +- library_names_spec='$libname$release$shared_ext$versuffix $libname.a' ++ version_type=linux ++ library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' + shlibpath_var=LIBPATH + + # AIX 3 has no versioning support, so we append a major version to the name. +- soname_spec='$libname$release$shared_ext$major' ++ soname_spec='${libname}${release}${shared_ext}$major' + ;; + + aix[4-9]*) +- version_type=linux # correct to gnu/linux during the next big refactor ++ version_type=linux + need_lib_prefix=no + need_version=no + hardcode_into_libs=yes +- if test ia64 = "$host_cpu"; then ++ if test "$host_cpu" = ia64; then + # AIX 5 supports IA64 +- library_names_spec='$libname$release$shared_ext$major $libname$release$shared_ext$versuffix $libname$shared_ext' ++ library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + else + # With GCC up to 2.95.x, collect2 would create an import file + # for dependence libraries. The import file would start with +- # the line '#! .'. This would cause the generated library to +- # depend on '.', always an invalid library. This was fixed in ++ # the line `#! .'. This would cause the generated library to ++ # depend on `.', always an invalid library. 
This was fixed in + # development snapshots of GCC prior to 3.0. + case $host_os in + aix4 | aix4.[01] | aix4.[01].*) + if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' + echo ' yes ' +- echo '#endif'; } | $CC -E - | $GREP yes > /dev/null; then ++ echo '#endif'; } | ${CC} -E - | $GREP yes > /dev/null; then + : + else + can_build_shared=no + fi + ;; + esac +- # Using Import Files as archive members, it is possible to support +- # filename-based versioning of shared library archives on AIX. While +- # this would work for both with and without runtime linking, it will +- # prevent static linking of such archives. So we do filename-based +- # shared library versioning with .so extension only, which is used +- # when both runtime linking and shared linking is enabled. +- # Unfortunately, runtime linking may impact performance, so we do +- # not want this to be the default eventually. Also, we use the +- # versioned .so libs for executables only if there is the -brtl +- # linker flag in LDFLAGS as well, or --with-aix-soname=svr4 only. +- # To allow for filename-based versioning support, we need to create +- # libNAME.so.V as an archive file, containing: +- # *) an Import File, referring to the versioned filename of the +- # archive as well as the shared archive member, telling the +- # bitwidth (32 or 64) of that shared object, and providing the +- # list of exported symbols of that shared object, eventually +- # decorated with the 'weak' keyword +- # *) the shared object with the F_LOADONLY flag set, to really avoid +- # it being seen by the linker. +- # At run time we better use the real file rather than another symlink, +- # but for link time we create the symlink libNAME.so -> libNAME.so.V +- +- case $with_aix_soname,$aix_use_runtimelinking in +- # AIX (on Power*) has no versioning support, so currently we cannot hardcode correct ++ # AIX (on Power*) has no versioning support, so currently we can not hardcode correct + # soname into executable. 
Probably we can add versioning support to + # collect2, so additional links can be useful in future. +- aix,yes) # traditional libtool +- dynamic_linker='AIX unversionable lib.so' ++ if test "$aix_use_runtimelinking" = yes; then + # If using run time linking (on AIX 4.2 or later) use lib.so + # instead of lib.a to let people know that these are not + # typical AIX shared libraries. +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- ;; +- aix,no) # traditional AIX only +- dynamic_linker='AIX lib.a(lib.so.V)' ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' ++ else + # We preserve .a as extension for shared libraries through AIX4.2 + # and later when we are not doing run time linking. +- library_names_spec='$libname$release.a $libname.a' +- soname_spec='$libname$release$shared_ext$major' +- ;; +- svr4,*) # full svr4 only +- dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o)" +- library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' +- # We do not specify a path in Import Files, so LIBPATH fires. 
+- shlibpath_overrides_runpath=yes +- ;; +- *,yes) # both, prefer svr4 +- dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o), lib.a(lib.so.V)" +- library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' +- # unpreferred sharedlib libNAME.a needs extra handling +- postinstall_cmds='test -n "$linkname" || linkname="$realname"~func_stripname "" ".so" "$linkname"~$install_shared_prog "$dir/$func_stripname_result.$libext" "$destdir/$func_stripname_result.$libext"~test -z "$tstripme" || test -z "$striplib" || $striplib "$destdir/$func_stripname_result.$libext"' +- postuninstall_cmds='for n in $library_names $old_library; do :; done~func_stripname "" ".so" "$n"~test "$func_stripname_result" = "$n" || func_append rmfiles " $odir/$func_stripname_result.$libext"' +- # We do not specify a path in Import Files, so LIBPATH fires. +- shlibpath_overrides_runpath=yes +- ;; +- *,no) # both, prefer aix +- dynamic_linker="AIX lib.a(lib.so.V), lib.so.V($shared_archive_member_spec.o)" +- library_names_spec='$libname$release.a $libname.a' +- soname_spec='$libname$release$shared_ext$major' +- # unpreferred sharedlib libNAME.so.V and symlink libNAME.so need extra handling +- postinstall_cmds='test -z "$dlname" || $install_shared_prog $dir/$dlname $destdir/$dlname~test -z "$tstripme" || test -z "$striplib" || $striplib $destdir/$dlname~test -n "$linkname" || linkname=$realname~func_stripname "" ".a" "$linkname"~(cd "$destdir" && $LN_S -f $dlname $func_stripname_result.so)' +- postuninstall_cmds='test -z "$dlname" || func_append rmfiles " $odir/$dlname"~for n in $old_library $library_names; do :; done~func_stripname "" ".a" "$n"~func_append rmfiles " $odir/$func_stripname_result.so"' +- ;; +- esac ++ library_names_spec='${libname}${release}.a $libname.a' ++ soname_spec='${libname}${release}${shared_ext}$major' ++ fi + shlibpath_var=LIBPATH + fi + ;; +@@ -13508,27 +11324,27 @@ amigaos*) + powerpc) + # Since July 2007 AmigaOS4 officially supports .so libraries. 
+ # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + ;; + m68k) + library_names_spec='$libname.ixlibrary $libname.a' + # Create ${libname}_ixlibrary.a entries in /sys/libs. +- finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' ++ finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' + ;; + esac + ;; + + beos*) +- library_names_spec='$libname$shared_ext' ++ library_names_spec='${libname}${shared_ext}' + dynamic_linker="$host_os ld.so" + shlibpath_var=LIBRARY_PATH + ;; + + bsdi[45]*) +- version_type=linux # correct to gnu/linux during the next big refactor ++ version_type=linux + need_version=no +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' ++ soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" +@@ -13540,17 +11356,16 @@ bsdi[45]*) + + cygwin* | mingw* | pw32* | 
cegcc*) + version_type=windows +- shrext_cmds=.dll ++ shrext_cmds=".dll" + need_version=no + need_lib_prefix=no + +- case $GCC,$cc_basename in +- yes,*) +- # gcc ++ case $GCC,$host_os in ++ yes,cygwin* | yes,mingw* | yes,pw32* | yes,cegcc*) + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds +- postinstall_cmds='base_file=`basename \$file`~ +- dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ ++ postinstall_cmds='base_file=`basename \${file}`~ ++ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname~ +@@ -13566,84 +11381,26 @@ cygwin* | mingw* | pw32* | cegcc*) + case $host_os in + cygwin*) + # Cygwin DLLs use 'cyg' prefix rather than 'lib' +- soname_spec='`echo $libname | $SED -e 's/^lib/cyg/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' ++ soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + + sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/lib/w32api" + ;; + mingw* | cegcc*) + # MinGW DLLs use traditional 'lib' prefix +- soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' ++ soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + ;; + pw32*) + # pw32 DLLs use 'pw' prefix rather than 'lib' +- library_names_spec='`echo $libname | $SED -e 's/^lib/pw/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' +- ;; +- esac +- dynamic_linker='Win32 ld.exe' +- ;; +- +- *,cl* | *,icl*) +- # Native MSVC or ICC +- libname_spec='$name' +- soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' +- library_names_spec='$libname.dll.lib' +- +- case $build_os in +- mingw*) +- sys_lib_search_path_spec= +- lt_save_ifs=$IFS +- IFS=';' +- for lt_path in $LIB +- do +- IFS=$lt_save_ifs +- 
# Let DOS variable expansion print the short 8.3 style file name. +- lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` +- sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" +- done +- IFS=$lt_save_ifs +- # Convert to MSYS style. +- sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` +- ;; +- cygwin*) +- # Convert to unix form, then to dos form, then back to unix form +- # but this time dos style (no spaces!) so that the unix form looks +- # like /cygdrive/c/PROGRA~1:/cygdr... +- sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` +- sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` +- sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` +- ;; +- *) +- sys_lib_search_path_spec=$LIB +- if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then +- # It is most probably a Windows format PATH. +- sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` +- else +- sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` +- fi +- # FIXME: find the short name or the path components, as spaces are +- # common. (e.g. "Program Files" -> "PROGRA~1") ++ library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + ;; + esac +- +- # DLL is installed to $(libdir)/../bin by postinstall_cmds +- postinstall_cmds='base_file=`basename \$file`~ +- dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ +- dldir=$destdir/`dirname \$dlpath`~ +- test -d \$dldir || mkdir -p \$dldir~ +- $install_prog $dir/$dlname \$dldir/$dlname' +- postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; echo \$dlname'\''`~ +- dlpath=$dir/\$dldll~ +- $RM \$dlpath' +- shlibpath_overrides_runpath=yes +- dynamic_linker='Win32 link.exe' + ;; + + *) +- # Assume MSVC and ICC wrapper +- library_names_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext $libname.lib' +- dynamic_linker='Win32 ld.exe' ++ library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' + ;; + esac ++ dynamic_linker='Win32 ld.exe' + # FIXME: first we should search . and the directory the executable is in + shlibpath_var=PATH + ;; +@@ -13653,8 +11410,8 @@ darwin* | rhapsody*) + version_type=darwin + need_lib_prefix=no + need_version=no +- library_names_spec='$libname$release$major$shared_ext $libname$shared_ext' +- soname_spec='$libname$release$major$shared_ext' ++ library_names_spec='${libname}${release}${major}$shared_ext ${libname}$shared_ext' ++ soname_spec='${libname}${release}${major}$shared_ext' + shlibpath_overrides_runpath=yes + shlibpath_var=DYLD_LIBRARY_PATH + shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' +@@ -13664,15 +11421,15 @@ darwin* | rhapsody*) + ;; + + dgux*) +- version_type=linux # correct to gnu/linux during the next big refactor ++ version_type=linux + need_lib_prefix=no + need_version=no +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' ++ soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +-freebsd* | dragonfly* | midnightbsd*) ++freebsd* | dragonfly*) + # DragonFly does not have aout. When/if they implement a new + # versioning mechanism, adjust this. 
+ if test -x /usr/bin/objformat; then +@@ -13686,13 +11443,12 @@ freebsd* | dragonfly* | midnightbsd*) + version_type=freebsd-$objformat + case $version_type in + freebsd-elf*) +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + need_version=no + need_lib_prefix=no + ;; + freebsd-*) +- library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' ++ library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' + need_version=yes + ;; + esac +@@ -13718,15 +11474,15 @@ freebsd* | dragonfly* | midnightbsd*) + ;; + + haiku*) +- version_type=linux # correct to gnu/linux during the next big refactor ++ version_type=linux + need_lib_prefix=no + need_version=no + dynamic_linker="$host_os runtime_loader" +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' ++ soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LIBRARY_PATH +- shlibpath_overrides_runpath=no +- sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' ++ shlibpath_overrides_runpath=yes ++ sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/beos/system/lib' + hardcode_into_libs=yes + ;; + +@@ -13743,15 +11499,14 @@ hpux9* | hpux10* | hpux11*) + dynamic_linker="$host_os dld.so" + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. 
+- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' +- if test 32 = "$HPUX_IA64_MODE"; then ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' ++ soname_spec='${libname}${release}${shared_ext}$major' ++ if test "X$HPUX_IA64_MODE" = X32; then + sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" +- sys_lib_dlsearch_path_spec=/usr/lib/hpux32 + else + sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" +- sys_lib_dlsearch_path_spec=/usr/lib/hpux64 + fi ++ sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + hppa*64*) + shrext_cmds='.sl' +@@ -13759,8 +11514,8 @@ hpux9* | hpux10* | hpux11*) + dynamic_linker="$host_os dld.sl" + shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. 
+- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' ++ soname_spec='${libname}${release}${shared_ext}$major' + sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; +@@ -13769,8 +11524,8 @@ hpux9* | hpux10* | hpux11*) + dynamic_linker="$host_os dld.sl" + shlibpath_var=SHLIB_PATH + shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' ++ soname_spec='${libname}${release}${shared_ext}$major' + ;; + esac + # HP-UX runs *really* slowly unless shared libraries are mode 555, ... 
+@@ -13780,11 +11535,11 @@ hpux9* | hpux10* | hpux11*) + ;; + + interix[3-9]*) +- version_type=linux # correct to gnu/linux during the next big refactor ++ version_type=linux + need_lib_prefix=no + need_version=no +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' ++ soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no +@@ -13795,16 +11550,16 @@ irix5* | irix6* | nonstopux*) + case $host_os in + nonstopux*) version_type=nonstopux ;; + *) +- if test yes = "$lt_cv_prog_gnu_ld"; then +- version_type=linux # correct to gnu/linux during the next big refactor ++ if test "$lt_cv_prog_gnu_ld" = yes; then ++ version_type=linux + else + version_type=irix + fi ;; + esac + need_lib_prefix=no + need_version=no +- soname_spec='$libname$release$shared_ext$major' +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$release$shared_ext $libname$shared_ext' ++ soname_spec='${libname}${release}${shared_ext}$major' ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' + case $host_os in + irix5* | nonstopux*) + libsuff= shlibsuff= +@@ -13823,8 +11578,8 @@ irix5* | irix6* | nonstopux*) + esac + shlibpath_var=LD_LIBRARY${shlibsuff}_PATH + shlibpath_overrides_runpath=no +- sys_lib_search_path_spec="/usr/lib$libsuff /lib$libsuff /usr/local/lib$libsuff" +- sys_lib_dlsearch_path_spec="/usr/lib$libsuff /lib$libsuff" ++ sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" ++ sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" + 
hardcode_into_libs=yes + ;; + +@@ -13833,42 +11588,26 @@ linux*oldld* | linux*aout* | linux*coff*) + dynamic_linker=no + ;; + +-linux*android*) +- version_type=none # Android doesn't support versioned libraries. +- need_lib_prefix=no +- need_version=no +- library_names_spec='$libname$release$shared_ext' +- soname_spec='$libname$release$shared_ext' +- finish_cmds= +- shlibpath_var=LD_LIBRARY_PATH +- shlibpath_overrides_runpath=yes +- +- # This implies no fast_install, which is unacceptable. +- # Some rework will be needed to allow for fast_install +- # before this can be enabled. +- hardcode_into_libs=yes +- +- dynamic_linker='Android linker' +- # Don't embed -rpath directories since the linker doesn't support them. +- hardcode_libdir_flag_spec='-L$libdir' +- ;; ++# This must be Linux ELF. + +-# This must be glibc/ELF. +-linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) +- version_type=linux # correct to gnu/linux during the next big refactor ++# uclinux* changes (here and below) have been submitted to the libtool ++# project, but have not yet been accepted: they are GCC-local changes ++# for the time being. 
(See ++# https://lists.gnu.org/archive/html/libtool-patches/2018-05/msg00000.html) ++linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu* | uclinuxfdpiceabi) ++ version_type=linux + need_lib_prefix=no + need_version=no +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' ++ soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + + # Some binutils ld are patched to set DT_RUNPATH +- if test ${lt_cv_shlibpath_overrides_runpath+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++ if ${lt_cv_shlibpath_overrides_runpath+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + lt_cv_shlibpath_overrides_runpath=no + save_LDFLAGS=$LDFLAGS + save_libdir=$libdir +@@ -13878,21 +11617,19 @@ else $as_nop + /* end confdefs.h. */ + + int +-main (void) ++main () + { + + ; + return 0; + } + _ACEOF +-if ac_fn_c_try_link "$LINENO" +-then : +- if ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null +-then : ++if ac_fn_c_try_link "$LINENO"; then : ++ if ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null; then : + lt_cv_shlibpath_overrides_runpath=yes + fi + fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam \ ++rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + LDFLAGS=$save_LDFLAGS + libdir=$save_libdir +@@ -13906,18 +11643,10 @@ fi + # before this can be enabled. + hardcode_into_libs=yes + +- # Add ABI-specific directories to the system library path. 
+- sys_lib_dlsearch_path_spec="/lib64 /usr/lib64 /lib /usr/lib" +- +- # Ideally, we could use ldconfig to report *all* directores which are +- # searched for libraries, however this is still not possible. Aside from not +- # being certain /sbin/ldconfig is available, command +- # 'ldconfig -N -X -v | grep ^/' on 64bit Fedora does not report /usr/lib64, +- # even though it is searched at run-time. Try to do the best guess by +- # appending ld.so.conf contents (and includes) to the search path. ++ # Append ld.so.conf contents to the search path + if test -f /etc/ld.so.conf; then + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '` +- sys_lib_dlsearch_path_spec="$sys_lib_dlsearch_path_spec $lt_ld_extra" ++ sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" + fi + + # We used to test for /lib/ld.so.1 and disable shared libraries on +@@ -13934,12 +11663,12 @@ netbsd*) + need_lib_prefix=no + need_version=no + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then +- library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + dynamic_linker='NetBSD (a.out) ld.so' + else +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' ++ soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='NetBSD ld.elf_so' + fi + shlibpath_var=LD_LIBRARY_PATH +@@ -13948,8 +11677,8 @@ netbsd*) + ;; + + newsos6) +- version_type=linux # correct to 
gnu/linux during the next big refactor +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ version_type=linux ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; +@@ -13958,68 +11687,58 @@ newsos6) + version_type=qnx + need_lib_prefix=no + need_version=no +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' ++ soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='ldqnx.so' + ;; + +-openbsd* | bitrig*) ++openbsd*) + version_type=sunos +- sys_lib_dlsearch_path_spec=/usr/lib ++ sys_lib_dlsearch_path_spec="/usr/lib" + need_lib_prefix=no +- if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then +- need_version=no ++ # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs. 
++ case $host_os in ++ openbsd3.3 | openbsd3.3.*) need_version=yes ;; ++ *) need_version=no ;; ++ esac ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' ++ finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' ++ shlibpath_var=LD_LIBRARY_PATH ++ if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then ++ case $host_os in ++ openbsd2.[89] | openbsd2.[89].*) ++ shlibpath_overrides_runpath=no ++ ;; ++ *) ++ shlibpath_overrides_runpath=yes ++ ;; ++ esac + else +- need_version=yes ++ shlibpath_overrides_runpath=yes + fi +- library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' +- finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' +- shlibpath_var=LD_LIBRARY_PATH +- shlibpath_overrides_runpath=yes + ;; + + os2*) + libname_spec='$name' +- version_type=windows +- shrext_cmds=.dll +- need_version=no ++ shrext_cmds=".dll" + need_lib_prefix=no +- # OS/2 can only load a DLL with a base name of 8 characters or less. +- soname_spec='`test -n "$os2dllname" && libname="$os2dllname"; +- v=$($ECHO $release$versuffix | tr -d .-); +- n=$($ECHO $libname | cut -b -$((8 - ${#v})) | tr . _); +- $ECHO $n$v`$shared_ext' +- library_names_spec='${libname}_dll.$libext' ++ library_names_spec='$libname${shared_ext} $libname.a' + dynamic_linker='OS/2 ld.exe' +- shlibpath_var=BEGINLIBPATH +- sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" +- sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec +- postinstall_cmds='base_file=`basename \$file`~ +- dlpath=`$SHELL 2>&1 -c '\''. 
$dir/'\''\$base_file'\''i; $ECHO \$dlname'\''`~ +- dldir=$destdir/`dirname \$dlpath`~ +- test -d \$dldir || mkdir -p \$dldir~ +- $install_prog $dir/$dlname \$dldir/$dlname~ +- chmod a+x \$dldir/$dlname~ +- if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then +- eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; +- fi' +- postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; $ECHO \$dlname'\''`~ +- dlpath=$dir/\$dldll~ +- $RM \$dlpath' ++ shlibpath_var=LIBPATH + ;; + + osf3* | osf4* | osf5*) + version_type=osf + need_lib_prefix=no + need_version=no +- soname_spec='$libname$release$shared_ext$major' +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='${libname}${release}${shared_ext}$major' ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" +- sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ++ sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" + ;; + + rdos*) +@@ -14027,11 +11746,11 @@ rdos*) + ;; + + solaris*) +- version_type=linux # correct to gnu/linux during the next big refactor ++ version_type=linux + need_lib_prefix=no + need_version=no +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' ++ soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes +@@ -14041,20 +11760,20 @@ solaris*) + + sunos4*) + version_type=sunos +- library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' ++ 
library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes +- if test yes = "$with_gnu_ld"; then ++ if test "$with_gnu_ld" = yes; then + need_lib_prefix=no + fi + need_version=yes + ;; + + sysv4 | sysv4.3*) +- version_type=linux # correct to gnu/linux during the next big refactor +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' ++ version_type=linux ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' ++ soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + case $host_vendor in + sni) +@@ -14075,24 +11794,24 @@ sysv4 | sysv4.3*) + ;; + + sysv4*MP*) +- if test -d /usr/nec; then +- version_type=linux # correct to gnu/linux during the next big refactor +- library_names_spec='$libname$shared_ext.$versuffix $libname$shared_ext.$major $libname$shared_ext' +- soname_spec='$libname$shared_ext.$major' ++ if test -d /usr/nec ;then ++ version_type=linux ++ library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' ++ soname_spec='$libname${shared_ext}.$major' + shlibpath_var=LD_LIBRARY_PATH + fi + ;; + + sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) +- version_type=sco ++ version_type=freebsd-elf + need_lib_prefix=no + need_version=no +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' ++ soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + 
shlibpath_overrides_runpath=yes + hardcode_into_libs=yes +- if test yes = "$with_gnu_ld"; then ++ if test "$with_gnu_ld" = yes; then + sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' + else + sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' +@@ -14107,19 +11826,19 @@ sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + + tpf*) + # TPF is a cross-target only. Preferred cross-host = GNU/Linux. +- version_type=linux # correct to gnu/linux during the next big refactor ++ version_type=linux + need_lib_prefix=no + need_version=no +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + + uts4*) +- version_type=linux # correct to gnu/linux during the next big refactor +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' ++ version_type=linux ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' ++ soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +@@ -14127,33 +11846,22 @@ uts4*) + dynamic_linker=no + ;; + esac +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5 +-printf "%s\n" "$dynamic_linker" >&6; } +-test no = "$dynamic_linker" && can_build_shared=no ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5 ++$as_echo "$dynamic_linker" >&6; } ++test "$dynamic_linker" = no && can_build_shared=no + + variables_saved_for_relink="PATH $shlibpath_var $runpath_var" +-if test yes = "$GCC"; then ++if test "$GCC" = yes; then + 
variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" + fi + +-if test set = "${lt_cv_sys_lib_search_path_spec+set}"; then +- sys_lib_search_path_spec=$lt_cv_sys_lib_search_path_spec ++if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then ++ sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec" + fi +- +-if test set = "${lt_cv_sys_lib_dlsearch_path_spec+set}"; then +- sys_lib_dlsearch_path_spec=$lt_cv_sys_lib_dlsearch_path_spec ++if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then ++ sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec" + fi + +-# remember unaugmented sys_lib_dlsearch_path content for libtool script decls... +-configure_time_dlsearch_path=$sys_lib_dlsearch_path_spec +- +-# ... but it needs LT_SYS_LIBRARY_PATH munging for other configure-time code +-func_munge_path_list sys_lib_dlsearch_path_spec "$LT_SYS_LIBRARY_PATH" +- +-# to be used as default LT_SYS_LIBRARY_PATH value in generated libtool +-configure_time_lt_sys_library_path=$LT_SYS_LIBRARY_PATH +- +- + + + +@@ -14245,24 +11953,20 @@ configure_time_lt_sys_library_path=$LT_SYS_LIBRARY_PATH + + + +- +- +- +- +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5 +-printf %s "checking how to hardcode library paths into programs... " >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5 ++$as_echo_n "checking how to hardcode library paths into programs... " >&6; } + hardcode_action= + if test -n "$hardcode_libdir_flag_spec" || + test -n "$runpath_var" || +- test yes = "$hardcode_automatic"; then ++ test "X$hardcode_automatic" = "Xyes" ; then + + # We can hardcode non-existent directories. 
+- if test no != "$hardcode_direct" && ++ if test "$hardcode_direct" != no && + # If the only mechanism to avoid hardcoding is shlibpath_var, we + # have to relink, otherwise we might link with an installed library + # when we should be linking with a yet-to-be-installed one +- ## test no != "$_LT_TAGVAR(hardcode_shlibpath_var, )" && +- test no != "$hardcode_minus_L"; then ++ ## test "$_LT_TAGVAR(hardcode_shlibpath_var, )" != no && ++ test "$hardcode_minus_L" != no; then + # Linking always hardcodes the temporary library directory. + hardcode_action=relink + else +@@ -14274,15 +11978,15 @@ else + # directories. + hardcode_action=unsupported + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $hardcode_action" >&5 +-printf "%s\n" "$hardcode_action" >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $hardcode_action" >&5 ++$as_echo "$hardcode_action" >&6; } + +-if test relink = "$hardcode_action" || +- test yes = "$inherit_rpath"; then ++if test "$hardcode_action" = relink || ++ test "$inherit_rpath" = yes; then + # Fast installation is not supported + enable_fast_install=no +-elif test yes = "$shlibpath_overrides_runpath" || +- test no = "$enable_shared"; then ++elif test "$shlibpath_overrides_runpath" = yes || ++ test "$enable_shared" = no; then + # Fast installation is not necessary + enable_fast_install=needless + fi +@@ -14292,7 +11996,7 @@ fi + + + +- if test yes != "$enable_dlopen"; then ++ if test "x$enable_dlopen" != xyes; then + enable_dlopen=unknown + enable_dlopen_self=unknown + enable_dlopen_self_static=unknown +@@ -14302,29 +12006,28 @@ else + + case $host_os in + beos*) +- lt_cv_dlopen=load_add_on ++ lt_cv_dlopen="load_add_on" + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + ;; + + mingw* | pw32* | cegcc*) +- lt_cv_dlopen=LoadLibrary ++ lt_cv_dlopen="LoadLibrary" + lt_cv_dlopen_libs= + ;; + + cygwin*) +- lt_cv_dlopen=dlopen ++ lt_cv_dlopen="dlopen" + lt_cv_dlopen_libs= + ;; + + darwin*) +- # if libdl is installed we need to link against 
it +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5 +-printf %s "checking for dlopen in -ldl... " >&6; } +-if test ${ac_cv_lib_dl_dlopen+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++ # if libdl is installed we need to link against it ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5 ++$as_echo_n "checking for dlopen in -ldl... " >&6; } ++if ${ac_cv_lib_dl_dlopen+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + ac_check_lib_save_LIBS=$LIBS + LIBS="-ldl $LIBS" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +@@ -14333,33 +12036,34 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ ++#ifdef __cplusplus ++extern "C" ++#endif + char dlopen (); + int +-main (void) ++main () + { + return dlopen (); + ; + return 0; + } + _ACEOF +-if ac_fn_c_try_link "$LINENO" +-then : ++if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_dl_dlopen=yes +-else $as_nop ++else + ac_cv_lib_dl_dlopen=no + fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam \ ++rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + LIBS=$ac_check_lib_save_LIBS + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5 +-printf "%s\n" "$ac_cv_lib_dl_dlopen" >&6; } +-if test "x$ac_cv_lib_dl_dlopen" = xyes +-then : +- lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5 ++$as_echo "$ac_cv_lib_dl_dlopen" >&6; } ++if test "x$ac_cv_lib_dl_dlopen" = xyes; then : ++ lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl" ++else + +- lt_cv_dlopen=dyld ++ lt_cv_dlopen="dyld" + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + +@@ -14367,26 +12071,16 @@ fi + + ;; + +- tpf*) +- # Don't try to run any link tests for TPF. 
We know it's impossible +- # because TPF is a cross-compiler, and we know how we open DSOs. +- lt_cv_dlopen=dlopen +- lt_cv_dlopen_libs= +- lt_cv_dlopen_self=no +- ;; +- + *) + ac_fn_c_check_func "$LINENO" "shl_load" "ac_cv_func_shl_load" +-if test "x$ac_cv_func_shl_load" = xyes +-then : +- lt_cv_dlopen=shl_load +-else $as_nop +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for shl_load in -ldld" >&5 +-printf %s "checking for shl_load in -ldld... " >&6; } +-if test ${ac_cv_lib_dld_shl_load+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++if test "x$ac_cv_func_shl_load" = xyes; then : ++ lt_cv_dlopen="shl_load" ++else ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for shl_load in -ldld" >&5 ++$as_echo_n "checking for shl_load in -ldld... " >&6; } ++if ${ac_cv_lib_dld_shl_load+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + ac_check_lib_save_LIBS=$LIBS + LIBS="-ldld $LIBS" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +@@ -14395,42 +12089,41 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ ++#ifdef __cplusplus ++extern "C" ++#endif + char shl_load (); + int +-main (void) ++main () + { + return shl_load (); + ; + return 0; + } + _ACEOF +-if ac_fn_c_try_link "$LINENO" +-then : ++if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_dld_shl_load=yes +-else $as_nop ++else + ac_cv_lib_dld_shl_load=no + fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam \ ++rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + LIBS=$ac_check_lib_save_LIBS + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_shl_load" >&5 +-printf "%s\n" "$ac_cv_lib_dld_shl_load" >&6; } +-if test "x$ac_cv_lib_dld_shl_load" = xyes +-then : +- lt_cv_dlopen=shl_load lt_cv_dlopen_libs=-ldld +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_shl_load" >&5 ++$as_echo "$ac_cv_lib_dld_shl_load" >&6; } ++if test "x$ac_cv_lib_dld_shl_load" = xyes; then : ++ lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-ldld" ++else + ac_fn_c_check_func "$LINENO" "dlopen" "ac_cv_func_dlopen" +-if test "x$ac_cv_func_dlopen" = xyes +-then : +- lt_cv_dlopen=dlopen +-else $as_nop +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5 +-printf %s "checking for dlopen in -ldl... " >&6; } +-if test ${ac_cv_lib_dl_dlopen+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++if test "x$ac_cv_func_dlopen" = xyes; then : ++ lt_cv_dlopen="dlopen" ++else ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5 ++$as_echo_n "checking for dlopen in -ldl... " >&6; } ++if ${ac_cv_lib_dl_dlopen+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + ac_check_lib_save_LIBS=$LIBS + LIBS="-ldl $LIBS" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +@@ -14439,37 +12132,37 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ ++#ifdef __cplusplus ++extern "C" ++#endif + char dlopen (); + int +-main (void) ++main () + { + return dlopen (); + ; + return 0; + } + _ACEOF +-if ac_fn_c_try_link "$LINENO" +-then : ++if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_dl_dlopen=yes +-else $as_nop ++else + ac_cv_lib_dl_dlopen=no + fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam \ ++rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + LIBS=$ac_check_lib_save_LIBS + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5 +-printf "%s\n" "$ac_cv_lib_dl_dlopen" >&6; } +-if test "x$ac_cv_lib_dl_dlopen" = xyes +-then : +- lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl +-else $as_nop +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for dlopen in -lsvld" >&5 +-printf %s "checking for dlopen in -lsvld... " >&6; } +-if test ${ac_cv_lib_svld_dlopen+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5 ++$as_echo "$ac_cv_lib_dl_dlopen" >&6; } ++if test "x$ac_cv_lib_dl_dlopen" = xyes; then : ++ lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl" ++else ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -lsvld" >&5 ++$as_echo_n "checking for dlopen in -lsvld... " >&6; } ++if ${ac_cv_lib_svld_dlopen+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + ac_check_lib_save_LIBS=$LIBS + LIBS="-lsvld $LIBS" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +@@ -14478,37 +12171,37 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ ++#ifdef __cplusplus ++extern "C" ++#endif + char dlopen (); + int +-main (void) ++main () + { + return dlopen (); + ; + return 0; + } + _ACEOF +-if ac_fn_c_try_link "$LINENO" +-then : ++if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_svld_dlopen=yes +-else $as_nop ++else + ac_cv_lib_svld_dlopen=no + fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam \ ++rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + LIBS=$ac_check_lib_save_LIBS + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_svld_dlopen" >&5 +-printf "%s\n" "$ac_cv_lib_svld_dlopen" >&6; } +-if test "x$ac_cv_lib_svld_dlopen" = xyes +-then : +- lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-lsvld +-else $as_nop +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for dld_link in -ldld" >&5 +-printf %s "checking for dld_link in -ldld... " >&6; } +-if test ${ac_cv_lib_dld_dld_link+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_svld_dlopen" >&5 ++$as_echo "$ac_cv_lib_svld_dlopen" >&6; } ++if test "x$ac_cv_lib_svld_dlopen" = xyes; then : ++ lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld" ++else ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dld_link in -ldld" >&5 ++$as_echo_n "checking for dld_link in -ldld... " >&6; } ++if ${ac_cv_lib_dld_dld_link+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + ac_check_lib_save_LIBS=$LIBS + LIBS="-ldld $LIBS" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +@@ -14517,30 +12210,31 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ ++#ifdef __cplusplus ++extern "C" ++#endif + char dld_link (); + int +-main (void) ++main () + { + return dld_link (); + ; + return 0; + } + _ACEOF +-if ac_fn_c_try_link "$LINENO" +-then : ++if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_dld_dld_link=yes +-else $as_nop ++else + ac_cv_lib_dld_dld_link=no + fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam \ ++rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + LIBS=$ac_check_lib_save_LIBS + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_dld_link" >&5 +-printf "%s\n" "$ac_cv_lib_dld_dld_link" >&6; } +-if test "x$ac_cv_lib_dld_dld_link" = xyes +-then : +- lt_cv_dlopen=dld_link lt_cv_dlopen_libs=-ldld ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_dld_link" >&5 ++$as_echo "$ac_cv_lib_dld_dld_link" >&6; } ++if test "x$ac_cv_lib_dld_dld_link" = xyes; then : ++ lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-ldld" + fi + + +@@ -14561,36 +12255,35 @@ fi + ;; + esac + +- if test no = "$lt_cv_dlopen"; then +- enable_dlopen=no +- else ++ if test "x$lt_cv_dlopen" != xno; then + enable_dlopen=yes ++ else ++ enable_dlopen=no + fi + + case $lt_cv_dlopen in + dlopen) +- save_CPPFLAGS=$CPPFLAGS +- test yes = "$ac_cv_header_dlfcn_h" && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" ++ save_CPPFLAGS="$CPPFLAGS" ++ test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" + +- save_LDFLAGS=$LDFLAGS ++ save_LDFLAGS="$LDFLAGS" + wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" + +- save_LIBS=$LIBS ++ save_LIBS="$LIBS" + LIBS="$lt_cv_dlopen_libs $LIBS" + +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether a program can dlopen itself" >&5 +-printf %s "checking whether a program can dlopen itself... 
" >&6; } +-if test ${lt_cv_dlopen_self+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop +- if test yes = "$cross_compiling"; then : ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program can dlopen itself" >&5 ++$as_echo_n "checking whether a program can dlopen itself... " >&6; } ++if ${lt_cv_dlopen_self+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ if test "$cross_compiling" = yes; then : + lt_cv_dlopen_self=cross + else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +-#line $LINENO "configure" ++#line 12286 "configure" + #include "confdefs.h" + + #if HAVE_DLFCN_H +@@ -14631,13 +12324,13 @@ else + # endif + #endif + +-/* When -fvisibility=hidden is used, assume the code has been annotated ++/* When -fvisbility=hidden is used, assume the code has been annotated + correspondingly for the symbols needed. */ +-#if defined __GNUC__ && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) +-int fnord () __attribute__((visibility("default"))); ++#if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) ++void fnord () __attribute__((visibility("default"))); + #endif + +-int fnord () { return 42; } ++void fnord () { int i=42; } + int main () + { + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); +@@ -14662,8 +12355,8 @@ _LT_EOF + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 + (eval $ac_link) 2>&5 + ac_status=$? +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 +- test $ac_status = 0; } && test -s "conftest$ac_exeext" 2>/dev/null; then ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ test $ac_status = 0; } && test -s conftest${ac_exeext} 2>/dev/null; then + (./conftest; exit; ) >&5 2>/dev/null + lt_status=$? 
+ case x$lt_status in +@@ -14680,24 +12373,23 @@ rm -fr conftest* + + + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self" >&5 +-printf "%s\n" "$lt_cv_dlopen_self" >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self" >&5 ++$as_echo "$lt_cv_dlopen_self" >&6; } + +- if test yes = "$lt_cv_dlopen_self"; then ++ if test "x$lt_cv_dlopen_self" = xyes; then + wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\" +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether a statically linked program can dlopen itself" >&5 +-printf %s "checking whether a statically linked program can dlopen itself... " >&6; } +-if test ${lt_cv_dlopen_self_static+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop +- if test yes = "$cross_compiling"; then : ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a statically linked program can dlopen itself" >&5 ++$as_echo_n "checking whether a statically linked program can dlopen itself... " >&6; } ++if ${lt_cv_dlopen_self_static+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ if test "$cross_compiling" = yes; then : + lt_cv_dlopen_self_static=cross + else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +-#line $LINENO "configure" ++#line 12392 "configure" + #include "confdefs.h" + + #if HAVE_DLFCN_H +@@ -14738,13 +12430,13 @@ else + # endif + #endif + +-/* When -fvisibility=hidden is used, assume the code has been annotated ++/* When -fvisbility=hidden is used, assume the code has been annotated + correspondingly for the symbols needed. 
*/ +-#if defined __GNUC__ && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) +-int fnord () __attribute__((visibility("default"))); ++#if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) ++void fnord () __attribute__((visibility("default"))); + #endif + +-int fnord () { return 42; } ++void fnord () { int i=42; } + int main () + { + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); +@@ -14769,8 +12461,8 @@ _LT_EOF + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 + (eval $ac_link) 2>&5 + ac_status=$? +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 +- test $ac_status = 0; } && test -s "conftest$ac_exeext" 2>/dev/null; then ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ test $ac_status = 0; } && test -s conftest${ac_exeext} 2>/dev/null; then + (./conftest; exit; ) >&5 2>/dev/null + lt_status=$? + case x$lt_status in +@@ -14787,13 +12479,13 @@ rm -fr conftest* + + + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self_static" >&5 +-printf "%s\n" "$lt_cv_dlopen_self_static" >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self_static" >&5 ++$as_echo "$lt_cv_dlopen_self_static" >&6; } + fi + +- CPPFLAGS=$save_CPPFLAGS +- LDFLAGS=$save_LDFLAGS +- LIBS=$save_LIBS ++ CPPFLAGS="$save_CPPFLAGS" ++ LDFLAGS="$save_LDFLAGS" ++ LIBS="$save_LIBS" + ;; + esac + +@@ -14826,43 +12518,32 @@ fi + + striplib= + old_striplib= +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether stripping libraries is possible" >&5 +-printf %s "checking whether stripping libraries is possible... 
" >&6; } +-if test -z "$STRIP"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } +-else +- if $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then +- old_striplib="$STRIP --strip-debug" +- striplib="$STRIP --strip-unneeded" +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +-printf "%s\n" "yes" >&6; } +- else +- case $host_os in +- darwin*) +- # FIXME - insert some real tests, host_os isn't really good enough ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether stripping libraries is possible" >&5 ++$as_echo_n "checking whether stripping libraries is possible... " >&6; } ++if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then ++ test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" ++ test -z "$striplib" && striplib="$STRIP --strip-unneeded" ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 ++$as_echo "yes" >&6; } ++else ++# FIXME - insert some real tests, host_os isn't really good enough ++ case $host_os in ++ darwin*) ++ if test -n "$STRIP" ; then + striplib="$STRIP -x" + old_striplib="$STRIP -S" +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +-printf "%s\n" "yes" >&6; } +- ;; +- freebsd*) +- if $STRIP -V 2>&1 | $GREP "elftoolchain" >/dev/null; then +- old_striplib="$STRIP --strip-debug" +- striplib="$STRIP --strip-unneeded" +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +-printf "%s\n" "yes" >&6; } +- else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } +- fi +- ;; +- *) +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } +- ;; +- esac +- fi ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 ++$as_echo "yes" >&6; } ++ else ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } ++ fi ++ ;; ++ *) ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } ++ ;; ++ esac + 
fi + + +@@ -14876,21 +12557,21 @@ fi + + + +- # Report what library types will actually be built +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if libtool supports shared libraries" >&5 +-printf %s "checking if libtool supports shared libraries... " >&6; } +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $can_build_shared" >&5 +-printf "%s\n" "$can_build_shared" >&6; } ++ # Report which library types will actually be built ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking if libtool supports shared libraries" >&5 ++$as_echo_n "checking if libtool supports shared libraries... " >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $can_build_shared" >&5 ++$as_echo "$can_build_shared" >&6; } + +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether to build shared libraries" >&5 +-printf %s "checking whether to build shared libraries... " >&6; } +- test no = "$can_build_shared" && enable_shared=no ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build shared libraries" >&5 ++$as_echo_n "checking whether to build shared libraries... " >&6; } ++ test "$can_build_shared" = "no" && enable_shared=no + + # On AIX, shared libraries and static libraries use the same namespace, and + # are all built from PIC. + case $host_os in + aix3*) +- test yes = "$enable_shared" && enable_static=no ++ test "$enable_shared" = yes && enable_static=no + if test -n "$RANLIB"; then + archive_cmds="$archive_cmds~\$RANLIB \$lib" + postinstall_cmds='$RANLIB $lib' +@@ -14898,24 +12579,20 @@ printf %s "checking whether to build shared libraries... 
" >&6; } + ;; + + aix[4-9]*) +- if test ia64 != "$host_cpu"; then +- case $enable_shared,$with_aix_soname,$aix_use_runtimelinking in +- yes,aix,yes) ;; # shared object as lib.so file only +- yes,svr4,*) ;; # shared object as lib.so archive member only +- yes,*) enable_static=no ;; # shared object in lib.a archive as well +- esac ++ if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then ++ test "$enable_shared" = yes && enable_static=no + fi + ;; + esac +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $enable_shared" >&5 +-printf "%s\n" "$enable_shared" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_shared" >&5 ++$as_echo "$enable_shared" >&6; } + +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether to build static libraries" >&5 +-printf %s "checking whether to build static libraries... " >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build static libraries" >&5 ++$as_echo_n "checking whether to build static libraries... " >&6; } + # Make sure either enable_shared or enable_static is yes. 
+- test yes = "$enable_shared" || enable_static=yes +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $enable_static" >&5 +-printf "%s\n" "$enable_static" >&6; } ++ test "$enable_shared" = yes || enable_static=yes ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_static" >&5 ++$as_echo "$enable_static" >&6; } + + + +@@ -14927,42 +12604,46 @@ ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' + ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' + ac_compiler_gnu=$ac_cv_c_compiler_gnu + +-CC=$lt_save_CC ++CC="$lt_save_CC" + +- if test -n "$CXX" && ( test no != "$CXX" && +- ( (test g++ = "$CXX" && `g++ -v >/dev/null 2>&1` ) || +- (test g++ != "$CXX"))); then ++ if test -n "$CXX" && ( test "X$CXX" != "Xno" && ++ ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) || ++ (test "X$CXX" != "Xg++"))) ; then + ac_ext=cpp + ac_cpp='$CXXCPP $CPPFLAGS' + ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' + ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' + ac_compiler_gnu=$ac_cv_cxx_compiler_gnu +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to run the C++ preprocessor" >&5 +-printf %s "checking how to run the C++ preprocessor... " >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C++ preprocessor" >&5 ++$as_echo_n "checking how to run the C++ preprocessor... " >&6; } + if test -z "$CXXCPP"; then +- if test ${ac_cv_prog_CXXCPP+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop +- # Double quotes because $CXX needs to be expanded +- for CXXCPP in "$CXX -E" cpp /lib/cpp ++ if ${ac_cv_prog_CXXCPP+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ # Double quotes because CXXCPP needs to be expanded ++ for CXXCPP in "$CXX -E" "/lib/cpp" + do + ac_preproc_ok=false + for ac_cxx_preproc_warn_flag in '' yes + do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. 
++ # Prefer to if __STDC__ is defined, since ++ # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ +-#include ++#ifdef __STDC__ ++# include ++#else ++# include ++#endif + Syntax error + _ACEOF +-if ac_fn_cxx_try_cpp "$LINENO" +-then : ++if ac_fn_cxx_try_cpp "$LINENO"; then : + +-else $as_nop ++else + # Broken: fails on valid input. + continue + fi +@@ -14974,11 +12655,10 @@ rm -f conftest.err conftest.i conftest.$ac_ext + /* end confdefs.h. */ + #include + _ACEOF +-if ac_fn_cxx_try_cpp "$LINENO" +-then : ++if ac_fn_cxx_try_cpp "$LINENO"; then : + # Broken: success on invalid input. + continue +-else $as_nop ++else + # Passes both tests. + ac_preproc_ok=: + break +@@ -14988,8 +12668,7 @@ rm -f conftest.err conftest.i conftest.$ac_ext + done + # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. + rm -f conftest.i conftest.err conftest.$ac_ext +-if $ac_preproc_ok +-then : ++if $ac_preproc_ok; then : + break + fi + +@@ -15001,24 +12680,29 @@ fi + else + ac_cv_prog_CXXCPP=$CXXCPP + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CXXCPP" >&5 +-printf "%s\n" "$CXXCPP" >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXXCPP" >&5 ++$as_echo "$CXXCPP" >&6; } + ac_preproc_ok=false + for ac_cxx_preproc_warn_flag in '' yes + do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. ++ # Prefer to if __STDC__ is defined, since ++ # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. 
*/ +-#include ++#ifdef __STDC__ ++# include ++#else ++# include ++#endif + Syntax error + _ACEOF +-if ac_fn_cxx_try_cpp "$LINENO" +-then : ++if ac_fn_cxx_try_cpp "$LINENO"; then : + +-else $as_nop ++else + # Broken: fails on valid input. + continue + fi +@@ -15030,11 +12714,10 @@ rm -f conftest.err conftest.i conftest.$ac_ext + /* end confdefs.h. */ + #include + _ACEOF +-if ac_fn_cxx_try_cpp "$LINENO" +-then : ++if ac_fn_cxx_try_cpp "$LINENO"; then : + # Broken: success on invalid input. + continue +-else $as_nop ++else + # Passes both tests. + ac_preproc_ok=: + break +@@ -15044,12 +12727,11 @@ rm -f conftest.err conftest.i conftest.$ac_ext + done + # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. + rm -f conftest.i conftest.err conftest.$ac_ext +-if $ac_preproc_ok +-then : ++if $ac_preproc_ok; then : + +-else $as_nop +- { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +-printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} ++else ++ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 ++$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} + as_fn_error $? "C++ preprocessor \"$CXXCPP\" fails sanity check + See \`config.log' for more details" "$LINENO" 5; } + fi +@@ -15079,6 +12761,7 @@ export_dynamic_flag_spec_CXX= + hardcode_direct_CXX=no + hardcode_direct_absolute_CXX=no + hardcode_libdir_flag_spec_CXX= ++hardcode_libdir_flag_spec_ld_CXX= + hardcode_libdir_separator_CXX= + hardcode_minus_L_CXX=no + hardcode_shlibpath_var_CXX=unsupported +@@ -15105,7 +12788,7 @@ objext_CXX=$objext + # the CXX compiler isn't working. Some variables (like enable_shared) + # are currently assumed to apply to all compilers on this platform, + # and will be corrupted by setting them based on a non-working compiler. 
+-if test yes != "$_lt_caught_CXX_error"; then ++if test "$_lt_caught_CXX_error" != yes; then + # Code to be used in simple compile tests + lt_simple_compile_test_code="int some_variable = 0;" + +@@ -15145,7 +12828,6 @@ $RM -r conftest* + + # Allow CC to be a program name with arguments. + lt_save_CC=$CC +- lt_save_CFLAGS=$CFLAGS + lt_save_LD=$LD + lt_save_GCC=$GCC + GCC=$GXX +@@ -15163,43 +12845,48 @@ $RM -r conftest* + fi + test -z "${LDCXX+set}" || LD=$LDCXX + CC=${CXX-"c++"} +- CFLAGS=$CXXFLAGS + compiler=$CC + compiler_CXX=$CC +- func_cc_basename $compiler +-cc_basename=$func_cc_basename_result ++ for cc_temp in $compiler""; do ++ case $cc_temp in ++ compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; ++ distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; ++ \-*) ;; ++ *) break;; ++ esac ++done ++cc_basename=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` + + + if test -n "$compiler"; then + # We don't want -fno-exception when compiling C++ code, so set the + # no_builtin_flag separately +- if test yes = "$GXX"; then ++ if test "$GXX" = yes; then + lt_prog_compiler_no_builtin_flag_CXX=' -fno-builtin' + else + lt_prog_compiler_no_builtin_flag_CXX= + fi + +- if test yes = "$GXX"; then ++ if test "$GXX" = yes; then + # Set up default GNU C++ configuration + + + + # Check whether --with-gnu-ld was given. +-if test ${with_gnu_ld+y} +-then : +- withval=$with_gnu_ld; test no = "$withval" || with_gnu_ld=yes +-else $as_nop ++if test "${with_gnu_ld+set}" = set; then : ++ withval=$with_gnu_ld; test "$withval" = no || with_gnu_ld=yes ++else + with_gnu_ld=no + fi + + ac_prog=ld +-if test yes = "$GCC"; then ++if test "$GCC" = yes; then + # Check if gcc -print-prog-name=ld gives a path. +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5 +-printf %s "checking for ld used by $CC... " >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5 ++$as_echo_n "checking for ld used by $CC... 
" >&6; } + case $host in + *-*-mingw*) +- # gcc leaves a trailing carriage return, which upsets mingw ++ # gcc leaves a trailing carriage return which upsets mingw + ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; + *) + ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; +@@ -15213,7 +12900,7 @@ printf %s "checking for ld used by $CC... " >&6; } + while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do + ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` + done +- test -z "$LD" && LD=$ac_prog ++ test -z "$LD" && LD="$ac_prog" + ;; + "") + # If it fails, then pretend we aren't using GCC. +@@ -15224,58 +12911,56 @@ printf %s "checking for ld used by $CC... " >&6; } + with_gnu_ld=unknown + ;; + esac +-elif test yes = "$with_gnu_ld"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5 +-printf %s "checking for GNU ld... " >&6; } ++elif test "$with_gnu_ld" = yes; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5 ++$as_echo_n "checking for GNU ld... " >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5 +-printf %s "checking for non-GNU ld... " >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5 ++$as_echo_n "checking for non-GNU ld... " >&6; } + fi +-if test ${lt_cv_path_LD+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++if ${lt_cv_path_LD+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + if test -z "$LD"; then +- lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR ++ lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + for ac_dir in $PATH; do +- IFS=$lt_save_ifs ++ IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then +- lt_cv_path_LD=$ac_dir/$ac_prog ++ lt_cv_path_LD="$ac_dir/$ac_prog" + # Check to see if the program is GNU ld. I'd rather use --version, + # but apparently some variants of GNU ld only accept -v. + # Break only if it was the GNU/non-GNU ld that we prefer. 
+ case `"$lt_cv_path_LD" -v 2>&1 &5 +-printf "%s\n" "$LD" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LD" >&5 ++$as_echo "$LD" >&6; } + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +-printf "%s\n" "no" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } + fi + test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5 +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5 +-printf %s "checking if the linker ($LD) is GNU ld... " >&6; } +-if test ${lt_cv_prog_gnu_ld+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5 ++$as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; } ++if ${lt_cv_prog_gnu_ld+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + # I'd rather use --version here, but apparently some GNU lds only accept -v. + case `$LD -v 2>&1 &1 &5 +-printf "%s\n" "$lt_cv_prog_gnu_ld" >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_gnu_ld" >&5 ++$as_echo "$lt_cv_prog_gnu_ld" >&6; } + with_gnu_ld=$lt_cv_prog_gnu_ld + + +@@ -15298,22 +12983,22 @@ with_gnu_ld=$lt_cv_prog_gnu_ld + + # Check if GNU C++ uses GNU ld as the underlying linker, since the + # archiving commands below assume that GNU ld is being used. 
+- if test yes = "$with_gnu_ld"; then +- archive_cmds_CXX='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' +- archive_expsym_cmds_CXX='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ++ if test "$with_gnu_ld" = yes; then ++ archive_cmds_CXX='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' ++ archive_expsym_cmds_CXX='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + +- hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir' +- export_dynamic_flag_spec_CXX='$wl--export-dynamic' ++ hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' ++ export_dynamic_flag_spec_CXX='${wl}--export-dynamic' + + # If archive_cmds runs LD, not CC, wlarc should be empty + # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to + # investigate it a little bit more. (MM) +- wlarc='$wl' ++ wlarc='${wl}' + + # ancient GNU ld didn't support --whole-archive et. al. + if eval "`$CC -print-prog-name=ld` --help 2>&1" | + $GREP 'no-whole-archive' > /dev/null; then +- whole_archive_flag_spec_CXX=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive' ++ whole_archive_flag_spec_CXX="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + else + whole_archive_flag_spec_CXX= + fi +@@ -15341,8 +13026,8 @@ with_gnu_ld=$lt_cv_prog_gnu_ld + fi + + # PORTME: fill in a description of your system's C++ link characteristics +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 +-printf %s "checking whether the $compiler linker ($LD) supports shared libraries... 
" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 ++$as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; } + ld_shlibs_CXX=yes + case $host_os in + aix3*) +@@ -15350,30 +13035,18 @@ printf %s "checking whether the $compiler linker ($LD) supports shared libraries + ld_shlibs_CXX=no + ;; + aix[4-9]*) +- if test ia64 = "$host_cpu"; then ++ if test "$host_cpu" = ia64; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' +- no_entry_flag= ++ no_entry_flag="" + else + aix_use_runtimelinking=no + + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we +- # have runtime linking enabled, and use it for executables. +- # For shared libraries, we enable/disable runtime linking +- # depending on the kind of the shared library created - +- # when "with_aix_soname,aix_use_runtimelinking" is: +- # "aix,no" lib.a(lib.so.V) shared, rtl:no, for executables +- # "aix,yes" lib.so shared, rtl:yes, for executables +- # lib.a static archive +- # "both,no" lib.so.V(shr.o) shared, rtl:yes +- # lib.a(lib.so.V) shared, rtl:no, for executables +- # "both,yes" lib.so.V(shr.o) shared, rtl:yes, for executables +- # lib.a(lib.so.V) shared, rtl:no +- # "svr4,*" lib.so.V(shr.o) shared, rtl:yes, for executables +- # lib.a static archive ++ # need to do runtime linking. + case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*) + for ld_flag in $LDFLAGS; do + case $ld_flag in +@@ -15383,13 +13056,6 @@ printf %s "checking whether the $compiler linker ($LD) supports shared libraries + ;; + esac + done +- if test svr4,no = "$with_aix_soname,$aix_use_runtimelinking"; then +- # With aix-soname=svr4, we create the lib.so.V shared archives only, +- # so we don't have lib.a shared libs to link our executables. 
+- # We have to force runtime linking in this case. +- aix_use_runtimelinking=yes +- LDFLAGS="$LDFLAGS -Wl,-brtl" +- fi + ;; + esac + +@@ -15408,21 +13074,13 @@ printf %s "checking whether the $compiler linker ($LD) supports shared libraries + hardcode_direct_absolute_CXX=yes + hardcode_libdir_separator_CXX=':' + link_all_deplibs_CXX=yes +- file_list_spec_CXX='$wl-f,' +- case $with_aix_soname,$aix_use_runtimelinking in +- aix,*) ;; # no import file +- svr4,* | *,yes) # use import file +- # The Import File defines what to hardcode. +- hardcode_direct_CXX=no +- hardcode_direct_absolute_CXX=no +- ;; +- esac ++ file_list_spec_CXX='${wl}-f,' + +- if test yes = "$GXX"; then ++ if test "$GXX" = yes; then + case $host_os in aix4.[012]|aix4.[012].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ +- collect2name=`$CC -print-prog-name=collect2` ++ collect2name=`${CC} -print-prog-name=collect2` + if test -f "$collect2name" && + strings "$collect2name" | $GREP resolve_lib_name >/dev/null + then +@@ -15440,172 +13098,122 @@ printf %s "checking whether the $compiler linker ($LD) supports shared libraries + fi + esac + shared_flag='-shared' +- if test yes = "$aix_use_runtimelinking"; then +- shared_flag=$shared_flag' $wl-G' ++ if test "$aix_use_runtimelinking" = yes; then ++ shared_flag="$shared_flag "'${wl}-G' + fi +- # Need to ensure runtime linking is disabled for the traditional +- # shared library, or the linker may eventually find shared libraries +- # /with/ Import File - we do not want to mix them. +- shared_flag_aix='-shared' +- shared_flag_svr4='-shared $wl-G' + else + # not using gcc +- if test ia64 = "$host_cpu"; then ++ if test "$host_cpu" = ia64; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. 
The following line is correct: + shared_flag='-G' + else +- if test yes = "$aix_use_runtimelinking"; then +- shared_flag='$wl-G' ++ if test "$aix_use_runtimelinking" = yes; then ++ shared_flag='${wl}-G' + else +- shared_flag='$wl-bM:SRE' ++ shared_flag='${wl}-bM:SRE' + fi +- shared_flag_aix='$wl-bM:SRE' +- shared_flag_svr4='$wl-G' + fi + fi + +- export_dynamic_flag_spec_CXX='$wl-bexpall' ++ export_dynamic_flag_spec_CXX='${wl}-bexpall' + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to + # export. + always_export_symbols_CXX=yes +- if test aix,yes = "$with_aix_soname,$aix_use_runtimelinking"; then ++ if test "$aix_use_runtimelinking" = yes; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. +- # The "-G" linker flag allows undefined symbols. +- no_undefined_flag_CXX='-bernotok' ++ allow_undefined_flag_CXX='-berok' + # Determine the default libpath from the value encoded in an empty + # executable. +- if test set = "${lt_cv_aix_libpath+set}"; then +- aix_libpath=$lt_cv_aix_libpath +-else +- if test ${lt_cv_aix_libpath__CXX+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop +- cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ + + int +-main (void) ++main () + { + + ; + return 0; + } + _ACEOF +-if ac_fn_cxx_try_link "$LINENO" +-then : +- +- lt_aix_libpath_sed=' +- /Import File Strings/,/^$/ { +- /^0/ { +- s/^0 *\([^ ]*\) *$/\1/ +- p +- } +- }' +- lt_cv_aix_libpath__CXX=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +- # Check for a 64-bit object if we didn't find anything. 
+- if test -z "$lt_cv_aix_libpath__CXX"; then +- lt_cv_aix_libpath__CXX=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +- fi +-fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam \ +- conftest$ac_exeext conftest.$ac_ext +- if test -z "$lt_cv_aix_libpath__CXX"; then +- lt_cv_aix_libpath__CXX=/usr/lib:/lib +- fi ++if ac_fn_cxx_try_link "$LINENO"; then : + ++lt_aix_libpath_sed=' ++ /Import File Strings/,/^$/ { ++ /^0/ { ++ s/^0 *\(.*\)$/\1/ ++ p ++ } ++ }' ++aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++# Check for a 64-bit object if we didn't find anything. ++if test -z "$aix_libpath"; then ++ aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + fi +- +- aix_libpath=$lt_cv_aix_libpath__CXX + fi ++rm -f core conftest.err conftest.$ac_objext \ ++ conftest$ac_exeext conftest.$ac_ext ++if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + +- hardcode_libdir_flag_spec_CXX='$wl-blibpath:$libdir:'"$aix_libpath" ++ hardcode_libdir_flag_spec_CXX='${wl}-blibpath:$libdir:'"$aix_libpath" + +- archive_expsym_cmds_CXX='$CC -o $output_objdir/$soname $libobjs $deplibs $wl'$no_entry_flag' $compiler_flags `if test -n "$allow_undefined_flag"; then func_echo_all "$wl$allow_undefined_flag"; else :; fi` $wl'$exp_sym_flag:\$export_symbols' '$shared_flag ++ archive_expsym_cmds_CXX='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" + else +- if test ia64 = "$host_cpu"; then +- hardcode_libdir_flag_spec_CXX='$wl-R $libdir:/usr/lib:/lib' ++ if test "$host_cpu" = ia64; then ++ hardcode_libdir_flag_spec_CXX='${wl}-R $libdir:/usr/lib:/lib' + allow_undefined_flag_CXX="-z nodefs" +- archive_expsym_cmds_CXX="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs 
$deplibs '"\$wl$no_entry_flag"' $compiler_flags $wl$allow_undefined_flag '"\$wl$exp_sym_flag:\$export_symbols" ++ archive_expsym_cmds_CXX="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an + # empty executable. +- if test set = "${lt_cv_aix_libpath+set}"; then +- aix_libpath=$lt_cv_aix_libpath +-else +- if test ${lt_cv_aix_libpath__CXX+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop +- cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ + + int +-main (void) ++main () + { + + ; + return 0; + } + _ACEOF +-if ac_fn_cxx_try_link "$LINENO" +-then : +- +- lt_aix_libpath_sed=' +- /Import File Strings/,/^$/ { +- /^0/ { +- s/^0 *\([^ ]*\) *$/\1/ +- p +- } +- }' +- lt_cv_aix_libpath__CXX=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +- # Check for a 64-bit object if we didn't find anything. +- if test -z "$lt_cv_aix_libpath__CXX"; then +- lt_cv_aix_libpath__CXX=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +- fi +-fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam \ +- conftest$ac_exeext conftest.$ac_ext +- if test -z "$lt_cv_aix_libpath__CXX"; then +- lt_cv_aix_libpath__CXX=/usr/lib:/lib +- fi ++if ac_fn_cxx_try_link "$LINENO"; then : + ++lt_aix_libpath_sed=' ++ /Import File Strings/,/^$/ { ++ /^0/ { ++ s/^0 *\(.*\)$/\1/ ++ p ++ } ++ }' ++aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++# Check for a 64-bit object if we didn't find anything. 
++if test -z "$aix_libpath"; then ++ aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + fi +- +- aix_libpath=$lt_cv_aix_libpath__CXX + fi ++rm -f core conftest.err conftest.$ac_objext \ ++ conftest$ac_exeext conftest.$ac_ext ++if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + +- hardcode_libdir_flag_spec_CXX='$wl-blibpath:$libdir:'"$aix_libpath" ++ hardcode_libdir_flag_spec_CXX='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. +- no_undefined_flag_CXX=' $wl-bernotok' +- allow_undefined_flag_CXX=' $wl-berok' +- if test yes = "$with_gnu_ld"; then ++ no_undefined_flag_CXX=' ${wl}-bernotok' ++ allow_undefined_flag_CXX=' ${wl}-berok' ++ if test "$with_gnu_ld" = yes; then + # We only use this code for GNU lds that support --whole-archive. +- whole_archive_flag_spec_CXX='$wl--whole-archive$convenience $wl--no-whole-archive' ++ whole_archive_flag_spec_CXX='${wl}--whole-archive$convenience ${wl}--no-whole-archive' + else + # Exported symbols can be pulled into shared objects from archives + whole_archive_flag_spec_CXX='$convenience' + fi + archive_cmds_need_lc_CXX=yes +- archive_expsym_cmds_CXX='$RM -r $output_objdir/$realname.d~$MKDIR $output_objdir/$realname.d' +- # -brtl affects multiple linker settings, -berok does not and is overridden later +- compiler_flags_filtered='`func_echo_all "$compiler_flags " | $SED -e "s%-brtl\\([, ]\\)%-berok\\1%g"`' +- if test svr4 != "$with_aix_soname"; then +- # This is similar to how AIX traditionally builds its shared +- # libraries. Need -bnortl late, we may have -brtl in LDFLAGS. 
+- archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$CC '$shared_flag_aix' -o $output_objdir/$realname.d/$soname $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$realname.d/$soname' +- fi +- if test aix != "$with_aix_soname"; then +- archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$CC '$shared_flag_svr4' -o $output_objdir/$realname.d/$shared_archive_member_spec.o $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$STRIP -e $output_objdir/$realname.d/$shared_archive_member_spec.o~( func_echo_all "#! $soname($shared_archive_member_spec.o)"; if test shr_64 = "$shared_archive_member_spec"; then func_echo_all "# 64"; else func_echo_all "# 32"; fi; cat $export_symbols ) > $output_objdir/$realname.d/$shared_archive_member_spec.imp~$AR $AR_FLAGS $output_objdir/$soname $output_objdir/$realname.d/$shared_archive_member_spec.o $output_objdir/$realname.d/$shared_archive_member_spec.imp' +- else +- # used by -dlpreopen to get the symbols +- archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$MV $output_objdir/$realname.d/$soname $output_objdir' +- fi +- archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$RM -r $output_objdir/$realname.d' ++ # This is similar to how AIX traditionally builds its shared ++ # libraries. ++ archive_expsym_cmds_CXX="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' + fi + fi + ;; +@@ -15615,7 +13223,7 @@ fi + allow_undefined_flag_CXX=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation. 
FIXME +- archive_cmds_CXX='$CC -nostart $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' ++ archive_cmds_CXX='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + else + ld_shlibs_CXX=no + fi +@@ -15631,76 +13239,29 @@ fi + ;; + + cygwin* | mingw* | pw32* | cegcc*) +- case $GXX,$cc_basename in +- ,cl* | no,cl* | ,icl* | no,icl*) +- # Native MSVC or ICC +- # hardcode_libdir_flag_spec is actually meaningless, as there is +- # no search path for DLLs. +- hardcode_libdir_flag_spec_CXX=' ' +- allow_undefined_flag_CXX=unsupported +- always_export_symbols_CXX=yes +- file_list_spec_CXX='@' +- # Tell ltmain to make .lib files, not .a files. +- libext=lib +- # Tell ltmain to make .dll files, not .so files. +- shrext_cmds=.dll +- # FIXME: Setting linknames here is a bad hack. +- archive_cmds_CXX='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~linknames=' +- archive_expsym_cmds_CXX='if test DEF = "`$SED -n -e '\''s/^[ ]*//'\'' -e '\''/^\(;.*\)*$/d'\'' -e '\''s/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p'\'' -e q $export_symbols`" ; then +- cp "$export_symbols" "$output_objdir/$soname.def"; +- echo "$tool_output_objdir$soname.def" > "$output_objdir/$soname.exp"; +- else +- $SED -e '\''s/^/-link -EXPORT:/'\'' < $export_symbols > $output_objdir/$soname.exp; +- fi~ +- $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ +- linknames=' +- # The linker will not automatically build a static lib if we build a DLL. 
+- # _LT_TAGVAR(old_archive_from_new_cmds, CXX)='true' +- enable_shared_with_static_runtimes_CXX=yes +- # Don't use ranlib +- old_postinstall_cmds_CXX='chmod 644 $oldlib' +- postlink_cmds_CXX='lt_outputfile="@OUTPUT@"~ +- lt_tool_outputfile="@TOOL_OUTPUT@"~ +- case $lt_outputfile in +- *.exe|*.EXE) ;; +- *) +- lt_outputfile=$lt_outputfile.exe +- lt_tool_outputfile=$lt_tool_outputfile.exe +- ;; +- esac~ +- func_to_tool_file "$lt_outputfile"~ +- if test : != "$MANIFEST_TOOL" && test -f "$lt_outputfile.manifest"; then +- $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; +- $RM "$lt_outputfile.manifest"; +- fi' +- ;; +- *) +- # g++ +- # _LT_TAGVAR(hardcode_libdir_flag_spec, CXX) is actually meaningless, +- # as there is no search path for DLLs. +- hardcode_libdir_flag_spec_CXX='-L$libdir' +- export_dynamic_flag_spec_CXX='$wl--export-all-symbols' +- allow_undefined_flag_CXX=unsupported +- always_export_symbols_CXX=no +- enable_shared_with_static_runtimes_CXX=yes +- +- if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then +- archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' +- # If the export-symbols file already is a .def file, use it as +- # is; otherwise, prepend EXPORTS... 
+- archive_expsym_cmds_CXX='if test DEF = "`$SED -n -e '\''s/^[ ]*//'\'' -e '\''/^\(;.*\)*$/d'\'' -e '\''s/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p'\'' -e q $export_symbols`" ; then +- cp $export_symbols $output_objdir/$soname.def; +- else +- echo EXPORTS > $output_objdir/$soname.def; +- cat $export_symbols >> $output_objdir/$soname.def; +- fi~ +- $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' +- else +- ld_shlibs_CXX=no +- fi +- ;; +- esac +- ;; ++ # _LT_TAGVAR(hardcode_libdir_flag_spec, CXX) is actually meaningless, ++ # as there is no search path for DLLs. ++ hardcode_libdir_flag_spec_CXX='-L$libdir' ++ export_dynamic_flag_spec_CXX='${wl}--export-all-symbols' ++ allow_undefined_flag_CXX=unsupported ++ always_export_symbols_CXX=no ++ enable_shared_with_static_runtimes_CXX=yes ++ ++ if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then ++ archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' ++ # If the export-symbols file already is a .def file (1st line ++ # is EXPORTS), use it as is; otherwise, prepend... 
++ archive_expsym_cmds_CXX='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then ++ cp $export_symbols $output_objdir/$soname.def; ++ else ++ echo EXPORTS > $output_objdir/$soname.def; ++ cat $export_symbols >> $output_objdir/$soname.def; ++ fi~ ++ $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' ++ else ++ ld_shlibs_CXX=no ++ fi ++ ;; + darwin* | rhapsody*) + + +@@ -15708,27 +13269,26 @@ fi + hardcode_direct_CXX=no + hardcode_automatic_CXX=yes + hardcode_shlibpath_var_CXX=unsupported +- if test yes = "$lt_cv_ld_force_load"; then +- whole_archive_flag_spec_CXX='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience $wl-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' +- ++ if test "$lt_cv_ld_force_load" = "yes"; then ++ whole_archive_flag_spec_CXX='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience ${wl}-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' + else + whole_archive_flag_spec_CXX='' + fi + link_all_deplibs_CXX=yes +- allow_undefined_flag_CXX=$_lt_dar_allow_undefined ++ allow_undefined_flag_CXX="$_lt_dar_allow_undefined" + case $cc_basename in +- ifort*|nagfor*) _lt_dar_can_shared=yes ;; ++ ifort*) _lt_dar_can_shared=yes ;; + *) _lt_dar_can_shared=$GCC ;; + esac +- if test yes = "$_lt_dar_can_shared"; then ++ if test "$_lt_dar_can_shared" = "yes"; then + output_verbose_link_cmd=func_echo_all +- archive_cmds_CXX="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dsymutil" +- module_cmds_CXX="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dsymutil" +- archive_expsym_cmds_CXX="$SED 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC 
-dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dar_export_syms$_lt_dsymutil" +- module_expsym_cmds_CXX="$SED -e 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dar_export_syms$_lt_dsymutil" +- if test yes != "$lt_cv_apple_cc_single_mod"; then +- archive_cmds_CXX="\$CC -r -keep_private_externs -nostdlib -o \$lib-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$lib-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring$_lt_dsymutil" +- archive_expsym_cmds_CXX="$SED 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \$lib-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$lib-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring$_lt_dar_export_syms$_lt_dsymutil" ++ archive_cmds_CXX="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}" ++ module_cmds_CXX="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}" ++ archive_expsym_cmds_CXX="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}" ++ module_expsym_cmds_CXX="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}" ++ if test "$lt_cv_apple_cc_single_mod" != "yes"; then ++ archive_cmds_CXX="\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o 
\$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dsymutil}" ++ archive_expsym_cmds_CXX="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dar_export_syms}${_lt_dsymutil}" + fi + + else +@@ -15737,35 +13297,6 @@ fi + + ;; + +- os2*) +- hardcode_libdir_flag_spec_CXX='-L$libdir' +- hardcode_minus_L_CXX=yes +- allow_undefined_flag_CXX=unsupported +- shrext_cmds=.dll +- archive_cmds_CXX='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ +- $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ +- $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ +- $ECHO EXPORTS >> $output_objdir/$libname.def~ +- emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ +- $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ +- emximp -o $lib $output_objdir/$libname.def' +- archive_expsym_cmds_CXX='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ +- $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ +- $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ +- $ECHO EXPORTS >> $output_objdir/$libname.def~ +- prefix_cmds="$SED"~ +- if test EXPORTS = "`$SED 1q $export_symbols`"; then +- prefix_cmds="$prefix_cmds -e 1d"; +- fi~ +- prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ +- cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ +- $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ +- emximp -o $lib $output_objdir/$libname.def' +- old_archive_From_new_cmds_CXX='emximp -o 
$output_objdir/${libname}_dll.a $output_objdir/$libname.def' +- enable_shared_with_static_runtimes_CXX=yes +- file_list_spec_CXX='@' +- ;; +- + dgux*) + case $cc_basename in + ec++*) +@@ -15794,21 +13325,24 @@ fi + archive_cmds_need_lc_CXX=no + ;; + +- freebsd* | dragonfly* | midnightbsd*) ++ freebsd* | dragonfly*) + # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF + # conventions + ld_shlibs_CXX=yes + ;; + ++ gnu*) ++ ;; ++ + haiku*) +- archive_cmds_CXX='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' ++ archive_cmds_CXX='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + link_all_deplibs_CXX=yes + ;; + + hpux9*) +- hardcode_libdir_flag_spec_CXX='$wl+b $wl$libdir' ++ hardcode_libdir_flag_spec_CXX='${wl}+b ${wl}$libdir' + hardcode_libdir_separator_CXX=: +- export_dynamic_flag_spec_CXX='$wl-E' ++ export_dynamic_flag_spec_CXX='${wl}-E' + hardcode_direct_CXX=yes + hardcode_minus_L_CXX=yes # Not in the search PATH, + # but as the default +@@ -15820,7 +13354,7 @@ fi + ld_shlibs_CXX=no + ;; + aCC*) +- archive_cmds_CXX='$RM $output_objdir/$soname~$CC -b $wl+b $wl$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' ++ archive_cmds_CXX='$RM $output_objdir/$soname~$CC -b ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. +@@ -15829,11 +13363,11 @@ fi + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. 
+- output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP "\-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' ++ output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + ;; + *) +- if test yes = "$GXX"; then +- archive_cmds_CXX='$RM $output_objdir/$soname~$CC -shared -nostdlib $pic_flag $wl+b $wl$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' ++ if test "$GXX" = yes; then ++ archive_cmds_CXX='$RM $output_objdir/$soname~$CC -shared -nostdlib -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no +@@ -15843,15 +13377,15 @@ fi + ;; + + hpux10*|hpux11*) +- if test no = "$with_gnu_ld"; then +- hardcode_libdir_flag_spec_CXX='$wl+b $wl$libdir' ++ if test $with_gnu_ld = no; then ++ hardcode_libdir_flag_spec_CXX='${wl}+b ${wl}$libdir' + hardcode_libdir_separator_CXX=: + + case $host_cpu in + hppa*64*|ia64*) + ;; + *) +- export_dynamic_flag_spec_CXX='$wl-E' ++ export_dynamic_flag_spec_CXX='${wl}-E' + ;; + esac + fi +@@ -15877,13 +13411,13 @@ fi + aCC*) + case $host_cpu in + hppa*64*) +- archive_cmds_CXX='$CC -b $wl+h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ++ archive_cmds_CXX='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + ia64*) +- archive_cmds_CXX='$CC -b $wl+h $wl$soname 
$wl+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ++ archive_cmds_CXX='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + *) +- archive_cmds_CXX='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ++ archive_cmds_CXX='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + esac + # Commands to make compiler produce verbose output that lists +@@ -15894,20 +13428,20 @@ fi + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. +- output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP "\-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' ++ output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + ;; + *) +- if test yes = "$GXX"; then +- if test no = "$with_gnu_ld"; then ++ if test "$GXX" = yes; then ++ if test $with_gnu_ld = no; then + case $host_cpu in + hppa*64*) +- archive_cmds_CXX='$CC -shared -nostdlib -fPIC $wl+h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ++ archive_cmds_CXX='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + ia64*) +- archive_cmds_CXX='$CC -shared -nostdlib $pic_flag $wl+h $wl$soname $wl+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ++ archive_cmds_CXX='$CC -shared -nostdlib 
-fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + *) +- archive_cmds_CXX='$CC -shared -nostdlib $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ++ archive_cmds_CXX='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + esac + fi +@@ -15922,22 +13456,22 @@ fi + interix[3-9]*) + hardcode_direct_CXX=no + hardcode_shlibpath_var_CXX=no +- hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir' +- export_dynamic_flag_spec_CXX='$wl-E' ++ hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' ++ export_dynamic_flag_spec_CXX='${wl}-E' + # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. + # Instead, shared libraries are loaded at an image base (0x10000000 by + # default) and relocated if they conflict, which is a slow very memory + # consuming and fragmenting process. To avoid this, we pick a random, + # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link + # time. Moving up from 0x10000000 also allows more sbrk(2) space. 
+- archive_cmds_CXX='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' +- archive_expsym_cmds_CXX='$SED "s|^|_|" $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--retain-symbols-file,$output_objdir/$soname.expsym $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' ++ archive_cmds_CXX='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' ++ archive_expsym_cmds_CXX='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + ;; + irix5* | irix6*) + case $cc_basename in + CC*) + # SGI C++ +- archive_cmds_CXX='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' ++ archive_cmds_CXX='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + + # Archives containing C++ object files must be created using + # "CC -ar", where "CC" is the IRIX C++ compiler. 
This is +@@ -15946,22 +13480,22 @@ fi + old_archive_cmds_CXX='$CC -ar -WR,-u -o $oldlib $oldobjs' + ;; + *) +- if test yes = "$GXX"; then +- if test no = "$with_gnu_ld"; then +- archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' ++ if test "$GXX" = yes; then ++ if test "$with_gnu_ld" = no; then ++ archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else +- archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` -o $lib' ++ archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` -o $lib' + fi + fi + link_all_deplibs_CXX=yes + ;; + esac +- hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir' ++ hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator_CXX=: + inherit_rpath_CXX=yes + ;; + +- linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) ++ linux* | k*bsd*-gnu | kopensolaris*-gnu) + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. (KAI) C++ Compiler +@@ -15969,8 +13503,8 @@ fi + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. 
+- archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' +- archive_expsym_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib $wl-retain-symbols-file,$export_symbols; mv \$templib $lib' ++ archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' ++ archive_expsym_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib ${wl}-retain-symbols-file,$export_symbols; mv \$templib $lib' + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. +@@ -15979,10 +13513,10 @@ fi + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. 
+- output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' ++ output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + +- hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir' +- export_dynamic_flag_spec_CXX='$wl--export-dynamic' ++ hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' ++ export_dynamic_flag_spec_CXX='${wl}--export-dynamic' + + # Archives containing C++ object files must be created using + # "CC -Bstatic", where "CC" is the KAI C++ compiler. +@@ -15996,59 +13530,59 @@ fi + # earlier do not add the objects themselves. 
+ case `$CC -V 2>&1` in + *"Version 7."*) +- archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' +- archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ++ archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' ++ archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + ;; + *) # Version 8.0 or newer + tmp_idyn= + case $host_cpu in + ia64*) tmp_idyn=' -i_dynamic';; + esac +- archive_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' +- archive_expsym_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ++ archive_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' ++ archive_expsym_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + ;; + esac + archive_cmds_need_lc_CXX=no +- hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir' +- export_dynamic_flag_spec_CXX='$wl--export-dynamic' +- whole_archive_flag_spec_CXX='$wl--whole-archive$convenience $wl--no-whole-archive' ++ hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' ++ export_dynamic_flag_spec_CXX='${wl}--export-dynamic' ++ whole_archive_flag_spec_CXX='${wl}--whole-archive$convenience ${wl}--no-whole-archive' + ;; + pgCC* | pgcpp*) + # Portland Group C++ compiler + case `$CC -V` in + *pgCC\ [1-5].* | *pgcpp\ [1-5].*) + prelink_cmds_CXX='tpldir=Template.dir~ +- rm -rf $tpldir~ +- $CC --prelink_objects --instantiation_dir $tpldir $objs 
$libobjs $compile_deplibs~ +- compile_command="$compile_command `find $tpldir -name \*.o | sort | $NL2SP`"' ++ rm -rf $tpldir~ ++ $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~ ++ compile_command="$compile_command `find $tpldir -name \*.o | sort | $NL2SP`"' + old_archive_cmds_CXX='tpldir=Template.dir~ +- rm -rf $tpldir~ +- $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~ +- $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | sort | $NL2SP`~ +- $RANLIB $oldlib' ++ rm -rf $tpldir~ ++ $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~ ++ $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | sort | $NL2SP`~ ++ $RANLIB $oldlib' + archive_cmds_CXX='tpldir=Template.dir~ +- rm -rf $tpldir~ +- $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ +- $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' ++ rm -rf $tpldir~ ++ $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ ++ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' + archive_expsym_cmds_CXX='tpldir=Template.dir~ +- rm -rf $tpldir~ +- $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ +- $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ++ rm -rf $tpldir~ ++ $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ ++ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir 
-name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' + ;; + *) # Version 6 and above use weak symbols +- archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' +- archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ++ archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' ++ archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' + ;; + esac + +- hardcode_libdir_flag_spec_CXX='$wl--rpath $wl$libdir' +- export_dynamic_flag_spec_CXX='$wl--export-dynamic' +- whole_archive_flag_spec_CXX='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' ++ hardcode_libdir_flag_spec_CXX='${wl}--rpath ${wl}$libdir' ++ export_dynamic_flag_spec_CXX='${wl}--export-dynamic' ++ whole_archive_flag_spec_CXX='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + ;; + cxx*) + # Compaq C++ +- archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' +- archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib $wl-retain-symbols-file $wl$export_symbols' ++ archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname 
$wl$soname -o $lib' ++ archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib ${wl}-retain-symbols-file $wl$export_symbols' + + runpath_var=LD_RUN_PATH + hardcode_libdir_flag_spec_CXX='-rpath $libdir' +@@ -16062,29 +13596,29 @@ fi + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. +- output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "X$list" | $Xsed' ++ output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "X$list" | $Xsed' + ;; + xl* | mpixl* | bgxl*) + # IBM XL 8.0 on PPC, with GNU ld +- hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir' +- export_dynamic_flag_spec_CXX='$wl--export-dynamic' +- archive_cmds_CXX='$CC -qmkshrobj $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' +- if test yes = "$supports_anon_versioning"; then ++ hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' ++ export_dynamic_flag_spec_CXX='${wl}--export-dynamic' ++ archive_cmds_CXX='$CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' ++ if test "x$supports_anon_versioning" = xyes; then + archive_expsym_cmds_CXX='echo "{ global:" > $output_objdir/$libname.ver~ +- cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ +- echo "local: *; };" >> $output_objdir/$libname.ver~ +- $CC -qmkshrobj $libobjs $deplibs $compiler_flags $wl-soname 
$wl$soname $wl-version-script $wl$output_objdir/$libname.ver -o $lib' ++ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ ++ echo "local: *; };" >> $output_objdir/$libname.ver~ ++ $CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' + fi + ;; + *) +- case `$CC -V 2>&1 | $SED 5q` in ++ case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) + # Sun C++ 5.9 + no_undefined_flag_CXX=' -zdefs' +- archive_cmds_CXX='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' +- archive_expsym_cmds_CXX='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-retain-symbols-file $wl$export_symbols' ++ archive_cmds_CXX='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ++ archive_expsym_cmds_CXX='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file ${wl}$export_symbols' + hardcode_libdir_flag_spec_CXX='-R$libdir' +- whole_archive_flag_spec_CXX='$wl--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' ++ whole_archive_flag_spec_CXX='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + compiler_needs_object_CXX=yes + + # Not sure whether something based on +@@ -16142,17 +13676,22 @@ fi + ld_shlibs_CXX=yes + ;; + +- openbsd* | bitrig*) ++ openbsd2*) ++ # C++ shared libraries are fairly broken ++ ld_shlibs_CXX=no ++ ;; ++ ++ openbsd*) + if test -f /usr/libexec/ld.so; then + hardcode_direct_CXX=yes + hardcode_shlibpath_var_CXX=no 
+ hardcode_direct_absolute_CXX=yes + archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' +- hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir' +- if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`"; then +- archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-retain-symbols-file,$export_symbols -o $lib' +- export_dynamic_flag_spec_CXX='$wl-E' +- whole_archive_flag_spec_CXX=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive' ++ hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' ++ if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then ++ archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file,$export_symbols -o $lib' ++ export_dynamic_flag_spec_CXX='${wl}-E' ++ whole_archive_flag_spec_CXX="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + fi + output_verbose_link_cmd=func_echo_all + else +@@ -16168,9 +13707,9 @@ fi + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. 
+- archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' ++ archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + +- hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir' ++ hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' + hardcode_libdir_separator_CXX=: + + # Archives containing C++ object files must be created using +@@ -16188,17 +13727,17 @@ fi + cxx*) + case $host in + osf3*) +- allow_undefined_flag_CXX=' $wl-expect_unresolved $wl\*' +- archive_cmds_CXX='$CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $soname `test -n "$verstring" && func_echo_all "$wl-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' +- hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir' ++ allow_undefined_flag_CXX=' ${wl}-expect_unresolved ${wl}\*' ++ archive_cmds_CXX='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $soname `test -n "$verstring" && func_echo_all "${wl}-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' ++ hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' + ;; + *) + allow_undefined_flag_CXX=' -expect_unresolved \*' +- archive_cmds_CXX='$CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' ++ archive_cmds_CXX='$CC -shared${allow_undefined_flag} $predep_objects 
$libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + archive_expsym_cmds_CXX='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~ +- echo "-hidden">> $lib.exp~ +- $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname $wl-input $wl$lib.exp `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib~ +- $RM $lib.exp' ++ echo "-hidden">> $lib.exp~ ++ $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname ${wl}-input ${wl}$lib.exp `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib~ ++ $RM $lib.exp' + hardcode_libdir_flag_spec_CXX='-rpath $libdir' + ;; + esac +@@ -16213,21 +13752,21 @@ fi + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. 
+- output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' ++ output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + ;; + *) +- if test yes,no = "$GXX,$with_gnu_ld"; then +- allow_undefined_flag_CXX=' $wl-expect_unresolved $wl\*' ++ if test "$GXX" = yes && test "$with_gnu_ld" = no; then ++ allow_undefined_flag_CXX=' ${wl}-expect_unresolved ${wl}\*' + case $host in + osf3*) +- archive_cmds_CXX='$CC -shared -nostdlib $allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' ++ archive_cmds_CXX='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + ;; + *) +- archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-msym $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' ++ archive_cmds_CXX='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname 
${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + ;; + esac + +- hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir' ++ hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator_CXX=: + + # Commands to make compiler produce verbose output that lists +@@ -16269,13 +13808,13 @@ fi + + solaris*) + case $cc_basename in +- CC* | sunCC*) ++ CC*) + # Sun C++ 4.2, 5.x and Centerline C++ + archive_cmds_need_lc_CXX=yes + no_undefined_flag_CXX=' -zdefs' +- archive_cmds_CXX='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ++ archive_cmds_CXX='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ +- $CC -G$allow_undefined_flag $wl-M $wl$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' ++ $CC -G${allow_undefined_flag} ${wl}-M ${wl}$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + hardcode_libdir_flag_spec_CXX='-R$libdir' + hardcode_shlibpath_var_CXX=no +@@ -16283,7 +13822,7 @@ fi + solaris2.[0-5] | solaris2.[0-5].*) ;; + *) + # The compiler driver will combine and reorder linker options, +- # but understands '-z linker_flag'. ++ # but understands `-z linker_flag'. + # Supported since Solaris 2.6 (maybe 2.5.1?) 
+ whole_archive_flag_spec_CXX='-z allextract$convenience -z defaultextract' + ;; +@@ -16300,30 +13839,30 @@ fi + ;; + gcx*) + # Green Hills C++ Compiler +- archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib' ++ archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + + # The C++ compiler must be used to create the archive. + old_archive_cmds_CXX='$CC $LDFLAGS -archive -o $oldlib $oldobjs' + ;; + *) + # GNU C++ compiler with Solaris linker +- if test yes,no = "$GXX,$with_gnu_ld"; then +- no_undefined_flag_CXX=' $wl-z ${wl}defs' ++ if test "$GXX" = yes && test "$with_gnu_ld" = no; then ++ no_undefined_flag_CXX=' ${wl}-z ${wl}defs' + if $CC --version | $GREP -v '^2\.7' > /dev/null; then +- archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib' ++ archive_cmds_CXX='$CC -shared -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ +- $CC -shared $pic_flag -nostdlib $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' ++ $CC -shared -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' + else +- # g++ 2.7 appears to require '-G' NOT '-shared' on this ++ # g++ 2.7 appears to require `-G' NOT `-shared' on this + # platform. 
+- archive_cmds_CXX='$CC -G -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib' ++ archive_cmds_CXX='$CC -G -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ +- $CC -G -nostdlib $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' ++ $CC -G -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when +@@ -16331,11 +13870,11 @@ fi + output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' + fi + +- hardcode_libdir_flag_spec_CXX='$wl-R $wl$libdir' ++ hardcode_libdir_flag_spec_CXX='${wl}-R $wl$libdir' + case $host_os in + solaris2.[0-5] | solaris2.[0-5].*) ;; + *) +- whole_archive_flag_spec_CXX='$wl-z ${wl}allextract$convenience $wl-z ${wl}defaultextract' ++ whole_archive_flag_spec_CXX='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' + ;; + esac + fi +@@ -16344,52 +13883,52 @@ fi + ;; + + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) +- no_undefined_flag_CXX='$wl-z,text' ++ no_undefined_flag_CXX='${wl}-z,text' + archive_cmds_need_lc_CXX=no + hardcode_shlibpath_var_CXX=no + runpath_var='LD_RUN_PATH' + + case $cc_basename in + CC*) +- archive_cmds_CXX='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' +- archive_expsym_cmds_CXX='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds_CXX='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ 
archive_expsym_cmds_CXX='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) +- archive_cmds_CXX='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' +- archive_expsym_cmds_CXX='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds_CXX='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ archive_expsym_cmds_CXX='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + ;; + + sysv5* | sco3.2v5* | sco5v6*) +- # Note: We CANNOT use -z defs as we might desire, because we do not ++ # Note: We can NOT use -z defs as we might desire, because we do not + # link with -lc, and that would cause any symbols used from libc to + # always be unresolved, which means just about no library would + # ever link correctly. If we're not using GNU ld we use -z text + # though, which does catch some bad symbols but isn't as heavy-handed + # as -z defs. 
+- no_undefined_flag_CXX='$wl-z,text' +- allow_undefined_flag_CXX='$wl-z,nodefs' ++ no_undefined_flag_CXX='${wl}-z,text' ++ allow_undefined_flag_CXX='${wl}-z,nodefs' + archive_cmds_need_lc_CXX=no + hardcode_shlibpath_var_CXX=no +- hardcode_libdir_flag_spec_CXX='$wl-R,$libdir' ++ hardcode_libdir_flag_spec_CXX='${wl}-R,$libdir' + hardcode_libdir_separator_CXX=':' + link_all_deplibs_CXX=yes +- export_dynamic_flag_spec_CXX='$wl-Bexport' ++ export_dynamic_flag_spec_CXX='${wl}-Bexport' + runpath_var='LD_RUN_PATH' + + case $cc_basename in + CC*) +- archive_cmds_CXX='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' +- archive_expsym_cmds_CXX='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds_CXX='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ archive_expsym_cmds_CXX='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + old_archive_cmds_CXX='$CC -Tprelink_objects $oldobjs~ +- '"$old_archive_cmds_CXX" ++ '"$old_archive_cmds_CXX" + reload_cmds_CXX='$CC -Tprelink_objects $reload_objs~ +- '"$reload_cmds_CXX" ++ '"$reload_cmds_CXX" + ;; + *) +- archive_cmds_CXX='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' +- archive_expsym_cmds_CXX='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds_CXX='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ++ archive_expsym_cmds_CXX='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + ;; +@@ -16419,12 +13958,12 @@ fi + ;; + esac + +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs_CXX" >&5 +-printf "%s\n" "$ld_shlibs_CXX" >&6; } +- test no = "$ld_shlibs_CXX" && can_build_shared=no ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs_CXX" >&5 ++$as_echo "$ld_shlibs_CXX" >&6; } ++ test "$ld_shlibs_CXX" = no && 
can_build_shared=no + +- GCC_CXX=$GXX +- LD_CXX=$LD ++ GCC_CXX="$GXX" ++ LD_CXX="$LD" + + ## CAVEAT EMPTOR: + ## There is no encapsulation within the following macros, do not change +@@ -16447,18 +13986,10 @@ private: + }; + _LT_EOF + +- +-_lt_libdeps_save_CFLAGS=$CFLAGS +-case "$CC $CFLAGS " in #( +-*\ -flto*\ *) CFLAGS="$CFLAGS -fno-lto" ;; +-*\ -fwhopr*\ *) CFLAGS="$CFLAGS -fno-whopr" ;; +-*\ -fuse-linker-plugin*\ *) CFLAGS="$CFLAGS -fno-use-linker-plugin" ;; +-esac +- + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + # Parse the compiler output and extract the necessary + # objects, libraries and library flags. +@@ -16468,38 +13999,29 @@ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + pre_test_object_deps_done=no + + for p in `eval "$output_verbose_link_cmd"`; do +- case $prev$p in ++ case $p in + + -L* | -R* | -l*) + # Some compilers place space between "-{L,R}" and the path. + # Remove the space. +- if test x-L = "$p" || +- test x-R = "$p"; then ++ if test $p = "-L" || ++ test $p = "-R"; then + prev=$p + continue ++ else ++ prev= + fi + +- # Expand the sysroot to ease extracting the directories later. 
+- if test -z "$prev"; then +- case $p in +- -L*) func_stripname_cnf '-L' '' "$p"; prev=-L; p=$func_stripname_result ;; +- -R*) func_stripname_cnf '-R' '' "$p"; prev=-R; p=$func_stripname_result ;; +- -l*) func_stripname_cnf '-l' '' "$p"; prev=-l; p=$func_stripname_result ;; +- esac +- fi +- case $p in +- =*) func_stripname_cnf '=' '' "$p"; p=$lt_sysroot$func_stripname_result ;; +- esac +- if test no = "$pre_test_object_deps_done"; then +- case $prev in +- -L | -R) ++ if test "$pre_test_object_deps_done" = no; then ++ case $p in ++ -L* | -R*) + # Internal compiler library paths should come after those + # provided the user. The postdeps already come after the + # user supplied libs so there is no need to process them. + if test -z "$compiler_lib_search_path_CXX"; then +- compiler_lib_search_path_CXX=$prev$p ++ compiler_lib_search_path_CXX="${prev}${p}" + else +- compiler_lib_search_path_CXX="${compiler_lib_search_path_CXX} $prev$p" ++ compiler_lib_search_path_CXX="${compiler_lib_search_path_CXX} ${prev}${p}" + fi + ;; + # The "-l" case would never come before the object being +@@ -16507,15 +14029,13 @@ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + esac + else + if test -z "$postdeps_CXX"; then +- postdeps_CXX=$prev$p ++ postdeps_CXX="${prev}${p}" + else +- postdeps_CXX="${postdeps_CXX} $prev$p" ++ postdeps_CXX="${postdeps_CXX} ${prev}${p}" + fi + fi +- prev= + ;; + +- *.lto.$objext) ;; # Ignore GCC LTO objects + *.$objext) + # This assumes that the test object file only shows up + # once in the compiler output. 
+@@ -16524,15 +14044,15 @@ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + continue + fi + +- if test no = "$pre_test_object_deps_done"; then ++ if test "$pre_test_object_deps_done" = no; then + if test -z "$predep_objects_CXX"; then +- predep_objects_CXX=$p ++ predep_objects_CXX="$p" + else + predep_objects_CXX="$predep_objects_CXX $p" + fi + else + if test -z "$postdep_objects_CXX"; then +- postdep_objects_CXX=$p ++ postdep_objects_CXX="$p" + else + postdep_objects_CXX="$postdep_objects_CXX $p" + fi +@@ -16551,7 +14071,6 @@ else + fi + + $RM -f confest.$objext +-CFLAGS=$_lt_libdeps_save_CFLAGS + + # PORTME: override above test on systems where it is broken + case $host_os in +@@ -16562,6 +14081,51 @@ interix[3-9]*) + postdep_objects_CXX= + postdeps_CXX= + ;; ++ ++linux*) ++ case `$CC -V 2>&1 | sed 5q` in ++ *Sun\ C*) ++ # Sun C++ 5.9 ++ ++ # The more standards-conforming stlport4 library is ++ # incompatible with the Cstd library. Avoid specifying ++ # it if it's in CXXFLAGS. Ignore libCrun as ++ # -library=stlport4 depends on it. ++ case " $CXX $CXXFLAGS " in ++ *" -library=stlport4 "*) ++ solaris_use_stlport4=yes ++ ;; ++ esac ++ ++ if test "$solaris_use_stlport4" != yes; then ++ postdeps_CXX='-library=Cstd -library=Crun' ++ fi ++ ;; ++ esac ++ ;; ++ ++solaris*) ++ case $cc_basename in ++ CC*) ++ # The more standards-conforming stlport4 library is ++ # incompatible with the Cstd library. Avoid specifying ++ # it if it's in CXXFLAGS. Ignore libCrun as ++ # -library=stlport4 depends on it. ++ case " $CXX $CXXFLAGS " in ++ *" -library=stlport4 "*) ++ solaris_use_stlport4=yes ++ ;; ++ esac ++ ++ # Adding this requires a known-good setup of shared libraries for ++ # Sun compiler versions before 5.6, else PIC objects from an old ++ # archive will be linked into the output, leading to subtle bugs. 
++ if test "$solaris_use_stlport4" != yes; then ++ postdeps_CXX='-library=Cstd -library=Crun' ++ fi ++ ;; ++ esac ++ ;; + esac + + +@@ -16570,7 +14134,7 @@ case " $postdeps_CXX " in + esac + compiler_lib_search_dirs_CXX= + if test -n "${compiler_lib_search_path_CXX}"; then +- compiler_lib_search_dirs_CXX=`echo " ${compiler_lib_search_path_CXX}" | $SED -e 's! -L! !g' -e 's!^ !!'` ++ compiler_lib_search_dirs_CXX=`echo " ${compiler_lib_search_path_CXX}" | ${SED} -e 's! -L! !g' -e 's!^ !!'` + fi + + +@@ -16607,16 +14171,18 @@ fi + lt_prog_compiler_pic_CXX= + lt_prog_compiler_static_CXX= + ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 ++$as_echo_n "checking for $compiler option to produce PIC... " >&6; } + + # C++ specific cases for pic, static, wl, etc. +- if test yes = "$GXX"; then ++ if test "$GXX" = yes; then + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_static_CXX='-static' + + case $host_os in + aix*) + # All AIX code is PIC. +- if test ia64 = "$host_cpu"; then ++ if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static_CXX='-Bstatic' + fi +@@ -16631,8 +14197,8 @@ lt_prog_compiler_static_CXX= + ;; + m68k) + # FIXME: we need at least 68020 code to build shared libraries, but +- # adding the '-m68020' flag to GCC prevents building anything better, +- # like '-m68040'. ++ # adding the `-m68020' flag to GCC prevents building anything better, ++ # like `-m68040'. 
+ lt_prog_compiler_pic_CXX='-m68020 -resident32 -malways-restore-a4' + ;; + esac +@@ -16647,11 +14213,6 @@ lt_prog_compiler_static_CXX= + # Although the cygwin gcc ignores -fPIC, still need this for old-style + # (--disable-auto-import) libraries + lt_prog_compiler_pic_CXX='-DDLL_EXPORT' +- case $host_os in +- os2*) +- lt_prog_compiler_static_CXX='$wl-static' +- ;; +- esac + ;; + darwin* | rhapsody*) + # PIC is the default on this platform +@@ -16701,7 +14262,7 @@ lt_prog_compiler_static_CXX= + case $host_os in + aix[4-9]*) + # All AIX code is PIC. +- if test ia64 = "$host_cpu"; then ++ if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static_CXX='-Bstatic' + else +@@ -16716,11 +14277,6 @@ lt_prog_compiler_static_CXX= + ;; + esac + ;; +- mingw* | cygwin* | os2* | pw32* | cegcc*) +- # This hack is so that the source file can tell whether it is being +- # built for inclusion in a dll (and should export symbols for example). +- lt_prog_compiler_pic_CXX='-DDLL_EXPORT' +- ;; + dgux*) + case $cc_basename in + ec++*) +@@ -16734,21 +14290,21 @@ lt_prog_compiler_static_CXX= + ;; + esac + ;; +- freebsd* | dragonfly* | midnightbsd*) ++ freebsd* | dragonfly*) + # FreeBSD uses GNU C++ + ;; + hpux9* | hpux10* | hpux11*) + case $cc_basename in + CC*) + lt_prog_compiler_wl_CXX='-Wl,' +- lt_prog_compiler_static_CXX='$wl-a ${wl}archive' +- if test ia64 != "$host_cpu"; then ++ lt_prog_compiler_static_CXX='${wl}-a ${wl}archive' ++ if test "$host_cpu" != ia64; then + lt_prog_compiler_pic_CXX='+Z' + fi + ;; + aCC*) + lt_prog_compiler_wl_CXX='-Wl,' +- lt_prog_compiler_static_CXX='$wl-a ${wl}archive' ++ lt_prog_compiler_static_CXX='${wl}-a ${wl}archive' + case $host_cpu in + hppa*64*|ia64*) + # +Z the default +@@ -16777,7 +14333,7 @@ lt_prog_compiler_static_CXX= + ;; + esac + ;; +- linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) ++ linux* | k*bsd*-gnu | kopensolaris*-gnu) + case $cc_basename in + KCC*) + # KAI C++ Compiler +@@ -16785,7 +14341,7 @@ 
lt_prog_compiler_static_CXX= + lt_prog_compiler_pic_CXX='-fPIC' + ;; + ecpc* ) +- # old Intel C++ for x86_64, which still supported -KPIC. ++ # old Intel C++ for x86_64 which still supported -KPIC. + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_pic_CXX='-KPIC' + lt_prog_compiler_static_CXX='-static' +@@ -16817,7 +14373,7 @@ lt_prog_compiler_static_CXX= + lt_prog_compiler_static_CXX='-qstaticlink' + ;; + *) +- case `$CC -V 2>&1 | $SED 5q` in ++ case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) + # Sun C++ 5.9 + lt_prog_compiler_pic_CXX='-KPIC' +@@ -16873,7 +14429,7 @@ lt_prog_compiler_static_CXX= + ;; + solaris*) + case $cc_basename in +- CC* | sunCC*) ++ CC*) + # Sun C++ 4.2, 5.x and Centerline C++ + lt_prog_compiler_pic_CXX='-KPIC' + lt_prog_compiler_static_CXX='-Bstatic' +@@ -16930,7 +14486,7 @@ lt_prog_compiler_static_CXX= + fi + + case $host_os in +- # For platforms that do not support PIC, -DPIC is meaningless: ++ # For platforms which do not support PIC, -DPIC is meaningless: + *djgpp*) + lt_prog_compiler_pic_CXX= + ;; +@@ -16938,33 +14494,24 @@ case $host_os in + lt_prog_compiler_pic_CXX="$lt_prog_compiler_pic_CXX -DPIC" + ;; + esac ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_prog_compiler_pic_CXX" >&5 ++$as_echo "$lt_prog_compiler_pic_CXX" >&6; } ++ + +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 +-printf %s "checking for $compiler option to produce PIC... " >&6; } +-if test ${lt_cv_prog_compiler_pic_CXX+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop +- lt_cv_prog_compiler_pic_CXX=$lt_prog_compiler_pic_CXX +-fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_CXX" >&5 +-printf "%s\n" "$lt_cv_prog_compiler_pic_CXX" >&6; } +-lt_prog_compiler_pic_CXX=$lt_cv_prog_compiler_pic_CXX + + # + # Check to make sure the PIC flag actually works. 
+ # + if test -n "$lt_prog_compiler_pic_CXX"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works" >&5 +-printf %s "checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works... " >&6; } +-if test ${lt_cv_prog_compiler_pic_works_CXX+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works" >&5 ++$as_echo_n "checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works... " >&6; } ++if ${lt_cv_prog_compiler_pic_works_CXX+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + lt_cv_prog_compiler_pic_works_CXX=no + ac_outfile=conftest.$ac_objext + echo "$lt_simple_compile_test_code" > conftest.$ac_ext +- lt_compiler_flag="$lt_prog_compiler_pic_CXX -DPIC" ## exclude from sc_useless_quotes_in_assignment ++ lt_compiler_flag="$lt_prog_compiler_pic_CXX -DPIC" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins +@@ -16991,10 +14538,10 @@ else $as_nop + $RM conftest* + + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works_CXX" >&5 +-printf "%s\n" "$lt_cv_prog_compiler_pic_works_CXX" >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works_CXX" >&5 ++$as_echo "$lt_cv_prog_compiler_pic_works_CXX" >&6; } + +-if test yes = "$lt_cv_prog_compiler_pic_works_CXX"; then ++if test x"$lt_cv_prog_compiler_pic_works_CXX" = xyes; then + case $lt_prog_compiler_pic_CXX in + "" | " "*) ;; + *) lt_prog_compiler_pic_CXX=" $lt_prog_compiler_pic_CXX" ;; +@@ -17008,20 +14555,17 @@ fi + + + +- +- + # + # Check to make sure the static flag actually works. 
+ # + wl=$lt_prog_compiler_wl_CXX eval lt_tmp_static_flag=\"$lt_prog_compiler_static_CXX\" +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5 +-printf %s "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; } +-if test ${lt_cv_prog_compiler_static_works_CXX+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5 ++$as_echo_n "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; } ++if ${lt_cv_prog_compiler_static_works_CXX+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + lt_cv_prog_compiler_static_works_CXX=no +- save_LDFLAGS=$LDFLAGS ++ save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS $lt_tmp_static_flag" + echo "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then +@@ -17040,13 +14584,13 @@ else $as_nop + fi + fi + $RM -r conftest* +- LDFLAGS=$save_LDFLAGS ++ LDFLAGS="$save_LDFLAGS" + + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works_CXX" >&5 +-printf "%s\n" "$lt_cv_prog_compiler_static_works_CXX" >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works_CXX" >&5 ++$as_echo "$lt_cv_prog_compiler_static_works_CXX" >&6; } + +-if test yes = "$lt_cv_prog_compiler_static_works_CXX"; then ++if test x"$lt_cv_prog_compiler_static_works_CXX" = xyes; then + : + else + lt_prog_compiler_static_CXX= +@@ -17055,12 +14599,11 @@ fi + + + +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 +-printf %s "checking if $compiler supports -c -o file.$ac_objext... 
" >&6; } +-if test ${lt_cv_prog_compiler_c_o_CXX+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 ++$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } ++if ${lt_cv_prog_compiler_c_o_CXX+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + lt_cv_prog_compiler_c_o_CXX=no + $RM -r conftest 2>/dev/null + mkdir conftest +@@ -17103,17 +14646,16 @@ else $as_nop + $RM conftest* + + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o_CXX" >&5 +-printf "%s\n" "$lt_cv_prog_compiler_c_o_CXX" >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o_CXX" >&5 ++$as_echo "$lt_cv_prog_compiler_c_o_CXX" >&6; } + + + +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 +-printf %s "checking if $compiler supports -c -o file.$ac_objext... " >&6; } +-if test ${lt_cv_prog_compiler_c_o_CXX+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 ++$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... 
" >&6; } ++if ${lt_cv_prog_compiler_c_o_CXX+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + lt_cv_prog_compiler_c_o_CXX=no + $RM -r conftest 2>/dev/null + mkdir conftest +@@ -17156,28 +14698,28 @@ else $as_nop + $RM conftest* + + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o_CXX" >&5 +-printf "%s\n" "$lt_cv_prog_compiler_c_o_CXX" >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o_CXX" >&5 ++$as_echo "$lt_cv_prog_compiler_c_o_CXX" >&6; } + + + + +-hard_links=nottested +-if test no = "$lt_cv_prog_compiler_c_o_CXX" && test no != "$need_locks"; then ++hard_links="nottested" ++if test "$lt_cv_prog_compiler_c_o_CXX" = no && test "$need_locks" != no; then + # do not overwrite the value of need_locks provided by the user +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5 +-printf %s "checking if we can lock with hard links... " >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5 ++$as_echo_n "checking if we can lock with hard links... 
" >&6; } + hard_links=yes + $RM conftest* + ln conftest.a conftest.b 2>/dev/null && hard_links=no + touch conftest.a + ln conftest.a conftest.b 2>&5 || hard_links=no + ln conftest.a conftest.b 2>/dev/null && hard_links=no +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5 +-printf "%s\n" "$hard_links" >&6; } +- if test no = "$hard_links"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&5 +-printf "%s\n" "$as_me: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&2;} ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5 ++$as_echo "$hard_links" >&6; } ++ if test "$hard_links" = no; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5 ++$as_echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;} + need_locks=warn + fi + else +@@ -17186,49 +14728,37 @@ fi + + + +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 +-printf %s "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 ++$as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; } + + export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' +- exclude_expsyms_CXX='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' + case $host_os in + aix[4-9]*) + # If we're using GNU nm, then we don't want the "-C" option. +- # -C means demangle to GNU nm, but means don't demangle to AIX nm. +- # Without the "-l" option, or with the "-B" option, AIX nm treats +- # weak defined symbols like other global defined symbols, whereas +- # GNU nm marks them as "W". 
+- # While the 'weak' keyword is ignored in the Export File, we need +- # it in the Import File for the 'aix-soname' feature, so we have +- # to replace the "-B" option with "-P" for AIX nm. ++ # -C means demangle to AIX nm, but means don't demangle with GNU nm ++ # Also, AIX nm treats weak defined symbols like other global defined ++ # symbols, whereas GNU nm marks them as "W". + if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then +- export_symbols_cmds_CXX='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { if (\$ 2 == "W") { print \$ 3 " weak" } else { print \$ 3 } } }'\'' | sort -u > $export_symbols' ++ export_symbols_cmds_CXX='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + else +- export_symbols_cmds_CXX='`func_echo_all $NM | $SED -e '\''s/B\([^B]*\)$/P\1/'\''` -PCpgl $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "L") || (\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) && (substr(\$ 1,1,1) != ".")) { if ((\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) { print \$ 1 " weak" } else { print \$ 1 } } }'\'' | sort -u > $export_symbols' ++ export_symbols_cmds_CXX='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "L")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + fi + ;; + pw32*) +- export_symbols_cmds_CXX=$ltdll_cmds +- ;; ++ export_symbols_cmds_CXX="$ltdll_cmds" ++ ;; + cygwin* | mingw* | cegcc*) +- case $cc_basename in +- cl* | icl*) +- exclude_expsyms_CXX='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' +- ;; +- *) +- export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 
DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' +- exclude_expsyms_CXX='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' +- ;; +- esac +- ;; ++ export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;/^.*[ ]__nm__/s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' ++ ;; + *) + export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' +- ;; ++ ;; + esac ++ exclude_expsyms_CXX='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' + +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs_CXX" >&5 +-printf "%s\n" "$ld_shlibs_CXX" >&6; } +-test no = "$ld_shlibs_CXX" && can_build_shared=no ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs_CXX" >&5 ++$as_echo "$ld_shlibs_CXX" >&6; } ++test "$ld_shlibs_CXX" = no && can_build_shared=no + + with_gnu_ld_CXX=$with_gnu_ld + +@@ -17245,7 +14775,7 @@ x|xyes) + # Assume -lc should be added + archive_cmds_need_lc_CXX=yes + +- if test yes,yes = "$GCC,$enable_shared"; then ++ if test "$enable_shared" = yes && test "$GCC" = yes; then + case $archive_cmds_CXX in + *'~'*) + # FIXME: we may have to deal with multi-command sequences. +@@ -17254,19 +14784,18 @@ x|xyes) + # Test whether the compiler implicitly links with -lc since on some + # systems, -lgcc has to come before -lc. If gcc already passes -lc + # to ld, don't add -lc before -lgcc. +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5 +-printf %s "checking whether -lc should be explicitly linked in... 
" >&6; } +-if test ${lt_cv_archive_cmds_need_lc_CXX+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5 ++$as_echo_n "checking whether -lc should be explicitly linked in... " >&6; } ++if ${lt_cv_archive_cmds_need_lc_CXX+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + $RM conftest* + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } 2>conftest.err; then + soname=conftest + lib=conftest +@@ -17284,7 +14813,7 @@ else $as_nop + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds_CXX 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5 + (eval $archive_cmds_CXX 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5 + ac_status=$? +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } + then + lt_cv_archive_cmds_need_lc_CXX=no +@@ -17298,8 +14827,8 @@ else $as_nop + $RM conftest* + + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc_CXX" >&5 +-printf "%s\n" "$lt_cv_archive_cmds_need_lc_CXX" >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc_CXX" >&5 ++$as_echo "$lt_cv_archive_cmds_need_lc_CXX" >&6; } + archive_cmds_need_lc_CXX=$lt_cv_archive_cmds_need_lc_CXX + ;; + esac +@@ -17368,13 +14897,15 @@ esac + + + +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5 +-printf %s "checking dynamic linker characteristics... 
" >&6; } ++ ++ ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5 ++$as_echo_n "checking dynamic linker characteristics... " >&6; } + + library_names_spec= + libname_spec='lib$name' + soname_spec= +-shrext_cmds=.so ++shrext_cmds=".so" + postinstall_cmds= + postuninstall_cmds= + finish_cmds= +@@ -17391,108 +14922,56 @@ hardcode_into_libs=no + # flags to be left without arguments + need_version=unknown + +- +- + case $host_os in + aix3*) +- version_type=linux # correct to gnu/linux during the next big refactor +- library_names_spec='$libname$release$shared_ext$versuffix $libname.a' ++ version_type=linux ++ library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' + shlibpath_var=LIBPATH + + # AIX 3 has no versioning support, so we append a major version to the name. +- soname_spec='$libname$release$shared_ext$major' ++ soname_spec='${libname}${release}${shared_ext}$major' + ;; + + aix[4-9]*) +- version_type=linux # correct to gnu/linux during the next big refactor ++ version_type=linux + need_lib_prefix=no + need_version=no + hardcode_into_libs=yes +- if test ia64 = "$host_cpu"; then ++ if test "$host_cpu" = ia64; then + # AIX 5 supports IA64 +- library_names_spec='$libname$release$shared_ext$major $libname$release$shared_ext$versuffix $libname$shared_ext' ++ library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + else + # With GCC up to 2.95.x, collect2 would create an import file + # for dependence libraries. The import file would start with +- # the line '#! .'. This would cause the generated library to +- # depend on '.', always an invalid library. This was fixed in ++ # the line `#! .'. This would cause the generated library to ++ # depend on `.', always an invalid library. This was fixed in + # development snapshots of GCC prior to 3.0. 
+ case $host_os in + aix4 | aix4.[01] | aix4.[01].*) + if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' + echo ' yes ' +- echo '#endif'; } | $CC -E - | $GREP yes > /dev/null; then ++ echo '#endif'; } | ${CC} -E - | $GREP yes > /dev/null; then + : + else + can_build_shared=no + fi + ;; + esac +- # Using Import Files as archive members, it is possible to support +- # filename-based versioning of shared library archives on AIX. While +- # this would work for both with and without runtime linking, it will +- # prevent static linking of such archives. So we do filename-based +- # shared library versioning with .so extension only, which is used +- # when both runtime linking and shared linking is enabled. +- # Unfortunately, runtime linking may impact performance, so we do +- # not want this to be the default eventually. Also, we use the +- # versioned .so libs for executables only if there is the -brtl +- # linker flag in LDFLAGS as well, or --with-aix-soname=svr4 only. +- # To allow for filename-based versioning support, we need to create +- # libNAME.so.V as an archive file, containing: +- # *) an Import File, referring to the versioned filename of the +- # archive as well as the shared archive member, telling the +- # bitwidth (32 or 64) of that shared object, and providing the +- # list of exported symbols of that shared object, eventually +- # decorated with the 'weak' keyword +- # *) the shared object with the F_LOADONLY flag set, to really avoid +- # it being seen by the linker. +- # At run time we better use the real file rather than another symlink, +- # but for link time we create the symlink libNAME.so -> libNAME.so.V +- +- case $with_aix_soname,$aix_use_runtimelinking in +- # AIX (on Power*) has no versioning support, so currently we cannot hardcode correct ++ # AIX (on Power*) has no versioning support, so currently we can not hardcode correct + # soname into executable. 
Probably we can add versioning support to + # collect2, so additional links can be useful in future. +- aix,yes) # traditional libtool +- dynamic_linker='AIX unversionable lib.so' ++ if test "$aix_use_runtimelinking" = yes; then + # If using run time linking (on AIX 4.2 or later) use lib.so + # instead of lib.a to let people know that these are not + # typical AIX shared libraries. +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- ;; +- aix,no) # traditional AIX only +- dynamic_linker='AIX lib.a(lib.so.V)' ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' ++ else + # We preserve .a as extension for shared libraries through AIX4.2 + # and later when we are not doing run time linking. +- library_names_spec='$libname$release.a $libname.a' +- soname_spec='$libname$release$shared_ext$major' +- ;; +- svr4,*) # full svr4 only +- dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o)" +- library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' +- # We do not specify a path in Import Files, so LIBPATH fires. 
+- shlibpath_overrides_runpath=yes +- ;; +- *,yes) # both, prefer svr4 +- dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o), lib.a(lib.so.V)" +- library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' +- # unpreferred sharedlib libNAME.a needs extra handling +- postinstall_cmds='test -n "$linkname" || linkname="$realname"~func_stripname "" ".so" "$linkname"~$install_shared_prog "$dir/$func_stripname_result.$libext" "$destdir/$func_stripname_result.$libext"~test -z "$tstripme" || test -z "$striplib" || $striplib "$destdir/$func_stripname_result.$libext"' +- postuninstall_cmds='for n in $library_names $old_library; do :; done~func_stripname "" ".so" "$n"~test "$func_stripname_result" = "$n" || func_append rmfiles " $odir/$func_stripname_result.$libext"' +- # We do not specify a path in Import Files, so LIBPATH fires. +- shlibpath_overrides_runpath=yes +- ;; +- *,no) # both, prefer aix +- dynamic_linker="AIX lib.a(lib.so.V), lib.so.V($shared_archive_member_spec.o)" +- library_names_spec='$libname$release.a $libname.a' +- soname_spec='$libname$release$shared_ext$major' +- # unpreferred sharedlib libNAME.so.V and symlink libNAME.so need extra handling +- postinstall_cmds='test -z "$dlname" || $install_shared_prog $dir/$dlname $destdir/$dlname~test -z "$tstripme" || test -z "$striplib" || $striplib $destdir/$dlname~test -n "$linkname" || linkname=$realname~func_stripname "" ".a" "$linkname"~(cd "$destdir" && $LN_S -f $dlname $func_stripname_result.so)' +- postuninstall_cmds='test -z "$dlname" || func_append rmfiles " $odir/$dlname"~for n in $old_library $library_names; do :; done~func_stripname "" ".a" "$n"~func_append rmfiles " $odir/$func_stripname_result.so"' +- ;; +- esac ++ library_names_spec='${libname}${release}.a $libname.a' ++ soname_spec='${libname}${release}${shared_ext}$major' ++ fi + shlibpath_var=LIBPATH + fi + ;; +@@ -17502,27 +14981,27 @@ amigaos*) + powerpc) + # Since July 2007 AmigaOS4 officially supports .so libraries. 
+ # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + ;; + m68k) + library_names_spec='$libname.ixlibrary $libname.a' + # Create ${libname}_ixlibrary.a entries in /sys/libs. +- finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' ++ finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' + ;; + esac + ;; + + beos*) +- library_names_spec='$libname$shared_ext' ++ library_names_spec='${libname}${shared_ext}' + dynamic_linker="$host_os ld.so" + shlibpath_var=LIBRARY_PATH + ;; + + bsdi[45]*) +- version_type=linux # correct to gnu/linux during the next big refactor ++ version_type=linux + need_version=no +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' ++ soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" +@@ -17534,17 +15013,16 @@ bsdi[45]*) + + cygwin* | mingw* | pw32* | 
cegcc*) + version_type=windows +- shrext_cmds=.dll ++ shrext_cmds=".dll" + need_version=no + need_lib_prefix=no + +- case $GCC,$cc_basename in +- yes,*) +- # gcc ++ case $GCC,$host_os in ++ yes,cygwin* | yes,mingw* | yes,pw32* | yes,cegcc*) + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds +- postinstall_cmds='base_file=`basename \$file`~ +- dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ ++ postinstall_cmds='base_file=`basename \${file}`~ ++ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname~ +@@ -17560,83 +15038,25 @@ cygwin* | mingw* | pw32* | cegcc*) + case $host_os in + cygwin*) + # Cygwin DLLs use 'cyg' prefix rather than 'lib' +- soname_spec='`echo $libname | $SED -e 's/^lib/cyg/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' ++ soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + + ;; + mingw* | cegcc*) + # MinGW DLLs use traditional 'lib' prefix +- soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' ++ soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + ;; + pw32*) + # pw32 DLLs use 'pw' prefix rather than 'lib' +- library_names_spec='`echo $libname | $SED -e 's/^lib/pw/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' +- ;; +- esac +- dynamic_linker='Win32 ld.exe' +- ;; +- +- *,cl* | *,icl*) +- # Native MSVC or ICC +- libname_spec='$name' +- soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' +- library_names_spec='$libname.dll.lib' +- +- case $build_os in +- mingw*) +- sys_lib_search_path_spec= +- lt_save_ifs=$IFS +- IFS=';' +- for lt_path in $LIB +- do +- IFS=$lt_save_ifs +- # Let DOS variable expansion print the short 8.3 style file name. 
+- lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` +- sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" +- done +- IFS=$lt_save_ifs +- # Convert to MSYS style. +- sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` +- ;; +- cygwin*) +- # Convert to unix form, then to dos form, then back to unix form +- # but this time dos style (no spaces!) so that the unix form looks +- # like /cygdrive/c/PROGRA~1:/cygdr... +- sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` +- sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` +- sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` +- ;; +- *) +- sys_lib_search_path_spec=$LIB +- if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then +- # It is most probably a Windows format PATH. +- sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` +- else +- sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` +- fi +- # FIXME: find the short name or the path components, as spaces are +- # common. (e.g. "Program Files" -> "PROGRA~1") ++ library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + ;; + esac +- +- # DLL is installed to $(libdir)/../bin by postinstall_cmds +- postinstall_cmds='base_file=`basename \$file`~ +- dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ +- dldir=$destdir/`dirname \$dlpath`~ +- test -d \$dldir || mkdir -p \$dldir~ +- $install_prog $dir/$dlname \$dldir/$dlname' +- postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; echo \$dlname'\''`~ +- dlpath=$dir/\$dldll~ +- $RM \$dlpath' +- shlibpath_overrides_runpath=yes +- dynamic_linker='Win32 link.exe' + ;; + + *) +- # Assume MSVC and ICC wrapper +- library_names_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext $libname.lib' +- dynamic_linker='Win32 ld.exe' ++ library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' + ;; + esac ++ dynamic_linker='Win32 ld.exe' + # FIXME: first we should search . and the directory the executable is in + shlibpath_var=PATH + ;; +@@ -17646,8 +15066,8 @@ darwin* | rhapsody*) + version_type=darwin + need_lib_prefix=no + need_version=no +- library_names_spec='$libname$release$major$shared_ext $libname$shared_ext' +- soname_spec='$libname$release$major$shared_ext' ++ library_names_spec='${libname}${release}${major}$shared_ext ${libname}$shared_ext' ++ soname_spec='${libname}${release}${major}$shared_ext' + shlibpath_overrides_runpath=yes + shlibpath_var=DYLD_LIBRARY_PATH + shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' +@@ -17656,15 +15076,15 @@ darwin* | rhapsody*) + ;; + + dgux*) +- version_type=linux # correct to gnu/linux during the next big refactor ++ version_type=linux + need_lib_prefix=no + need_version=no +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' ++ soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +-freebsd* | dragonfly* | midnightbsd*) ++freebsd* | dragonfly*) + # DragonFly does not have aout. When/if they implement a new + # versioning mechanism, adjust this. 
+ if test -x /usr/bin/objformat; then +@@ -17678,13 +15098,12 @@ freebsd* | dragonfly* | midnightbsd*) + version_type=freebsd-$objformat + case $version_type in + freebsd-elf*) +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + need_version=no + need_lib_prefix=no + ;; + freebsd-*) +- library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' ++ library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' + need_version=yes + ;; + esac +@@ -17710,15 +15129,15 @@ freebsd* | dragonfly* | midnightbsd*) + ;; + + haiku*) +- version_type=linux # correct to gnu/linux during the next big refactor ++ version_type=linux + need_lib_prefix=no + need_version=no + dynamic_linker="$host_os runtime_loader" +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' ++ soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LIBRARY_PATH +- shlibpath_overrides_runpath=no +- sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' ++ shlibpath_overrides_runpath=yes ++ sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/beos/system/lib' + hardcode_into_libs=yes + ;; + +@@ -17735,15 +15154,14 @@ hpux9* | hpux10* | hpux11*) + dynamic_linker="$host_os dld.so" + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. 
+- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' +- if test 32 = "$HPUX_IA64_MODE"; then ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' ++ soname_spec='${libname}${release}${shared_ext}$major' ++ if test "X$HPUX_IA64_MODE" = X32; then + sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" +- sys_lib_dlsearch_path_spec=/usr/lib/hpux32 + else + sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" +- sys_lib_dlsearch_path_spec=/usr/lib/hpux64 + fi ++ sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + hppa*64*) + shrext_cmds='.sl' +@@ -17751,8 +15169,8 @@ hpux9* | hpux10* | hpux11*) + dynamic_linker="$host_os dld.sl" + shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. 
+- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' ++ soname_spec='${libname}${release}${shared_ext}$major' + sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; +@@ -17761,8 +15179,8 @@ hpux9* | hpux10* | hpux11*) + dynamic_linker="$host_os dld.sl" + shlibpath_var=SHLIB_PATH + shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' ++ soname_spec='${libname}${release}${shared_ext}$major' + ;; + esac + # HP-UX runs *really* slowly unless shared libraries are mode 555, ... 
+@@ -17772,11 +15190,11 @@ hpux9* | hpux10* | hpux11*) + ;; + + interix[3-9]*) +- version_type=linux # correct to gnu/linux during the next big refactor ++ version_type=linux + need_lib_prefix=no + need_version=no +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' ++ soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no +@@ -17787,16 +15205,16 @@ irix5* | irix6* | nonstopux*) + case $host_os in + nonstopux*) version_type=nonstopux ;; + *) +- if test yes = "$lt_cv_prog_gnu_ld"; then +- version_type=linux # correct to gnu/linux during the next big refactor ++ if test "$lt_cv_prog_gnu_ld" = yes; then ++ version_type=linux + else + version_type=irix + fi ;; + esac + need_lib_prefix=no + need_version=no +- soname_spec='$libname$release$shared_ext$major' +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$release$shared_ext $libname$shared_ext' ++ soname_spec='${libname}${release}${shared_ext}$major' ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' + case $host_os in + irix5* | nonstopux*) + libsuff= shlibsuff= +@@ -17811,56 +15229,40 @@ irix5* | irix6* | nonstopux*) + libsuff=64 shlibsuff=64 libmagic=64-bit;; + *) libsuff= shlibsuff= libmagic=never-match;; + esac +- ;; +- esac +- shlibpath_var=LD_LIBRARY${shlibsuff}_PATH +- shlibpath_overrides_runpath=no +- sys_lib_search_path_spec="/usr/lib$libsuff /lib$libsuff /usr/local/lib$libsuff" +- sys_lib_dlsearch_path_spec="/usr/lib$libsuff /lib$libsuff" +- hardcode_into_libs=yes +- ;; +- +-# No shared 
lib support for Linux oldld, aout, or coff. +-linux*oldld* | linux*aout* | linux*coff*) +- dynamic_linker=no +- ;; +- +-linux*android*) +- version_type=none # Android doesn't support versioned libraries. +- need_lib_prefix=no +- need_version=no +- library_names_spec='$libname$release$shared_ext' +- soname_spec='$libname$release$shared_ext' +- finish_cmds= +- shlibpath_var=LD_LIBRARY_PATH +- shlibpath_overrides_runpath=yes +- +- # This implies no fast_install, which is unacceptable. +- # Some rework will be needed to allow for fast_install +- # before this can be enabled. ++ ;; ++ esac ++ shlibpath_var=LD_LIBRARY${shlibsuff}_PATH ++ shlibpath_overrides_runpath=no ++ sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" ++ sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" + hardcode_into_libs=yes ++ ;; + +- dynamic_linker='Android linker' +- # Don't embed -rpath directories since the linker doesn't support them. +- hardcode_libdir_flag_spec_CXX='-L$libdir' ++# No shared lib support for Linux oldld, aout, or coff. ++linux*oldld* | linux*aout* | linux*coff*) ++ dynamic_linker=no + ;; + +-# This must be glibc/ELF. +-linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) +- version_type=linux # correct to gnu/linux during the next big refactor ++# This must be Linux ELF. ++ ++# uclinux* changes (here and below) have been submitted to the libtool ++# project, but have not yet been accepted: they are GCC-local changes ++# for the time being. 
(See ++# https://lists.gnu.org/archive/html/libtool-patches/2018-05/msg00000.html) ++linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu* | uclinuxfdpiceabi) ++ version_type=linux + need_lib_prefix=no + need_version=no +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' ++ soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + + # Some binutils ld are patched to set DT_RUNPATH +- if test ${lt_cv_shlibpath_overrides_runpath+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++ if ${lt_cv_shlibpath_overrides_runpath+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + lt_cv_shlibpath_overrides_runpath=no + save_LDFLAGS=$LDFLAGS + save_libdir=$libdir +@@ -17870,21 +15272,19 @@ else $as_nop + /* end confdefs.h. */ + + int +-main (void) ++main () + { + + ; + return 0; + } + _ACEOF +-if ac_fn_cxx_try_link "$LINENO" +-then : +- if ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null +-then : ++if ac_fn_cxx_try_link "$LINENO"; then : ++ if ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null; then : + lt_cv_shlibpath_overrides_runpath=yes + fi + fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam \ ++rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + LDFLAGS=$save_LDFLAGS + libdir=$save_libdir +@@ -17898,18 +15298,10 @@ fi + # before this can be enabled. + hardcode_into_libs=yes + +- # Add ABI-specific directories to the system library path. 
+- sys_lib_dlsearch_path_spec="/lib64 /usr/lib64 /lib /usr/lib" +- +- # Ideally, we could use ldconfig to report *all* directores which are +- # searched for libraries, however this is still not possible. Aside from not +- # being certain /sbin/ldconfig is available, command +- # 'ldconfig -N -X -v | grep ^/' on 64bit Fedora does not report /usr/lib64, +- # even though it is searched at run-time. Try to do the best guess by +- # appending ld.so.conf contents (and includes) to the search path. ++ # Append ld.so.conf contents to the search path + if test -f /etc/ld.so.conf; then + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '` +- sys_lib_dlsearch_path_spec="$sys_lib_dlsearch_path_spec $lt_ld_extra" ++ sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" + fi + + # We used to test for /lib/ld.so.1 and disable shared libraries on +@@ -17926,12 +15318,12 @@ netbsd*) + need_lib_prefix=no + need_version=no + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then +- library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + dynamic_linker='NetBSD (a.out) ld.so' + else +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' ++ soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='NetBSD ld.elf_so' + fi + shlibpath_var=LD_LIBRARY_PATH +@@ -17940,8 +15332,8 @@ netbsd*) + ;; + + newsos6) +- version_type=linux # correct to 
gnu/linux during the next big refactor +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ version_type=linux ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; +@@ -17950,68 +15342,58 @@ newsos6) + version_type=qnx + need_lib_prefix=no + need_version=no +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' ++ soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='ldqnx.so' + ;; + +-openbsd* | bitrig*) ++openbsd*) + version_type=sunos +- sys_lib_dlsearch_path_spec=/usr/lib ++ sys_lib_dlsearch_path_spec="/usr/lib" + need_lib_prefix=no +- if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then +- need_version=no +- else +- need_version=yes +- fi +- library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' ++ # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs. 
++ case $host_os in ++ openbsd3.3 | openbsd3.3.*) need_version=yes ;; ++ *) need_version=no ;; ++ esac ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + shlibpath_var=LD_LIBRARY_PATH +- shlibpath_overrides_runpath=yes ++ if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then ++ case $host_os in ++ openbsd2.[89] | openbsd2.[89].*) ++ shlibpath_overrides_runpath=no ++ ;; ++ *) ++ shlibpath_overrides_runpath=yes ++ ;; ++ esac ++ else ++ shlibpath_overrides_runpath=yes ++ fi + ;; + + os2*) + libname_spec='$name' +- version_type=windows +- shrext_cmds=.dll +- need_version=no ++ shrext_cmds=".dll" + need_lib_prefix=no +- # OS/2 can only load a DLL with a base name of 8 characters or less. +- soname_spec='`test -n "$os2dllname" && libname="$os2dllname"; +- v=$($ECHO $release$versuffix | tr -d .-); +- n=$($ECHO $libname | cut -b -$((8 - ${#v})) | tr . _); +- $ECHO $n$v`$shared_ext' +- library_names_spec='${libname}_dll.$libext' ++ library_names_spec='$libname${shared_ext} $libname.a' + dynamic_linker='OS/2 ld.exe' +- shlibpath_var=BEGINLIBPATH +- sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" +- sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec +- postinstall_cmds='base_file=`basename \$file`~ +- dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; $ECHO \$dlname'\''`~ +- dldir=$destdir/`dirname \$dlpath`~ +- test -d \$dldir || mkdir -p \$dldir~ +- $install_prog $dir/$dlname \$dldir/$dlname~ +- chmod a+x \$dldir/$dlname~ +- if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then +- eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; +- fi' +- postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; $ECHO \$dlname'\''`~ +- dlpath=$dir/\$dldll~ +- $RM \$dlpath' ++ shlibpath_var=LIBPATH + ;; + + osf3* | osf4* | osf5*) + version_type=osf + need_lib_prefix=no + need_version=no +- soname_spec='$libname$release$shared_ext$major' +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ soname_spec='${libname}${release}${shared_ext}$major' ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" +- sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ++ sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" + ;; + + rdos*) +@@ -18019,11 +15401,11 @@ rdos*) + ;; + + solaris*) +- version_type=linux # correct to gnu/linux during the next big refactor ++ version_type=linux + need_lib_prefix=no + need_version=no +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' ++ soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes +@@ -18033,20 +15415,20 @@ solaris*) + + sunos4*) + version_type=sunos +- library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes +- if test yes = "$with_gnu_ld"; then ++ if test "$with_gnu_ld" = yes; then + need_lib_prefix=no + fi + need_version=yes + ;; + + sysv4 | sysv4.3*) +- version_type=linux # 
correct to gnu/linux during the next big refactor +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' ++ version_type=linux ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' ++ soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + case $host_vendor in + sni) +@@ -18067,24 +15449,24 @@ sysv4 | sysv4.3*) + ;; + + sysv4*MP*) +- if test -d /usr/nec; then +- version_type=linux # correct to gnu/linux during the next big refactor +- library_names_spec='$libname$shared_ext.$versuffix $libname$shared_ext.$major $libname$shared_ext' +- soname_spec='$libname$shared_ext.$major' ++ if test -d /usr/nec ;then ++ version_type=linux ++ library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' ++ soname_spec='$libname${shared_ext}.$major' + shlibpath_var=LD_LIBRARY_PATH + fi + ;; + + sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) +- version_type=sco ++ version_type=freebsd-elf + need_lib_prefix=no + need_version=no +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' ++ soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes +- if test yes = "$with_gnu_ld"; then ++ if test "$with_gnu_ld" = yes; then + sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' + else + sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' +@@ -18099,19 +15481,19 @@ sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + + tpf*) + # TPF is a cross-target only. 
Preferred cross-host = GNU/Linux. +- version_type=linux # correct to gnu/linux during the next big refactor ++ version_type=linux + need_lib_prefix=no + need_version=no +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + + uts4*) +- version_type=linux # correct to gnu/linux during the next big refactor +- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' +- soname_spec='$libname$release$shared_ext$major' ++ version_type=linux ++ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' ++ soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +@@ -18119,32 +15501,22 @@ uts4*) + dynamic_linker=no + ;; + esac +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5 +-printf "%s\n" "$dynamic_linker" >&6; } +-test no = "$dynamic_linker" && can_build_shared=no ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5 ++$as_echo "$dynamic_linker" >&6; } ++test "$dynamic_linker" = no && can_build_shared=no + + variables_saved_for_relink="PATH $shlibpath_var $runpath_var" +-if test yes = "$GCC"; then ++if test "$GCC" = yes; then + variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" + fi + +-if test set = "${lt_cv_sys_lib_search_path_spec+set}"; then +- sys_lib_search_path_spec=$lt_cv_sys_lib_search_path_spec ++if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then ++ sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec" + fi +- +-if test set = "${lt_cv_sys_lib_dlsearch_path_spec+set}"; then +- 
sys_lib_dlsearch_path_spec=$lt_cv_sys_lib_dlsearch_path_spec ++if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then ++ sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec" + fi + +-# remember unaugmented sys_lib_dlsearch_path content for libtool script decls... +-configure_time_dlsearch_path=$sys_lib_dlsearch_path_spec +- +-# ... but it needs LT_SYS_LIBRARY_PATH munging for other configure-time code +-func_munge_path_list sys_lib_dlsearch_path_spec "$LT_SYS_LIBRARY_PATH" +- +-# to be used as default LT_SYS_LIBRARY_PATH value in generated libtool +-configure_time_lt_sys_library_path=$LT_SYS_LIBRARY_PATH +- + + + +@@ -18182,22 +15554,20 @@ configure_time_lt_sys_library_path=$LT_SYS_LIBRARY_PATH + + + +- +- +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5 +-printf %s "checking how to hardcode library paths into programs... " >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5 ++$as_echo_n "checking how to hardcode library paths into programs... " >&6; } + hardcode_action_CXX= + if test -n "$hardcode_libdir_flag_spec_CXX" || + test -n "$runpath_var_CXX" || +- test yes = "$hardcode_automatic_CXX"; then ++ test "X$hardcode_automatic_CXX" = "Xyes" ; then + + # We can hardcode non-existent directories. +- if test no != "$hardcode_direct_CXX" && ++ if test "$hardcode_direct_CXX" != no && + # If the only mechanism to avoid hardcoding is shlibpath_var, we + # have to relink, otherwise we might link with an installed library + # when we should be linking with a yet-to-be-installed one +- ## test no != "$_LT_TAGVAR(hardcode_shlibpath_var, CXX)" && +- test no != "$hardcode_minus_L_CXX"; then ++ ## test "$_LT_TAGVAR(hardcode_shlibpath_var, CXX)" != no && ++ test "$hardcode_minus_L_CXX" != no; then + # Linking always hardcodes the temporary library directory. + hardcode_action_CXX=relink + else +@@ -18209,15 +15579,15 @@ else + # directories. 
+ hardcode_action_CXX=unsupported + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $hardcode_action_CXX" >&5 +-printf "%s\n" "$hardcode_action_CXX" >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $hardcode_action_CXX" >&5 ++$as_echo "$hardcode_action_CXX" >&6; } + +-if test relink = "$hardcode_action_CXX" || +- test yes = "$inherit_rpath_CXX"; then ++if test "$hardcode_action_CXX" = relink || ++ test "$inherit_rpath_CXX" = yes; then + # Fast installation is not supported + enable_fast_install=no +-elif test yes = "$shlibpath_overrides_runpath" || +- test no = "$enable_shared"; then ++elif test "$shlibpath_overrides_runpath" = yes || ++ test "$enable_shared" = no; then + # Fast installation is not necessary + enable_fast_install=needless + fi +@@ -18231,7 +15601,6 @@ fi + fi # test -n "$compiler" + + CC=$lt_save_CC +- CFLAGS=$lt_save_CFLAGS + LDCXX=$LD + LD=$lt_save_LD + GCC=$lt_save_GCC +@@ -18240,7 +15609,7 @@ fi + lt_cv_path_LD=$lt_save_path_LD + lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld + lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld +-fi # test yes != "$_lt_caught_CXX_error" ++fi # test "$_lt_caught_CXX_error" != yes + + ac_ext=c + ac_cpp='$CPP $CPPFLAGS' +@@ -18260,8 +15629,6 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu + + + +- +- + ac_config_commands="$ac_config_commands libtool" + + +@@ -18270,14 +15637,38 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu + # Only expand once: + + +-ACX_LT_HOST_FLAGS ++ ++ ++ ++ ++case $host in ++ *-cygwin* | *-mingw*) ++ # 'host' will be top-level target in the case of a target lib, ++ # we must compare to with_cross_host to decide if this is a native ++ # or cross-compiler and select where to install dlls appropriately. 
++ if test -n "$with_cross_host" && ++ test x"$with_cross_host" != x"no"; then ++ lt_host_flags='-no-undefined -bindir "$(toolexeclibdir)"'; ++ else ++ lt_host_flags='-no-undefined -bindir "$(bindir)"'; ++ fi ++ ;; ++ *) ++ lt_host_flags= ++ ;; ++esac ++ ++ ++ + + ac_fn_c_find_intX_t "$LINENO" "64" "ac_cv_c_int64_t" + case $ac_cv_c_int64_t in #( + no|yes) ;; #( + *) + +-printf "%s\n" "#define int64_t $ac_cv_c_int64_t" >>confdefs.h ++cat >>confdefs.h <<_ACEOF ++#define int64_t $ac_cv_c_int64_t ++_ACEOF + ;; + esac + +@@ -18286,19 +15677,20 @@ case $ac_cv_c_uint64_t in #( + no|yes) ;; #( + *) + +-printf "%s\n" "#define _UINT64_T 1" >>confdefs.h ++$as_echo "#define _UINT64_T 1" >>confdefs.h + + +-printf "%s\n" "#define uint64_t $ac_cv_c_uint64_t" >>confdefs.h ++cat >>confdefs.h <<_ACEOF ++#define uint64_t $ac_cv_c_uint64_t ++_ACEOF + ;; + esac + +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for sys/wait.h that is POSIX.1 compatible" >&5 +-printf %s "checking for sys/wait.h that is POSIX.1 compatible... " >&6; } +-if test ${ac_cv_header_sys_wait_h+y} +-then : +- printf %s "(cached) " >&6 +-else $as_nop ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for sys/wait.h that is POSIX.1 compatible" >&5 ++$as_echo_n "checking for sys/wait.h that is POSIX.1 compatible... " >&6; } ++if ${ac_cv_header_sys_wait_h+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. 
*/ + #include +@@ -18311,7 +15703,7 @@ else $as_nop + #endif + + int +-main (void) ++main () + { + int s; + wait (&s); +@@ -18320,19 +15712,18 @@ main (void) + return 0; + } + _ACEOF +-if ac_fn_c_try_compile "$LINENO" +-then : ++if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_header_sys_wait_h=yes +-else $as_nop ++else + ac_cv_header_sys_wait_h=no + fi +-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + fi +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_sys_wait_h" >&5 +-printf "%s\n" "$ac_cv_header_sys_wait_h" >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_sys_wait_h" >&5 ++$as_echo "$ac_cv_header_sys_wait_h" >&6; } + if test $ac_cv_header_sys_wait_h = yes; then + +-printf "%s\n" "#define HAVE_SYS_WAIT_H 1" >>confdefs.h ++$as_echo "#define HAVE_SYS_WAIT_H 1" >>confdefs.h + + fi + +@@ -18367,8 +15758,8 @@ _ACEOF + case $ac_val in #( + *${as_nl}*) + case $ac_var in #( +- *_cv_*) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 +-printf "%s\n" "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; ++ *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 ++$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; + esac + case $ac_var in #( + _ | IFS | as_nl) ;; #( +@@ -18398,15 +15789,15 @@ printf "%s\n" "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} + /^ac_cv_env_/b end + t clear + :clear +- s/^\([^=]*\)=\(.*[{}].*\)$/test ${\1+y} || &/ ++ s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ + t end + s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ + :end' >>confcache + if diff "$cache_file" confcache >/dev/null 2>&1; then :; else + if test -w "$cache_file"; then + if test "x$cache_file" != "x/dev/null"; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" 
>&5 +-printf "%s\n" "$as_me: updating cache $cache_file" >&6;} ++ { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 ++$as_echo "$as_me: updating cache $cache_file" >&6;} + if test ! -f "$cache_file" || test -h "$cache_file"; then + cat confcache >"$cache_file" + else +@@ -18420,8 +15811,8 @@ printf "%s\n" "$as_me: updating cache $cache_file" >&6;} + fi + fi + else +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 +-printf "%s\n" "$as_me: not updating unwritable cache $cache_file" >&6;} ++ { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 ++$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} + fi + fi + rm -f confcache +@@ -18438,7 +15829,7 @@ U= + for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue + # 1. Remove the extension, and $U if already installed. + ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' +- ac_i=`printf "%s\n" "$ac_i" | sed "$ac_script"` ++ ac_i=`$as_echo "$ac_i" | sed "$ac_script"` + # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR + # will be set to the directory where LIBOBJS objects are built. + as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" +@@ -18449,14 +15840,14 @@ LIBOBJS=$ac_libobjs + LTLIBOBJS=$ac_ltlibobjs + + +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking that generated files are newer than configure" >&5 +-printf %s "checking that generated files are newer than configure... " >&6; } ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking that generated files are newer than configure" >&5 ++$as_echo_n "checking that generated files are newer than configure... " >&6; } + if test -n "$am_sleep_pid"; then + # Hide warnings about reused PIDs. 
+ wait $am_sleep_pid 2>/dev/null + fi +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: done" >&5 +-printf "%s\n" "done" >&6; } ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: done" >&5 ++$as_echo "done" >&6; } + if test -n "$EXEEXT"; then + am__EXEEXT_TRUE= + am__EXEEXT_FALSE='#' +@@ -18490,8 +15881,8 @@ fi + ac_write_fail=0 + ac_clean_files_save=$ac_clean_files + ac_clean_files="$ac_clean_files $CONFIG_STATUS" +-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 +-printf "%s\n" "$as_me: creating $CONFIG_STATUS" >&6;} ++{ $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 ++$as_echo "$as_me: creating $CONFIG_STATUS" >&6;} + as_write_fail=0 + cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 + #! $SHELL +@@ -18514,16 +15905,14 @@ cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 + + # Be more Bourne compatible + DUALCASE=1; export DUALCASE # for MKS sh +-as_nop=: +-if test ${ZSH_VERSION+y} && (emulate sh) >/dev/null 2>&1 +-then : ++if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +-else $as_nop ++else + case `(set -o) 2>/dev/null` in #( + *posix*) : + set -o posix ;; #( +@@ -18533,46 +15922,46 @@ esac + fi + + +- +-# Reset variables that may have inherited troublesome values from +-# the environment. +- +-# IFS needs to be set, to space, tab, and newline, in precisely that order. +-# (If _AS_PATH_WALK were called with IFS unset, it would have the +-# side effect of setting IFS to empty, thus disabling word splitting.) +-# Quoting is to prevent editors from complaining about space-tab. + as_nl=' + ' + export as_nl +-IFS=" "" $as_nl" +- +-PS1='$ ' +-PS2='> ' +-PS4='+ ' +- +-# Ensure predictable behavior from utilities with locale-dependent output. 
+-LC_ALL=C +-export LC_ALL +-LANGUAGE=C +-export LANGUAGE +- +-# We cannot yet rely on "unset" to work, but we need these variables +-# to be unset--not just set to an empty or harmless value--now, to +-# avoid bugs in old shells (e.g. pre-3.0 UWIN ksh). This construct +-# also avoids known problems related to "unset" and subshell syntax +-# in other old shells (e.g. bash 2.01 and pdksh 5.2.14). +-for as_var in BASH_ENV ENV MAIL MAILPATH CDPATH +-do eval test \${$as_var+y} \ +- && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : +-done +- +-# Ensure that fds 0, 1, and 2 are open. +-if (exec 3>&0) 2>/dev/null; then :; else exec 0&1) 2>/dev/null; then :; else exec 1>/dev/null; fi +-if (exec 3>&2) ; then :; else exec 2>/dev/null; fi ++# Printing a long string crashes Solaris 7 /usr/bin/printf. ++as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' ++as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo ++as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo ++# Prefer a ksh shell builtin over an external printf program on Solaris, ++# but without wasting forks for bash or zsh. 
++if test -z "$BASH_VERSION$ZSH_VERSION" \ ++ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then ++ as_echo='print -r --' ++ as_echo_n='print -rn --' ++elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then ++ as_echo='printf %s\n' ++ as_echo_n='printf %s' ++else ++ if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then ++ as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' ++ as_echo_n='/usr/ucb/echo -n' ++ else ++ as_echo_body='eval expr "X$1" : "X\\(.*\\)"' ++ as_echo_n_body='eval ++ arg=$1; ++ case $arg in #( ++ *"$as_nl"*) ++ expr "X$arg" : "X\\(.*\\)$as_nl"; ++ arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; ++ esac; ++ expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" ++ ' ++ export as_echo_n_body ++ as_echo_n='sh -c $as_echo_n_body as_echo' ++ fi ++ export as_echo_body ++ as_echo='sh -c $as_echo_body as_echo' ++fi + + # The user is always right. +-if ${PATH_SEPARATOR+false} :; then ++if test "${PATH_SEPARATOR+set}" != set; then + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { + (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || +@@ -18581,6 +15970,13 @@ if ${PATH_SEPARATOR+false} :; then + fi + + ++# IFS ++# We need space, tab and new line, in precisely that order. Quoting is ++# there to prevent editors from complaining about space-tab. ++# (If _AS_PATH_WALK were called with IFS unset, it would disable word ++# splitting by setting IFS to empty value.) ++IFS=" "" $as_nl" ++ + # Find who we are. Look in the path if we contain no directory separator. + as_myself= + case $0 in #(( +@@ -18589,12 +15985,8 @@ case $0 in #(( + for as_dir in $PATH + do + IFS=$as_save_IFS +- case $as_dir in #((( +- '') as_dir=./ ;; +- */) ;; +- *) as_dir=$as_dir/ ;; +- esac +- test -r "$as_dir$0" && as_myself=$as_dir$0 && break ++ test -z "$as_dir" && as_dir=. 
++ test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break + done + IFS=$as_save_IFS + +@@ -18606,10 +15998,30 @@ if test "x$as_myself" = x; then + as_myself=$0 + fi + if test ! -f "$as_myself"; then +- printf "%s\n" "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 ++ $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 + exit 1 + fi + ++# Unset variables that we do not need and which cause bugs (e.g. in ++# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" ++# suppresses any "Segmentation fault" message there. '((' could ++# trigger a bug in pdksh 5.2.14. ++for as_var in BASH_ENV ENV MAIL MAILPATH ++do eval test x\${$as_var+set} = xset \ ++ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : ++done ++PS1='$ ' ++PS2='> ' ++PS4='+ ' ++ ++# NLS nuisances. ++LC_ALL=C ++export LC_ALL ++LANGUAGE=C ++export LANGUAGE ++ ++# CDPATH. ++(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + + + # as_fn_error STATUS ERROR [LINENO LOG_FD] +@@ -18622,14 +16034,13 @@ as_fn_error () + as_status=$1; test $as_status -eq 0 && as_status=1 + if test "$4"; then + as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack +- printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 ++ $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 + fi +- printf "%s\n" "$as_me: error: $2" >&2 ++ $as_echo "$as_me: error: $2" >&2 + as_fn_exit $as_status + } # as_fn_error + + +- + # as_fn_set_status STATUS + # ----------------------- + # Set $? to STATUS, without forking. +@@ -18656,20 +16067,18 @@ as_fn_unset () + { eval $1=; unset $1;} + } + as_unset=as_fn_unset +- + # as_fn_append VAR VALUE + # ---------------------- + # Append the text in VALUE to the end of the definition contained in VAR. Take + # advantage of any shell optimizations that allow amortized linear growth over + # repeated appends, instead of the typical quadratic growth present in naive + # implementations. 
+-if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null +-then : ++if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : + eval 'as_fn_append () + { + eval $1+=\$2 + }' +-else $as_nop ++else + as_fn_append () + { + eval $1=\$$1\$2 +@@ -18681,13 +16090,12 @@ fi # as_fn_append + # Perform arithmetic evaluation on the ARGs, and store the result in the + # global $as_val. Take advantage of shells that can avoid forks. The arguments + # must be portable across $(()) and expr. +-if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null +-then : ++if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : + eval 'as_fn_arith () + { + as_val=$(( $* )) + }' +-else $as_nop ++else + as_fn_arith () + { + as_val=`expr "$@" || test $? -eq 1` +@@ -18718,7 +16126,7 @@ as_me=`$as_basename -- "$0" || + $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| . 2>/dev/null || +-printf "%s\n" X/"$0" | ++$as_echo X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q +@@ -18740,10 +16148,6 @@ as_cr_Letters=$as_cr_letters$as_cr_LETTERS + as_cr_digits='0123456789' + as_cr_alnum=$as_cr_Letters$as_cr_digits + +- +-# Determine whether it's possible to make 'echo' print without a newline. +-# These variables are no longer used directly by Autoconf, but are AC_SUBSTed +-# for compatibility with existing Makefiles. + ECHO_C= ECHO_N= ECHO_T= + case `echo -n x` in #((((( + -n*) +@@ -18757,12 +16161,6 @@ case `echo -n x` in #((((( + ECHO_N='-n';; + esac + +-# For backward compatibility with old third-party macros, we provide +-# the shell variables $as_echo and $as_echo_n. New code should use +-# AS_ECHO(["message"]) and AS_ECHO_N(["message"]), respectively. 
+-as_echo='printf %s\n' +-as_echo_n='printf %s' +- + rm -f conf$$ conf$$.exe conf$$.file + if test -d conf$$.dir; then + rm -f conf$$.dir/conf$$.file +@@ -18804,7 +16202,7 @@ as_fn_mkdir_p () + as_dirs= + while :; do + case $as_dir in #( +- *\'*) as_qdir=`printf "%s\n" "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( ++ *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( + *) as_qdir=$as_dir;; + esac + as_dirs="'$as_qdir' $as_dirs" +@@ -18813,7 +16211,7 @@ $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || +-printf "%s\n" X"$as_dir" | ++$as_echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q +@@ -18876,7 +16274,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 + # values after options handling. + ac_log=" + This file was extended by bolt plugin for ld $as_me 0.1, which was +-generated by GNU Autoconf 2.71. Invocation command line was ++generated by GNU Autoconf 2.69. Invocation command line was + + CONFIG_FILES = $CONFIG_FILES + CONFIG_HEADERS = $CONFIG_HEADERS +@@ -18938,16 +16336,14 @@ $config_commands + Report bugs to the package provider." + + _ACEOF +-ac_cs_config=`printf "%s\n" "$ac_configure_args" | sed "$ac_safe_unquote"` +-ac_cs_config_escaped=`printf "%s\n" "$ac_cs_config" | sed "s/^ //; s/'/'\\\\\\\\''/g"` + cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +-ac_cs_config='$ac_cs_config_escaped' ++ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" + ac_cs_version="\\ + bolt plugin for ld config.status 0.1 +-configured by $0, generated by GNU Autoconf 2.71, ++configured by $0, generated by GNU Autoconf 2.69, + with options \\"\$ac_cs_config\\" + +-Copyright (C) 2021 Free Software Foundation, Inc. ++Copyright (C) 2012 Free Software Foundation, Inc. 
+ This config.status script is free software; the Free Software Foundation + gives unlimited permission to copy, distribute and modify it." + +@@ -18987,15 +16383,15 @@ do + -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) + ac_cs_recheck=: ;; + --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) +- printf "%s\n" "$ac_cs_version"; exit ;; ++ $as_echo "$ac_cs_version"; exit ;; + --config | --confi | --conf | --con | --co | --c ) +- printf "%s\n" "$ac_cs_config"; exit ;; ++ $as_echo "$ac_cs_config"; exit ;; + --debug | --debu | --deb | --de | --d | -d ) + debug=: ;; + --file | --fil | --fi | --f ) + $ac_shift + case $ac_optarg in +- *\'*) ac_optarg=`printf "%s\n" "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; ++ *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; + '') as_fn_error $? "missing file argument" ;; + esac + as_fn_append CONFIG_FILES " '$ac_optarg'" +@@ -19003,7 +16399,7 @@ do + --header | --heade | --head | --hea ) + $ac_shift + case $ac_optarg in +- *\'*) ac_optarg=`printf "%s\n" "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; ++ *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; + esac + as_fn_append CONFIG_HEADERS " '$ac_optarg'" + ac_need_defaults=false;; +@@ -19012,7 +16408,7 @@ do + as_fn_error $? 
"ambiguous option: \`$1' + Try \`$0 --help' for more information.";; + --help | --hel | -h ) +- printf "%s\n" "$ac_cs_usage"; exit ;; ++ $as_echo "$ac_cs_usage"; exit ;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil | --si | --s) + ac_cs_silent=: ;; +@@ -19040,7 +16436,7 @@ cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 + if \$ac_cs_recheck; then + set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion + shift +- \printf "%s\n" "running CONFIG_SHELL=$SHELL \$*" >&6 ++ \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 + CONFIG_SHELL='$SHELL' + export CONFIG_SHELL + exec "\$@" +@@ -19054,7 +16450,7 @@ exec 5>>config.log + sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX + ## Running $as_me. ## + _ASBOX +- printf "%s\n" "$ac_log" ++ $as_echo "$ac_log" + } >&5 + + _ACEOF +@@ -19078,10 +16474,8 @@ enable_shared='`$ECHO "$enable_shared" | $SED "$delay_single_quote_subst"`' + enable_static='`$ECHO "$enable_static" | $SED "$delay_single_quote_subst"`' + pic_mode='`$ECHO "$pic_mode" | $SED "$delay_single_quote_subst"`' + enable_fast_install='`$ECHO "$enable_fast_install" | $SED "$delay_single_quote_subst"`' +-shared_archive_member_spec='`$ECHO "$shared_archive_member_spec" | $SED "$delay_single_quote_subst"`' + SHELL='`$ECHO "$SHELL" | $SED "$delay_single_quote_subst"`' + ECHO='`$ECHO "$ECHO" | $SED "$delay_single_quote_subst"`' +-PATH_SEPARATOR='`$ECHO "$PATH_SEPARATOR" | $SED "$delay_single_quote_subst"`' + host_alias='`$ECHO "$host_alias" | $SED "$delay_single_quote_subst"`' + host='`$ECHO "$host" | $SED "$delay_single_quote_subst"`' + host_os='`$ECHO "$host_os" | $SED "$delay_single_quote_subst"`' +@@ -19102,22 +16496,13 @@ exeext='`$ECHO "$exeext" | $SED "$delay_single_quote_subst"`' + lt_unset='`$ECHO "$lt_unset" | $SED "$delay_single_quote_subst"`' + lt_SP2NL='`$ECHO "$lt_SP2NL" | $SED "$delay_single_quote_subst"`' + lt_NL2SP='`$ECHO "$lt_NL2SP" | $SED 
"$delay_single_quote_subst"`' +-lt_cv_to_host_file_cmd='`$ECHO "$lt_cv_to_host_file_cmd" | $SED "$delay_single_quote_subst"`' +-lt_cv_to_tool_file_cmd='`$ECHO "$lt_cv_to_tool_file_cmd" | $SED "$delay_single_quote_subst"`' + reload_flag='`$ECHO "$reload_flag" | $SED "$delay_single_quote_subst"`' + reload_cmds='`$ECHO "$reload_cmds" | $SED "$delay_single_quote_subst"`' +-FILECMD='`$ECHO "$FILECMD" | $SED "$delay_single_quote_subst"`' + OBJDUMP='`$ECHO "$OBJDUMP" | $SED "$delay_single_quote_subst"`' + deplibs_check_method='`$ECHO "$deplibs_check_method" | $SED "$delay_single_quote_subst"`' + file_magic_cmd='`$ECHO "$file_magic_cmd" | $SED "$delay_single_quote_subst"`' +-file_magic_glob='`$ECHO "$file_magic_glob" | $SED "$delay_single_quote_subst"`' +-want_nocaseglob='`$ECHO "$want_nocaseglob" | $SED "$delay_single_quote_subst"`' +-DLLTOOL='`$ECHO "$DLLTOOL" | $SED "$delay_single_quote_subst"`' +-sharedlib_from_linklib_cmd='`$ECHO "$sharedlib_from_linklib_cmd" | $SED "$delay_single_quote_subst"`' + AR='`$ECHO "$AR" | $SED "$delay_single_quote_subst"`' +-lt_ar_flags='`$ECHO "$lt_ar_flags" | $SED "$delay_single_quote_subst"`' + AR_FLAGS='`$ECHO "$AR_FLAGS" | $SED "$delay_single_quote_subst"`' +-archiver_list_spec='`$ECHO "$archiver_list_spec" | $SED "$delay_single_quote_subst"`' + STRIP='`$ECHO "$STRIP" | $SED "$delay_single_quote_subst"`' + RANLIB='`$ECHO "$RANLIB" | $SED "$delay_single_quote_subst"`' + old_postinstall_cmds='`$ECHO "$old_postinstall_cmds" | $SED "$delay_single_quote_subst"`' +@@ -19130,22 +16515,16 @@ compiler='`$ECHO "$compiler" | $SED "$delay_single_quote_subst"`' + GCC='`$ECHO "$GCC" | $SED "$delay_single_quote_subst"`' + lt_cv_sys_global_symbol_pipe='`$ECHO "$lt_cv_sys_global_symbol_pipe" | $SED "$delay_single_quote_subst"`' + lt_cv_sys_global_symbol_to_cdecl='`$ECHO "$lt_cv_sys_global_symbol_to_cdecl" | $SED "$delay_single_quote_subst"`' +-lt_cv_sys_global_symbol_to_import='`$ECHO "$lt_cv_sys_global_symbol_to_import" | $SED 
"$delay_single_quote_subst"`' + lt_cv_sys_global_symbol_to_c_name_address='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address" | $SED "$delay_single_quote_subst"`' + lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address_lib_prefix" | $SED "$delay_single_quote_subst"`' +-lt_cv_nm_interface='`$ECHO "$lt_cv_nm_interface" | $SED "$delay_single_quote_subst"`' +-nm_file_list_spec='`$ECHO "$nm_file_list_spec" | $SED "$delay_single_quote_subst"`' +-lt_sysroot='`$ECHO "$lt_sysroot" | $SED "$delay_single_quote_subst"`' +-lt_cv_truncate_bin='`$ECHO "$lt_cv_truncate_bin" | $SED "$delay_single_quote_subst"`' + objdir='`$ECHO "$objdir" | $SED "$delay_single_quote_subst"`' + MAGIC_CMD='`$ECHO "$MAGIC_CMD" | $SED "$delay_single_quote_subst"`' + lt_prog_compiler_no_builtin_flag='`$ECHO "$lt_prog_compiler_no_builtin_flag" | $SED "$delay_single_quote_subst"`' +-lt_prog_compiler_pic='`$ECHO "$lt_prog_compiler_pic" | $SED "$delay_single_quote_subst"`' + lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' ++lt_prog_compiler_pic='`$ECHO "$lt_prog_compiler_pic" | $SED "$delay_single_quote_subst"`' + lt_prog_compiler_static='`$ECHO "$lt_prog_compiler_static" | $SED "$delay_single_quote_subst"`' + lt_cv_prog_compiler_c_o='`$ECHO "$lt_cv_prog_compiler_c_o" | $SED "$delay_single_quote_subst"`' + need_locks='`$ECHO "$need_locks" | $SED "$delay_single_quote_subst"`' +-MANIFEST_TOOL='`$ECHO "$MANIFEST_TOOL" | $SED "$delay_single_quote_subst"`' + DSYMUTIL='`$ECHO "$DSYMUTIL" | $SED "$delay_single_quote_subst"`' + NMEDIT='`$ECHO "$NMEDIT" | $SED "$delay_single_quote_subst"`' + LIPO='`$ECHO "$LIPO" | $SED "$delay_single_quote_subst"`' +@@ -19169,6 +16548,7 @@ with_gnu_ld='`$ECHO "$with_gnu_ld" | $SED "$delay_single_quote_subst"`' + allow_undefined_flag='`$ECHO "$allow_undefined_flag" | $SED "$delay_single_quote_subst"`' + no_undefined_flag='`$ECHO "$no_undefined_flag" | $SED "$delay_single_quote_subst"`' + 
hardcode_libdir_flag_spec='`$ECHO "$hardcode_libdir_flag_spec" | $SED "$delay_single_quote_subst"`' ++hardcode_libdir_flag_spec_ld='`$ECHO "$hardcode_libdir_flag_spec_ld" | $SED "$delay_single_quote_subst"`' + hardcode_libdir_separator='`$ECHO "$hardcode_libdir_separator" | $SED "$delay_single_quote_subst"`' + hardcode_direct='`$ECHO "$hardcode_direct" | $SED "$delay_single_quote_subst"`' + hardcode_direct_absolute='`$ECHO "$hardcode_direct_absolute" | $SED "$delay_single_quote_subst"`' +@@ -19177,12 +16557,12 @@ hardcode_shlibpath_var='`$ECHO "$hardcode_shlibpath_var" | $SED "$delay_single_q + hardcode_automatic='`$ECHO "$hardcode_automatic" | $SED "$delay_single_quote_subst"`' + inherit_rpath='`$ECHO "$inherit_rpath" | $SED "$delay_single_quote_subst"`' + link_all_deplibs='`$ECHO "$link_all_deplibs" | $SED "$delay_single_quote_subst"`' ++fix_srcfile_path='`$ECHO "$fix_srcfile_path" | $SED "$delay_single_quote_subst"`' + always_export_symbols='`$ECHO "$always_export_symbols" | $SED "$delay_single_quote_subst"`' + export_symbols_cmds='`$ECHO "$export_symbols_cmds" | $SED "$delay_single_quote_subst"`' + exclude_expsyms='`$ECHO "$exclude_expsyms" | $SED "$delay_single_quote_subst"`' + include_expsyms='`$ECHO "$include_expsyms" | $SED "$delay_single_quote_subst"`' + prelink_cmds='`$ECHO "$prelink_cmds" | $SED "$delay_single_quote_subst"`' +-postlink_cmds='`$ECHO "$postlink_cmds" | $SED "$delay_single_quote_subst"`' + file_list_spec='`$ECHO "$file_list_spec" | $SED "$delay_single_quote_subst"`' + variables_saved_for_relink='`$ECHO "$variables_saved_for_relink" | $SED "$delay_single_quote_subst"`' + need_lib_prefix='`$ECHO "$need_lib_prefix" | $SED "$delay_single_quote_subst"`' +@@ -19201,8 +16581,7 @@ finish_cmds='`$ECHO "$finish_cmds" | $SED "$delay_single_quote_subst"`' + finish_eval='`$ECHO "$finish_eval" | $SED "$delay_single_quote_subst"`' + hardcode_into_libs='`$ECHO "$hardcode_into_libs" | $SED "$delay_single_quote_subst"`' + sys_lib_search_path_spec='`$ECHO 
"$sys_lib_search_path_spec" | $SED "$delay_single_quote_subst"`' +-configure_time_dlsearch_path='`$ECHO "$configure_time_dlsearch_path" | $SED "$delay_single_quote_subst"`' +-configure_time_lt_sys_library_path='`$ECHO "$configure_time_lt_sys_library_path" | $SED "$delay_single_quote_subst"`' ++sys_lib_dlsearch_path_spec='`$ECHO "$sys_lib_dlsearch_path_spec" | $SED "$delay_single_quote_subst"`' + hardcode_action='`$ECHO "$hardcode_action" | $SED "$delay_single_quote_subst"`' + enable_dlopen='`$ECHO "$enable_dlopen" | $SED "$delay_single_quote_subst"`' + enable_dlopen_self='`$ECHO "$enable_dlopen_self" | $SED "$delay_single_quote_subst"`' +@@ -19222,8 +16601,8 @@ old_archive_cmds_CXX='`$ECHO "$old_archive_cmds_CXX" | $SED "$delay_single_quote + compiler_CXX='`$ECHO "$compiler_CXX" | $SED "$delay_single_quote_subst"`' + GCC_CXX='`$ECHO "$GCC_CXX" | $SED "$delay_single_quote_subst"`' + lt_prog_compiler_no_builtin_flag_CXX='`$ECHO "$lt_prog_compiler_no_builtin_flag_CXX" | $SED "$delay_single_quote_subst"`' +-lt_prog_compiler_pic_CXX='`$ECHO "$lt_prog_compiler_pic_CXX" | $SED "$delay_single_quote_subst"`' + lt_prog_compiler_wl_CXX='`$ECHO "$lt_prog_compiler_wl_CXX" | $SED "$delay_single_quote_subst"`' ++lt_prog_compiler_pic_CXX='`$ECHO "$lt_prog_compiler_pic_CXX" | $SED "$delay_single_quote_subst"`' + lt_prog_compiler_static_CXX='`$ECHO "$lt_prog_compiler_static_CXX" | $SED "$delay_single_quote_subst"`' + lt_cv_prog_compiler_c_o_CXX='`$ECHO "$lt_cv_prog_compiler_c_o_CXX" | $SED "$delay_single_quote_subst"`' + archive_cmds_need_lc_CXX='`$ECHO "$archive_cmds_need_lc_CXX" | $SED "$delay_single_quote_subst"`' +@@ -19241,6 +16620,7 @@ with_gnu_ld_CXX='`$ECHO "$with_gnu_ld_CXX" | $SED "$delay_single_quote_subst"`' + allow_undefined_flag_CXX='`$ECHO "$allow_undefined_flag_CXX" | $SED "$delay_single_quote_subst"`' + no_undefined_flag_CXX='`$ECHO "$no_undefined_flag_CXX" | $SED "$delay_single_quote_subst"`' + hardcode_libdir_flag_spec_CXX='`$ECHO "$hardcode_libdir_flag_spec_CXX" 
| $SED "$delay_single_quote_subst"`' ++hardcode_libdir_flag_spec_ld_CXX='`$ECHO "$hardcode_libdir_flag_spec_ld_CXX" | $SED "$delay_single_quote_subst"`' + hardcode_libdir_separator_CXX='`$ECHO "$hardcode_libdir_separator_CXX" | $SED "$delay_single_quote_subst"`' + hardcode_direct_CXX='`$ECHO "$hardcode_direct_CXX" | $SED "$delay_single_quote_subst"`' + hardcode_direct_absolute_CXX='`$ECHO "$hardcode_direct_absolute_CXX" | $SED "$delay_single_quote_subst"`' +@@ -19249,12 +16629,12 @@ hardcode_shlibpath_var_CXX='`$ECHO "$hardcode_shlibpath_var_CXX" | $SED "$delay_ + hardcode_automatic_CXX='`$ECHO "$hardcode_automatic_CXX" | $SED "$delay_single_quote_subst"`' + inherit_rpath_CXX='`$ECHO "$inherit_rpath_CXX" | $SED "$delay_single_quote_subst"`' + link_all_deplibs_CXX='`$ECHO "$link_all_deplibs_CXX" | $SED "$delay_single_quote_subst"`' ++fix_srcfile_path_CXX='`$ECHO "$fix_srcfile_path_CXX" | $SED "$delay_single_quote_subst"`' + always_export_symbols_CXX='`$ECHO "$always_export_symbols_CXX" | $SED "$delay_single_quote_subst"`' + export_symbols_cmds_CXX='`$ECHO "$export_symbols_cmds_CXX" | $SED "$delay_single_quote_subst"`' + exclude_expsyms_CXX='`$ECHO "$exclude_expsyms_CXX" | $SED "$delay_single_quote_subst"`' + include_expsyms_CXX='`$ECHO "$include_expsyms_CXX" | $SED "$delay_single_quote_subst"`' + prelink_cmds_CXX='`$ECHO "$prelink_cmds_CXX" | $SED "$delay_single_quote_subst"`' +-postlink_cmds_CXX='`$ECHO "$postlink_cmds_CXX" | $SED "$delay_single_quote_subst"`' + file_list_spec_CXX='`$ECHO "$file_list_spec_CXX" | $SED "$delay_single_quote_subst"`' + hardcode_action_CXX='`$ECHO "$hardcode_action_CXX" | $SED "$delay_single_quote_subst"`' + compiler_lib_search_dirs_CXX='`$ECHO "$compiler_lib_search_dirs_CXX" | $SED "$delay_single_quote_subst"`' +@@ -19279,7 +16659,6 @@ _LTECHO_EOF' + # Quote evaled strings. 
+ for var in SHELL \ + ECHO \ +-PATH_SEPARATOR \ + SED \ + GREP \ + EGREP \ +@@ -19290,16 +16669,11 @@ LN_S \ + lt_SP2NL \ + lt_NL2SP \ + reload_flag \ +-FILECMD \ + OBJDUMP \ + deplibs_check_method \ + file_magic_cmd \ +-file_magic_glob \ +-want_nocaseglob \ +-DLLTOOL \ +-sharedlib_from_linklib_cmd \ + AR \ +-archiver_list_spec \ ++AR_FLAGS \ + STRIP \ + RANLIB \ + CC \ +@@ -19307,19 +16681,14 @@ CFLAGS \ + compiler \ + lt_cv_sys_global_symbol_pipe \ + lt_cv_sys_global_symbol_to_cdecl \ +-lt_cv_sys_global_symbol_to_import \ + lt_cv_sys_global_symbol_to_c_name_address \ + lt_cv_sys_global_symbol_to_c_name_address_lib_prefix \ +-lt_cv_nm_interface \ +-nm_file_list_spec \ +-lt_cv_truncate_bin \ + lt_prog_compiler_no_builtin_flag \ +-lt_prog_compiler_pic \ + lt_prog_compiler_wl \ ++lt_prog_compiler_pic \ + lt_prog_compiler_static \ + lt_cv_prog_compiler_c_o \ + need_locks \ +-MANIFEST_TOOL \ + DSYMUTIL \ + NMEDIT \ + LIPO \ +@@ -19333,7 +16702,9 @@ with_gnu_ld \ + allow_undefined_flag \ + no_undefined_flag \ + hardcode_libdir_flag_spec \ ++hardcode_libdir_flag_spec_ld \ + hardcode_libdir_separator \ ++fix_srcfile_path \ + exclude_expsyms \ + include_expsyms \ + file_list_spec \ +@@ -19355,8 +16726,8 @@ LD_CXX \ + reload_flag_CXX \ + compiler_CXX \ + lt_prog_compiler_no_builtin_flag_CXX \ +-lt_prog_compiler_pic_CXX \ + lt_prog_compiler_wl_CXX \ ++lt_prog_compiler_pic_CXX \ + lt_prog_compiler_static_CXX \ + lt_cv_prog_compiler_c_o_CXX \ + export_dynamic_flag_spec_CXX \ +@@ -19366,7 +16737,9 @@ with_gnu_ld_CXX \ + allow_undefined_flag_CXX \ + no_undefined_flag_CXX \ + hardcode_libdir_flag_spec_CXX \ ++hardcode_libdir_flag_spec_ld_CXX \ + hardcode_libdir_separator_CXX \ ++fix_srcfile_path_CXX \ + exclude_expsyms_CXX \ + include_expsyms_CXX \ + file_list_spec_CXX \ +@@ -19378,7 +16751,7 @@ postdeps_CXX \ + compiler_lib_search_path_CXX; do + case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in + *[\\\\\\\`\\"\\\$]*) +- eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | 
\\\$SED \\"\\\$sed_quote_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes ++ eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED \\"\\\$sed_quote_subst\\"\\\`\\\\\\"" + ;; + *) + eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" +@@ -19400,13 +16773,11 @@ module_cmds \ + module_expsym_cmds \ + export_symbols_cmds \ + prelink_cmds \ +-postlink_cmds \ + postinstall_cmds \ + postuninstall_cmds \ + finish_cmds \ + sys_lib_search_path_spec \ +-configure_time_dlsearch_path \ +-configure_time_lt_sys_library_path \ ++sys_lib_dlsearch_path_spec \ + reload_cmds_CXX \ + old_archive_cmds_CXX \ + old_archive_from_new_cmds_CXX \ +@@ -19416,11 +16787,10 @@ archive_expsym_cmds_CXX \ + module_cmds_CXX \ + module_expsym_cmds_CXX \ + export_symbols_cmds_CXX \ +-prelink_cmds_CXX \ +-postlink_cmds_CXX; do ++prelink_cmds_CXX; do + case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in + *[\\\\\\\`\\"\\\$]*) +- eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes ++ eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" + ;; + *) + eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" +@@ -19429,16 +16799,19 @@ postlink_cmds_CXX; do + done + + ac_aux_dir='$ac_aux_dir' ++xsi_shell='$xsi_shell' ++lt_shell_append='$lt_shell_append' + +-# See if we are running on zsh, and set the options that allow our ++# See if we are running on zsh, and set the options which allow our + # commands through without removal of \ escapes INIT. 
+-if test -n "\${ZSH_VERSION+set}"; then ++if test -n "\${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST + fi + + + PACKAGE='$PACKAGE' + VERSION='$VERSION' ++ TIMESTAMP='$TIMESTAMP' + RM='$RM' + ofile='$ofile' + +@@ -19470,9 +16843,9 @@ done + # We use the long form for the default assignment because of an extremely + # bizarre bug on SunOS 4.1.3. + if $ac_need_defaults; then +- test ${CONFIG_FILES+y} || CONFIG_FILES=$config_files +- test ${CONFIG_HEADERS+y} || CONFIG_HEADERS=$config_headers +- test ${CONFIG_COMMANDS+y} || CONFIG_COMMANDS=$config_commands ++ test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files ++ test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers ++ test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands + fi + + # Have a temporary directory for convenience. Make it in the build tree +@@ -19808,7 +17181,7 @@ do + esac || + as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; + esac +- case $ac_f in *\'*) ac_f=`printf "%s\n" "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac ++ case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac + as_fn_append ac_file_inputs " '$ac_f'" + done + +@@ -19816,17 +17189,17 @@ do + # use $as_me), people would be surprised to read: + # /* config.h. Generated by config.status. */ + configure_input='Generated from '` +- printf "%s\n" "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' ++ $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' + `' by configure.' + if test x"$ac_file" != x-; then + configure_input="$ac_file. $configure_input" +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 +-printf "%s\n" "$as_me: creating $ac_file" >&6;} ++ { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 ++$as_echo "$as_me: creating $ac_file" >&6;} + fi + # Neutralize special characters interpreted by sed in replacement strings. 
+ case $configure_input in #( + *\&* | *\|* | *\\* ) +- ac_sed_conf_input=`printf "%s\n" "$configure_input" | ++ ac_sed_conf_input=`$as_echo "$configure_input" | + sed 's/[\\\\&|]/\\\\&/g'`;; #( + *) ac_sed_conf_input=$configure_input;; + esac +@@ -19843,7 +17216,7 @@ $as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$ac_file" : 'X\(//\)[^/]' \| \ + X"$ac_file" : 'X\(//\)$' \| \ + X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || +-printf "%s\n" X"$ac_file" | ++$as_echo X"$ac_file" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q +@@ -19867,9 +17240,9 @@ printf "%s\n" X"$ac_file" | + case "$ac_dir" in + .) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; + *) +- ac_dir_suffix=/`printf "%s\n" "$ac_dir" | sed 's|^\.[\\/]||'` ++ ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` + # A ".." for each directory in $ac_dir_suffix. +- ac_top_builddir_sub=`printf "%s\n" "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` ++ ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` + case $ac_top_builddir_sub in + "") ac_top_builddir_sub=. 
ac_top_build_prefix= ;; + *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; +@@ -19931,8 +17304,8 @@ ac_sed_dataroot=' + case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in + *datarootdir*) ac_datarootdir_seen=yes;; + *@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 +-printf "%s\n" "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} ++ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 ++$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} + _ACEOF + cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 + ac_datarootdir_hack=' +@@ -19976,9 +17349,9 @@ test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && + { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && + { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ + "$ac_tmp/out"`; test -z "$ac_out"; } && +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' ++ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' + which seems to be undefined. Please make sure it is defined" >&5 +-printf "%s\n" "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' ++$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' + which seems to be undefined. Please make sure it is defined" >&2;} + + rm -f "$ac_tmp/stdin" +@@ -19994,20 +17367,20 @@ which seems to be undefined. Please make sure it is defined" >&2;} + # + if test x"$ac_file" != x-; then + { +- printf "%s\n" "/* $configure_input */" >&1 \ ++ $as_echo "/* $configure_input */" \ + && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" + } >"$ac_tmp/config.h" \ + || as_fn_error $? 
"could not create $ac_file" "$LINENO" 5 + if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 +-printf "%s\n" "$as_me: $ac_file is unchanged" >&6;} ++ { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 ++$as_echo "$as_me: $ac_file is unchanged" >&6;} + else + rm -f "$ac_file" + mv "$ac_tmp/config.h" "$ac_file" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + fi + else +- printf "%s\n" "/* $configure_input */" >&1 \ ++ $as_echo "/* $configure_input */" \ + && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \ + || as_fn_error $? "could not create -" "$LINENO" 5 + fi +@@ -20027,7 +17400,7 @@ $as_expr X"$_am_arg" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$_am_arg" : 'X\(//\)[^/]' \| \ + X"$_am_arg" : 'X\(//\)$' \| \ + X"$_am_arg" : 'X\(/\)' \| . 2>/dev/null || +-printf "%s\n" X"$_am_arg" | ++$as_echo X"$_am_arg" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q +@@ -20047,8 +17420,8 @@ printf "%s\n" X"$_am_arg" | + s/.*/./; q'`/stamp-h$_am_stamp_count + ;; + +- :C) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5 +-printf "%s\n" "$as_me: executing $ac_file commands" >&6;} ++ :C) { $as_echo "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5 ++$as_echo "$as_me: executing $ac_file commands" >&6;} + ;; + esac + +@@ -20074,7 +17447,7 @@ esac + for am_mf + do + # Strip MF so we end up with the name of the file. +- am_mf=`printf "%s\n" "$am_mf" | sed -e 's/:.*$//'` ++ am_mf=`$as_echo "$am_mf" | sed -e 's/:.*$//'` + # Check whether this is an Automake generated Makefile which includes + # dependency-tracking related rules and includes. + # Grep'ing the whole file directly is not great: AIX grep has a line +@@ -20086,7 +17459,7 @@ $as_expr X"$am_mf" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$am_mf" : 'X\(//\)[^/]' \| \ + X"$am_mf" : 'X\(//\)$' \| \ + X"$am_mf" : 'X\(/\)' \| . 
2>/dev/null || +-printf "%s\n" X"$am_mf" | ++$as_echo X"$am_mf" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q +@@ -20108,7 +17481,7 @@ printf "%s\n" X"$am_mf" | + $as_expr X/"$am_mf" : '.*/\([^/][^/]*\)/*$' \| \ + X"$am_mf" : 'X\(//\)$' \| \ + X"$am_mf" : 'X\(/\)' \| . 2>/dev/null || +-printf "%s\n" X/"$am_mf" | ++$as_echo X/"$am_mf" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q +@@ -20133,8 +17506,8 @@ printf "%s\n" X/"$am_mf" | + (exit $ac_status); } || am_rc=$? + done + if test $am_rc -ne 0; then +- { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +-printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} ++ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 ++$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} + as_fn_error $? "Something went wrong bootstrapping makefile fragments + for automatic dependency tracking. If GNU make was not used, consider + re-running the configure script with MAKE=\"gmake\" (or whatever is +@@ -20152,53 +17525,54 @@ See \`config.log' for more details" "$LINENO" 5; } + ;; + "libtool":C) + +- # See if we are running on zsh, and set the options that allow our ++ # See if we are running on zsh, and set the options which allow our + # commands through without removal of \ escapes. +- if test -n "${ZSH_VERSION+set}"; then ++ if test -n "${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST + fi + +- cfgfile=${ofile}T ++ cfgfile="${ofile}T" + trap "$RM \"$cfgfile\"; exit 1" 1 2 15 + $RM "$cfgfile" + + cat <<_LT_EOF >> "$cfgfile" + #! $SHELL +-# Generated automatically by $as_me ($PACKAGE) $VERSION ++ ++# `$ECHO "$ofile" | sed 's%^.*/%%'` - Provide generalized library-building support services. ++# Generated automatically by $as_me ($PACKAGE$TIMESTAMP) $VERSION + # Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: + # NOTE: Changes made to this file will be lost: look at ltmain.sh. +- +-# Provide generalized library-building support services. 
+-# Written by Gordon Matzigkeit, 1996 +- +-# Copyright (C) 2014 Free Software Foundation, Inc. +-# This is free software; see the source for copying conditions. There is NO +-# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +- +-# GNU Libtool is free software; you can redistribute it and/or modify +-# it under the terms of the GNU General Public License as published by +-# the Free Software Foundation; either version 2 of of the License, or +-# (at your option) any later version. + # +-# As a special exception to the GNU General Public License, if you +-# distribute this file as part of a program or library that is built +-# using GNU Libtool, you may include this file under the same +-# distribution terms that you use for the rest of that program. ++# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, ++# 2006, 2007, 2008, 2009 Free Software Foundation, Inc. ++# Written by Gordon Matzigkeit, 1996 + # +-# GNU Libtool is distributed in the hope that it will be useful, but +-# WITHOUT ANY WARRANTY; without even the implied warranty of ++# This file is part of GNU Libtool. ++# ++# GNU Libtool is free software; you can redistribute it and/or ++# modify it under the terms of the GNU General Public License as ++# published by the Free Software Foundation; either version 2 of ++# the License, or (at your option) any later version. ++# ++# As a special exception to the GNU General Public License, ++# if you distribute this file as part of a program or library that ++# is built using GNU Libtool, you may include this file under the ++# same distribution terms that you use for the rest of that program. ++# ++# GNU Libtool is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of + # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + # GNU General Public License for more details. 
+ # + # You should have received a copy of the GNU General Public License +-# along with this program. If not, see . ++# along with GNU Libtool; see the file COPYING. If not, a copy ++# can be downloaded from http://www.gnu.org/licenses/gpl.html, or ++# obtained by writing to the Free Software Foundation, Inc., ++# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + + + # The names of the tagged configurations supported by this script. +-available_tags='CXX ' +- +-# Configured defaults for sys_lib_dlsearch_path munging. +-: \${LT_SYS_LIBRARY_PATH="$configure_time_lt_sys_library_path"} ++available_tags="CXX " + + # ### BEGIN LIBTOOL CONFIG + +@@ -20218,18 +17592,12 @@ pic_mode=$pic_mode + # Whether or not to optimize for fast installation. + fast_install=$enable_fast_install + +-# Shared archive member basename,for filename based shared library versioning on AIX. +-shared_archive_member_spec=$shared_archive_member_spec +- + # Shell to use when invoking shell scripts. + SHELL=$lt_SHELL + + # An echo program that protects backslashes. + ECHO=$lt_ECHO + +-# The PATH separator for the build system. +-PATH_SEPARATOR=$lt_PATH_SEPARATOR +- + # The host system. + host_alias=$host_alias + host=$host +@@ -20279,47 +17647,18 @@ SP2NL=$lt_lt_SP2NL + # turn newlines into spaces. + NL2SP=$lt_lt_NL2SP + +-# convert \$build file names to \$host format. +-to_host_file_cmd=$lt_cv_to_host_file_cmd +- +-# convert \$build files to toolchain format. +-to_tool_file_cmd=$lt_cv_to_tool_file_cmd +- +-# A file(cmd) program that detects file types. +-FILECMD=$lt_FILECMD +- + # An object symbol dumper. + OBJDUMP=$lt_OBJDUMP + + # Method to check whether dependent libraries are shared objects. + deplibs_check_method=$lt_deplibs_check_method + +-# Command to use when deplibs_check_method = "file_magic". ++# Command to use when deplibs_check_method == "file_magic". + file_magic_cmd=$lt_file_magic_cmd + +-# How to find potential files when deplibs_check_method = "file_magic". 
+-file_magic_glob=$lt_file_magic_glob +- +-# Find potential files using nocaseglob when deplibs_check_method = "file_magic". +-want_nocaseglob=$lt_want_nocaseglob +- +-# DLL creation program. +-DLLTOOL=$lt_DLLTOOL +- +-# Command to associate shared and link libraries. +-sharedlib_from_linklib_cmd=$lt_sharedlib_from_linklib_cmd +- + # The archiver. + AR=$lt_AR +- +-# Flags to create an archive (by configure). +-lt_ar_flags=$lt_ar_flags +- +-# Flags to create an archive. +-AR_FLAGS=\${ARFLAGS-"\$lt_ar_flags"} +- +-# How to feed a file listing to the archiver. +-archiver_list_spec=$lt_archiver_list_spec ++AR_FLAGS=$lt_AR_FLAGS + + # A symbol stripping program. + STRIP=$lt_STRIP +@@ -20344,27 +17683,12 @@ global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe + # Transform the output of nm in a proper C declaration. + global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl + +-# Transform the output of nm into a list of symbols to manually relocate. +-global_symbol_to_import=$lt_lt_cv_sys_global_symbol_to_import +- + # Transform the output of nm in a C name address pair. + global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address + + # Transform the output of nm in a C name address pair when lib prefix is needed. + global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix + +-# The name lister interface. +-nm_interface=$lt_lt_cv_nm_interface +- +-# Specify filename containing input files for \$NM. +-nm_file_list_spec=$lt_nm_file_list_spec +- +-# The root where to search for dependent libraries,and where our libraries should be installed. +-lt_sysroot=$lt_sysroot +- +-# Command to truncate a binary pipe. +-lt_truncate_bin=$lt_lt_cv_truncate_bin +- + # The name of the directory that contains temporary libtool files. + objdir=$objdir + +@@ -20374,9 +17698,6 @@ MAGIC_CMD=$MAGIC_CMD + # Must we lock files when doing compilation? + need_locks=$lt_need_locks + +-# Manifest tool. 
+-MANIFEST_TOOL=$lt_MANIFEST_TOOL +- + # Tool to manipulate archived DWARF debug symbol files on Mac OS X. + DSYMUTIL=$lt_DSYMUTIL + +@@ -20455,11 +17776,8 @@ hardcode_into_libs=$hardcode_into_libs + # Compile-time system search path for libraries. + sys_lib_search_path_spec=$lt_sys_lib_search_path_spec + +-# Detected run-time system search path for libraries. +-sys_lib_dlsearch_path_spec=$lt_configure_time_dlsearch_path +- +-# Explicit LT_SYS_LIBRARY_PATH set during ./configure time. +-configure_time_lt_sys_library_path=$lt_configure_time_lt_sys_library_path ++# Run-time system search path for libraries. ++sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec + + # Whether dlopen is supported. + dlopen_support=$enable_dlopen +@@ -20494,12 +17812,12 @@ with_gcc=$GCC + # Compiler flag to turn off builtin functions. + no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag + +-# Additional compiler flags for building library objects. +-pic_flag=$lt_lt_prog_compiler_pic +- + # How to pass a linker flag through the compiler. + wl=$lt_lt_prog_compiler_wl + ++# Additional compiler flags for building library objects. ++pic_flag=$lt_lt_prog_compiler_pic ++ + # Compiler flag to prevent dynamic linking. + link_static_flag=$lt_lt_prog_compiler_static + +@@ -20549,16 +17867,20 @@ no_undefined_flag=$lt_no_undefined_flag + # This must work even if \$libdir does not exist + hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec + ++# If ld is used when linking, flag to hardcode \$libdir into a binary ++# during linking. This must work even if \$libdir does not exist. ++hardcode_libdir_flag_spec_ld=$lt_hardcode_libdir_flag_spec_ld ++ + # Whether we need a single "-rpath" flag with a separated argument. + hardcode_libdir_separator=$lt_hardcode_libdir_separator + +-# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes ++# Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes + # DIR into the resulting binary. 
+ hardcode_direct=$hardcode_direct + +-# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes ++# Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes + # DIR into the resulting binary and the resulting library dependency is +-# "absolute",i.e impossible to change by setting \$shlibpath_var if the ++# "absolute",i.e impossible to change by setting \${shlibpath_var} if the + # library is relocated. + hardcode_direct_absolute=$hardcode_direct_absolute + +@@ -20582,6 +17904,9 @@ inherit_rpath=$inherit_rpath + # Whether libtool must link a program against all its dependency libraries. + link_all_deplibs=$link_all_deplibs + ++# Fix the shell variable \$srcfile for the compiler. ++fix_srcfile_path=$lt_fix_srcfile_path ++ + # Set to "yes" if exported symbols are required. + always_export_symbols=$always_export_symbols + +@@ -20597,9 +17922,6 @@ include_expsyms=$lt_include_expsyms + # Commands necessary for linking programs (against libraries) with templates. + prelink_cmds=$lt_prelink_cmds + +-# Commands necessary for finishing linking programs. +-postlink_cmds=$lt_postlink_cmds +- + # Specify filename containing input files. 
+ file_list_spec=$lt_file_list_spec + +@@ -20622,65 +17944,6 @@ compiler_lib_search_path=$lt_compiler_lib_search_path + + # ### END LIBTOOL CONFIG + +-_LT_EOF +- +- cat <<'_LT_EOF' >> "$cfgfile" +- +-# ### BEGIN FUNCTIONS SHARED WITH CONFIGURE +- +-# func_munge_path_list VARIABLE PATH +-# ----------------------------------- +-# VARIABLE is name of variable containing _space_ separated list of +-# directories to be munged by the contents of PATH, which is string +-# having a format: +-# "DIR[:DIR]:" +-# string "DIR[ DIR]" will be prepended to VARIABLE +-# ":DIR[:DIR]" +-# string "DIR[ DIR]" will be appended to VARIABLE +-# "DIRP[:DIRP]::[DIRA:]DIRA" +-# string "DIRP[ DIRP]" will be prepended to VARIABLE and string +-# "DIRA[ DIRA]" will be appended to VARIABLE +-# "DIR[:DIR]" +-# VARIABLE will be replaced by "DIR[ DIR]" +-func_munge_path_list () +-{ +- case x$2 in +- x) +- ;; +- *:) +- eval $1=\"`$ECHO $2 | $SED 's/:/ /g'` \$$1\" +- ;; +- x:*) +- eval $1=\"\$$1 `$ECHO $2 | $SED 's/:/ /g'`\" +- ;; +- *::*) +- eval $1=\"\$$1\ `$ECHO $2 | $SED -e 's/.*:://' -e 's/:/ /g'`\" +- eval $1=\"`$ECHO $2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \$$1\" +- ;; +- *) +- eval $1=\"`$ECHO $2 | $SED 's/:/ /g'`\" +- ;; +- esac +-} +- +- +-# Calculate cc_basename. Skip known compiler wrappers and cross-prefix. +-func_cc_basename () +-{ +- for cc_temp in $*""; do +- case $cc_temp in +- compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; +- distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; +- \-*) ;; +- *) break;; +- esac +- done +- func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` +-} +- +- +-# ### END FUNCTIONS SHARED WITH CONFIGURE +- + _LT_EOF + + case $host_os in +@@ -20689,7 +17952,7 @@ _LT_EOF + # AIX sometimes has problems with the GCC collect2 program. For some + # reason, if we set the COLLECT_NAMES environment variable, the problems + # vanish in a puff of smoke. 
+-if test set != "${COLLECT_NAMES+set}"; then ++if test "X${COLLECT_NAMES+set}" != Xset; then + COLLECT_NAMES= + export COLLECT_NAMES + fi +@@ -20698,18 +17961,217 @@ _LT_EOF + esac + + +- +-ltmain=$ac_aux_dir/ltmain.sh ++ltmain="$ac_aux_dir/ltmain.sh" + + + # We use sed instead of cat because bash on DJGPP gets confused if + # if finds mixed CR/LF and LF-only lines. Since sed operates in + # text mode, it properly converts lines to CR/LF. This bash problem + # is reportedly fixed, but why not run on old versions too? +- $SED '$q' "$ltmain" >> "$cfgfile" \ +- || (rm -f "$cfgfile"; exit 1) ++ sed '/^# Generated shell functions inserted here/q' "$ltmain" >> "$cfgfile" \ ++ || (rm -f "$cfgfile"; exit 1) ++ ++ case $xsi_shell in ++ yes) ++ cat << \_LT_EOF >> "$cfgfile" ++ ++# func_dirname file append nondir_replacement ++# Compute the dirname of FILE. If nonempty, add APPEND to the result, ++# otherwise set result to NONDIR_REPLACEMENT. ++func_dirname () ++{ ++ case ${1} in ++ */*) func_dirname_result="${1%/*}${2}" ;; ++ * ) func_dirname_result="${3}" ;; ++ esac ++} ++ ++# func_basename file ++func_basename () ++{ ++ func_basename_result="${1##*/}" ++} ++ ++# func_dirname_and_basename file append nondir_replacement ++# perform func_basename and func_dirname in a single function ++# call: ++# dirname: Compute the dirname of FILE. If nonempty, ++# add APPEND to the result, otherwise set result ++# to NONDIR_REPLACEMENT. ++# value returned in "$func_dirname_result" ++# basename: Compute filename of FILE. ++# value retuned in "$func_basename_result" ++# Implementation must be kept synchronized with func_dirname ++# and func_basename. For efficiency, we do not delegate to ++# those functions but instead duplicate the functionality here. 
++func_dirname_and_basename () ++{ ++ case ${1} in ++ */*) func_dirname_result="${1%/*}${2}" ;; ++ * ) func_dirname_result="${3}" ;; ++ esac ++ func_basename_result="${1##*/}" ++} ++ ++# func_stripname prefix suffix name ++# strip PREFIX and SUFFIX off of NAME. ++# PREFIX and SUFFIX must not contain globbing or regex special ++# characters, hashes, percent signs, but SUFFIX may contain a leading ++# dot (in which case that matches only a dot). ++func_stripname () ++{ ++ # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are ++ # positional parameters, so assign one to ordinary parameter first. ++ func_stripname_result=${3} ++ func_stripname_result=${func_stripname_result#"${1}"} ++ func_stripname_result=${func_stripname_result%"${2}"} ++} ++ ++# func_opt_split ++func_opt_split () ++{ ++ func_opt_split_opt=${1%%=*} ++ func_opt_split_arg=${1#*=} ++} ++ ++# func_lo2o object ++func_lo2o () ++{ ++ case ${1} in ++ *.lo) func_lo2o_result=${1%.lo}.${objext} ;; ++ *) func_lo2o_result=${1} ;; ++ esac ++} ++ ++# func_xform libobj-or-source ++func_xform () ++{ ++ func_xform_result=${1%.*}.lo ++} ++ ++# func_arith arithmetic-term... ++func_arith () ++{ ++ func_arith_result=$(( $* )) ++} + +- mv -f "$cfgfile" "$ofile" || ++# func_len string ++# STRING may not start with a hyphen. ++func_len () ++{ ++ func_len_result=${#1} ++} ++ ++_LT_EOF ++ ;; ++ *) # Bourne compatible functions. ++ cat << \_LT_EOF >> "$cfgfile" ++ ++# func_dirname file append nondir_replacement ++# Compute the dirname of FILE. If nonempty, add APPEND to the result, ++# otherwise set result to NONDIR_REPLACEMENT. ++func_dirname () ++{ ++ # Extract subdirectory from the argument. 
++ func_dirname_result=`$ECHO "${1}" | $SED "$dirname"` ++ if test "X$func_dirname_result" = "X${1}"; then ++ func_dirname_result="${3}" ++ else ++ func_dirname_result="$func_dirname_result${2}" ++ fi ++} ++ ++# func_basename file ++func_basename () ++{ ++ func_basename_result=`$ECHO "${1}" | $SED "$basename"` ++} ++ ++ ++# func_stripname prefix suffix name ++# strip PREFIX and SUFFIX off of NAME. ++# PREFIX and SUFFIX must not contain globbing or regex special ++# characters, hashes, percent signs, but SUFFIX may contain a leading ++# dot (in which case that matches only a dot). ++# func_strip_suffix prefix name ++func_stripname () ++{ ++ case ${2} in ++ .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;; ++ *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;; ++ esac ++} ++ ++# sed scripts: ++my_sed_long_opt='1s/^\(-[^=]*\)=.*/\1/;q' ++my_sed_long_arg='1s/^-[^=]*=//' ++ ++# func_opt_split ++func_opt_split () ++{ ++ func_opt_split_opt=`$ECHO "${1}" | $SED "$my_sed_long_opt"` ++ func_opt_split_arg=`$ECHO "${1}" | $SED "$my_sed_long_arg"` ++} ++ ++# func_lo2o object ++func_lo2o () ++{ ++ func_lo2o_result=`$ECHO "${1}" | $SED "$lo2o"` ++} ++ ++# func_xform libobj-or-source ++func_xform () ++{ ++ func_xform_result=`$ECHO "${1}" | $SED 's/\.[^.]*$/.lo/'` ++} ++ ++# func_arith arithmetic-term... ++func_arith () ++{ ++ func_arith_result=`expr "$@"` ++} ++ ++# func_len string ++# STRING may not start with a hyphen. ++func_len () ++{ ++ func_len_result=`expr "$1" : ".*" 2>/dev/null || echo $max_cmd_len` ++} ++ ++_LT_EOF ++esac ++ ++case $lt_shell_append in ++ yes) ++ cat << \_LT_EOF >> "$cfgfile" ++ ++# func_append var value ++# Append VALUE to the end of shell variable VAR. ++func_append () ++{ ++ eval "$1+=\$2" ++} ++_LT_EOF ++ ;; ++ *) ++ cat << \_LT_EOF >> "$cfgfile" ++ ++# func_append var value ++# Append VALUE to the end of shell variable VAR. 
++func_append () ++{ ++ eval "$1=\$$1\$2" ++} ++ ++_LT_EOF ++ ;; ++ esac ++ ++ ++ sed -n '/^# Generated shell functions inserted here/,$p' "$ltmain" >> "$cfgfile" \ ++ || (rm -f "$cfgfile"; exit 1) ++ ++ mv -f "$cfgfile" "$ofile" || + (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") + chmod +x "$ofile" + +@@ -20737,12 +18199,12 @@ with_gcc=$GCC_CXX + # Compiler flag to turn off builtin functions. + no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag_CXX + +-# Additional compiler flags for building library objects. +-pic_flag=$lt_lt_prog_compiler_pic_CXX +- + # How to pass a linker flag through the compiler. + wl=$lt_lt_prog_compiler_wl_CXX + ++# Additional compiler flags for building library objects. ++pic_flag=$lt_lt_prog_compiler_pic_CXX ++ + # Compiler flag to prevent dynamic linking. + link_static_flag=$lt_lt_prog_compiler_static_CXX + +@@ -20792,16 +18254,20 @@ no_undefined_flag=$lt_no_undefined_flag_CXX + # This must work even if \$libdir does not exist + hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec_CXX + ++# If ld is used when linking, flag to hardcode \$libdir into a binary ++# during linking. This must work even if \$libdir does not exist. ++hardcode_libdir_flag_spec_ld=$lt_hardcode_libdir_flag_spec_ld_CXX ++ + # Whether we need a single "-rpath" flag with a separated argument. + hardcode_libdir_separator=$lt_hardcode_libdir_separator_CXX + +-# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes ++# Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes + # DIR into the resulting binary. 
+ hardcode_direct=$hardcode_direct_CXX + +-# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes ++# Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes + # DIR into the resulting binary and the resulting library dependency is +-# "absolute",i.e impossible to change by setting \$shlibpath_var if the ++# "absolute",i.e impossible to change by setting \${shlibpath_var} if the + # library is relocated. + hardcode_direct_absolute=$hardcode_direct_absolute_CXX + +@@ -20825,6 +18291,9 @@ inherit_rpath=$inherit_rpath_CXX + # Whether libtool must link a program against all its dependency libraries. + link_all_deplibs=$link_all_deplibs_CXX + ++# Fix the shell variable \$srcfile for the compiler. ++fix_srcfile_path=$lt_fix_srcfile_path_CXX ++ + # Set to "yes" if exported symbols are required. + always_export_symbols=$always_export_symbols_CXX + +@@ -20840,9 +18309,6 @@ include_expsyms=$lt_include_expsyms_CXX + # Commands necessary for linking programs (against libraries) with templates. + prelink_cmds=$lt_prelink_cmds_CXX + +-# Commands necessary for finishing linking programs. +-postlink_cmds=$lt_postlink_cmds_CXX +- + # Specify filename containing input files. 
+ file_list_spec=$lt_file_list_spec_CXX + +@@ -20901,9 +18367,8 @@ if test "$no_create" != yes; then + $ac_cs_success || as_fn_exit 1 + fi + if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then +- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 +-printf "%s\n" "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} ++ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 ++$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} + fi + + +- +-- +2.33.0 + diff --git a/0055-Fix-atomic-issue-for-certain-CPU.patch b/0055-Fix-atomic-issue-for-certain-CPU.patch new file mode 100644 index 0000000000000000000000000000000000000000..12872aa2e01453fc5c6d9475309d1d2e8b5442a9 --- /dev/null +++ b/0055-Fix-atomic-issue-for-certain-CPU.patch @@ -0,0 +1,116 @@ +From 9f31937f95e6d4952c48f1c1afffefad7cfa7f4f Mon Sep 17 00:00:00 2001 +From: jiangzhiying +Date: Fri, 27 Sep 2024 12:38:42 +0800 +Subject: [PATCH] Fix atomic issue for certain CPU + +- Insert one 'fence' after 'amo' +- Insert one 'fence' after 'sc' +--- + gcc/config/riscv/sync.md | 14 +++++++++----- + libgcc/config/riscv/atomic.c | 2 ++ + 2 files changed, 11 insertions(+), 5 deletions(-) + +diff --git a/gcc/config/riscv/sync.md b/gcc/config/riscv/sync.md +index 9c4fbabc6..d79b48db7 100644 +--- a/gcc/config/riscv/sync.md ++++ b/gcc/config/riscv/sync.md +@@ -68,7 +68,7 @@ + (match_operand:SI 2 "const_int_operand")] ;; model + UNSPEC_ATOMIC_STORE))] + "TARGET_ATOMIC" +- "%F2amoswap.%A2 zero,%z1,%0" ++ "%F2amoswap.%A2 zero,%z1,%0; fence" + [(set (attr "length") (const_int 8))]) + + (define_insn "atomic_" +@@ -79,7 +79,7 @@ + (match_operand:SI 2 "const_int_operand")] ;; model + UNSPEC_SYNC_OLD_OP))] + "TARGET_ATOMIC" +- "%F2amo.%A2 zero,%z1,%0" ++ "%F2amo.%A2 zero,%z1,%0; fence" + [(set (attr "length") (const_int 8))]) + + (define_insn "atomic_fetch_" +@@ -92,7 +92,7 
@@ + (match_operand:SI 3 "const_int_operand")] ;; model + UNSPEC_SYNC_OLD_OP))] + "TARGET_ATOMIC" +- "%F3amo.%A3 %0,%z2,%1" ++ "%F3amo.%A3 %0,%z2,%1; fence" + [(set (attr "length") (const_int 8))]) + + (define_insn "subword_atomic_fetch_strong_" +@@ -116,6 +116,7 @@ + "and\t%6, %0, %4\;" + "or\t%6, %6, %5\;" + "sc.w.rl\t%5, %6, %1\;" ++ "fence\;" + "bnez\t%5, 1b"; + } + [(set (attr "length") (const_int 28))]) +@@ -181,6 +182,7 @@ + "and\t%6, %0, %4\;" + "or\t%6, %6, %5\;" + "sc.w.rl\t%5, %6, %1\;" ++ "fence\;" + "bnez\t%5, 1b"; + } + [(set (attr "length") (const_int 32))]) +@@ -233,7 +235,7 @@ + (set (match_dup 1) + (match_operand:GPR 2 "register_operand" "0"))] + "TARGET_ATOMIC" +- "%F3amoswap.%A3 %0,%z2,%1" ++ "%F3amoswap.%A3 %0,%z2,%1; fence" + [(set (attr "length") (const_int 8))]) + + (define_expand "atomic_exchange" +@@ -282,6 +284,7 @@ + "and\t%4, %0, %3\;" + "or\t%4, %4, %2\;" + "sc.w.rl\t%4, %4, %1\;" ++ "fence\;" + "bnez\t%4, 1b"; + } + [(set (attr "length") (const_int 20))]) +@@ -297,7 +300,7 @@ + UNSPEC_COMPARE_AND_SWAP)) + (clobber (match_scratch:GPR 6 "=&r"))] + "TARGET_ATOMIC" +- "%F5 1: lr.%A5 %0,%1; bne %0,%z2,1f; sc.%A4 %6,%z3,%1; bnez %6,1b; 1:" ++ "%F5 1: lr.%A5 %0,%1; bne %0,%z2,1f; sc.%A4 %6,%z3,%1; fence; bnez %6,1b; 1:" + [(set (attr "length") (const_int 20))]) + + (define_expand "atomic_compare_and_swap" +@@ -448,6 +451,7 @@ + "and\t%6, %0, %5\;" + "or\t%6, %6, %3\;" + "sc.w.rl\t%6, %6, %1\;" ++ "fence\;" + "bnez\t%6, 1b\;" + "1:"; + } +diff --git a/libgcc/config/riscv/atomic.c b/libgcc/config/riscv/atomic.c +index a29909b97..4a3b6937f 100644 +--- a/libgcc/config/riscv/atomic.c ++++ b/libgcc/config/riscv/atomic.c +@@ -48,6 +48,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively. 
If not, see + "and %[tmp2], %[old], %[not_mask]\n\t" \ + "or %[tmp2], %[tmp2], %[tmp1]\n\t" \ + "sc.w.rl %[tmp1], %[tmp2], %[mem]\n\t" \ ++ "fence\n\t" \ + "bnez %[tmp1], 1b" \ + : [old] "=&r" (old), \ + [mem] "+A" (*(volatile unsigned*) aligned_addr), \ +@@ -81,6 +82,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + "and %[tmp1], %[old], %[not_mask]\n\t" \ + "or %[tmp1], %[tmp1], %[n]\n\t" \ + "sc.w.rl %[tmp1], %[tmp1], %[mem]\n\t" \ ++ "fence\n\t" \ + "bnez %[tmp1], 1b\n\t" \ + "1:" \ + : [old] "=&r" (old), \ +-- +2.43.0 + diff --git a/Libvtv-Add-loongarch-support.patch b/Libvtv-Add-loongarch-support.patch new file mode 100644 index 0000000000000000000000000000000000000000..0350e5a56a900d5ac059e1b01dd9eed0636e81e4 --- /dev/null +++ b/Libvtv-Add-loongarch-support.patch @@ -0,0 +1,59 @@ +From 62ea18c632200edbbf46b4e957bc4d997f1c66f0 Mon Sep 17 00:00:00 2001 +From: Lulu Cheng +Date: Tue, 27 Sep 2022 15:28:43 +0800 +Subject: [PATCH 024/124] Libvtv: Add loongarch support. + +The loongarch64 specification permits page sizes of 4KiB, 16KiB and 64KiB, +but only 16KiB pages are supported for now. + +Co-Authored-By: qijingwen + +include/ChangeLog: + + * vtv-change-permission.h (defined): Determines whether the macro + __loongarch_lp64 is defined + (VTV_PAGE_SIZE): Set VTV_PAGE_SIZE to 16KiB for loongarch64. + +libvtv/ChangeLog: + + * configure.tgt: Add loongarch support. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + include/vtv-change-permission.h | 4 ++++ + libvtv/configure.tgt | 3 +++ + 2 files changed, 7 insertions(+) + +diff --git a/include/vtv-change-permission.h b/include/vtv-change-permission.h +index 70bdad92b..e7b9294a0 100644 +--- a/include/vtv-change-permission.h ++++ b/include/vtv-change-permission.h +@@ -48,6 +48,10 @@ extern void __VLTChangePermission (int); + #else + #if defined(__sun__) && defined(__svr4__) && defined(__sparc__) + #define VTV_PAGE_SIZE 8192 ++#elif defined(__loongarch_lp64) ++/* The page size is configurable by the kernel to be 4, 16 or 64 KiB. ++ For now, only the default page size of 16KiB is supported. */ ++#define VTV_PAGE_SIZE 16384 + #else + #define VTV_PAGE_SIZE 4096 + #endif +diff --git a/libvtv/configure.tgt b/libvtv/configure.tgt +index aa2a3f675..6cdd1e97a 100644 +--- a/libvtv/configure.tgt ++++ b/libvtv/configure.tgt +@@ -50,6 +50,9 @@ case "${target}" in + ;; + x86_64-*-darwin[1]* | i?86-*-darwin[1]*) + ;; ++ loongarch*-*-linux*) ++ VTV_SUPPORTED=yes ++ ;; + *) + ;; + esac +-- +2.33.0 + diff --git a/LoongArch-Add-Loongson-ASX-base-instruction-support.patch b/LoongArch-Add-Loongson-ASX-base-instruction-support.patch new file mode 100644 index 0000000000000000000000000000000000000000..2cc4dda131b28c6ddb8010d4a933d08e406516fb --- /dev/null +++ b/LoongArch-Add-Loongson-ASX-base-instruction-support.patch @@ -0,0 +1,8376 @@ +From 2f0874e6e6f5a866e71826983dc18295c408748b Mon Sep 17 00:00:00 2001 +From: Lulu Cheng +Date: Thu, 16 Mar 2023 16:34:08 +0800 +Subject: [PATCH 065/124] LoongArch: Add Loongson ASX base instruction support. + +gcc/ChangeLog: + + * config/loongarch/loongarch-modes.def + (VECTOR_MODES): Add Loongson ASX instruction support. + * config/loongarch/loongarch-protos.h (loongarch_split_256bit_move): Ditto. + (loongarch_split_256bit_move_p): Ditto. + (loongarch_expand_vector_group_init): Ditto. + (loongarch_expand_vec_perm_1): Ditto. 
+ * config/loongarch/loongarch.cc (loongarch_symbol_insns): Ditto. + (loongarch_valid_offset_p): Ditto. + (loongarch_address_insns): Ditto. + (loongarch_const_insns): Ditto. + (loongarch_legitimize_move): Ditto. + (loongarch_builtin_vectorization_cost): Ditto. + (loongarch_split_move_p): Ditto. + (loongarch_split_move): Ditto. + (loongarch_output_move_index_float): Ditto. + (loongarch_split_256bit_move_p): Ditto. + (loongarch_split_256bit_move): Ditto. + (loongarch_output_move): Ditto. + (loongarch_print_operand_reloc): Ditto. + (loongarch_print_operand): Ditto. + (loongarch_hard_regno_mode_ok_uncached): Ditto. + (loongarch_hard_regno_nregs): Ditto. + (loongarch_class_max_nregs): Ditto. + (loongarch_can_change_mode_class): Ditto. + (loongarch_mode_ok_for_mov_fmt_p): Ditto. + (loongarch_vector_mode_supported_p): Ditto. + (loongarch_preferred_simd_mode): Ditto. + (loongarch_autovectorize_vector_modes): Ditto. + (loongarch_lsx_output_division): Ditto. + (loongarch_expand_lsx_shuffle): Ditto. + (loongarch_expand_vec_perm): Ditto. + (loongarch_expand_vec_perm_interleave): Ditto. + (loongarch_try_expand_lsx_vshuf_const): Ditto. + (loongarch_expand_vec_perm_even_odd_1): Ditto. + (loongarch_expand_vec_perm_even_odd): Ditto. + (loongarch_expand_vec_perm_1): Ditto. + (loongarch_expand_vec_perm_const_2): Ditto. + (loongarch_is_quad_duplicate): Ditto. + (loongarch_is_double_duplicate): Ditto. + (loongarch_is_odd_extraction): Ditto. + (loongarch_is_even_extraction): Ditto. + (loongarch_is_extraction_permutation): Ditto. + (loongarch_is_center_extraction): Ditto. + (loongarch_is_reversing_permutation): Ditto. + (loongarch_is_di_misalign_extract): Ditto. + (loongarch_is_si_misalign_extract): Ditto. + (loongarch_is_lasx_lowpart_interleave): Ditto. + (loongarch_is_lasx_lowpart_interleave_2): Ditto. + (COMPARE_SELECTOR): Ditto. + (loongarch_is_lasx_lowpart_extract): Ditto. + (loongarch_is_lasx_highpart_interleave): Ditto. + (loongarch_is_lasx_highpart_interleave_2): Ditto. 
+ (loongarch_is_elem_duplicate): Ditto. + (loongarch_is_op_reverse_perm): Ditto. + (loongarch_is_single_op_perm): Ditto. + (loongarch_is_divisible_perm): Ditto. + (loongarch_is_triple_stride_extract): Ditto. + (loongarch_vectorize_vec_perm_const): Ditto. + (loongarch_cpu_sched_reassociation_width): Ditto. + (loongarch_expand_vector_extract): Ditto. + (emit_reduc_half): Ditto. + (loongarch_expand_vec_unpack): Ditto. + (loongarch_expand_vector_group_init): Ditto. + (loongarch_expand_vector_init): Ditto. + (loongarch_expand_lsx_cmp): Ditto. + (loongarch_builtin_support_vector_misalignment): Ditto. + * config/loongarch/loongarch.h (UNITS_PER_LASX_REG): Ditto. + (BITS_PER_LASX_REG): Ditto. + (STRUCTURE_SIZE_BOUNDARY): Ditto. + (LASX_REG_FIRST): Ditto. + (LASX_REG_LAST): Ditto. + (LASX_REG_NUM): Ditto. + (LASX_REG_P): Ditto. + (LASX_REG_RTX_P): Ditto. + (LASX_SUPPORTED_MODE_P): Ditto. + * config/loongarch/loongarch.md: Ditto. + * config/loongarch/lasx.md: New file. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/lasx.md | 5104 ++++++++++++++++++++++ + gcc/config/loongarch/loongarch-modes.def | 1 + + gcc/config/loongarch/loongarch-protos.h | 4 + + gcc/config/loongarch/loongarch.cc | 2567 ++++++++++- + gcc/config/loongarch/loongarch.h | 60 +- + gcc/config/loongarch/loongarch.md | 20 +- + 6 files changed, 7637 insertions(+), 119 deletions(-) + create mode 100644 gcc/config/loongarch/lasx.md + +diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md +new file mode 100644 +index 000000000..8111c8bb7 +--- /dev/null ++++ b/gcc/config/loongarch/lasx.md +@@ -0,0 +1,5104 @@ ++;; Machine Description for LARCH Loongson ASX ASE ++;; ++;; Copyright (C) 2018 Free Software Foundation, Inc. ++;; ++;; This file is part of GCC. 
++;; ++;; GCC is free software; you can redistribute it and/or modify ++;; it under the terms of the GNU General Public License as published by ++;; the Free Software Foundation; either version 3, or (at your option) ++;; any later version. ++;; ++;; GCC is distributed in the hope that it will be useful, ++;; but WITHOUT ANY WARRANTY; without even the implied warranty of ++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++;; GNU General Public License for more details. ++;; ++;; You should have received a copy of the GNU General Public License ++;; along with GCC; see the file COPYING3. If not see ++;; . ++;; ++ ++(define_c_enum "unspec" [ ++ UNSPEC_LASX_XVABSD_S ++ UNSPEC_LASX_XVABSD_U ++ UNSPEC_LASX_XVAVG_S ++ UNSPEC_LASX_XVAVG_U ++ UNSPEC_LASX_XVAVGR_S ++ UNSPEC_LASX_XVAVGR_U ++ UNSPEC_LASX_XVBITCLR ++ UNSPEC_LASX_XVBITCLRI ++ UNSPEC_LASX_XVBITREV ++ UNSPEC_LASX_XVBITREVI ++ UNSPEC_LASX_XVBITSET ++ UNSPEC_LASX_XVBITSETI ++ UNSPEC_LASX_XVFCMP_CAF ++ UNSPEC_LASX_XVFCLASS ++ UNSPEC_LASX_XVFCMP_CUNE ++ UNSPEC_LASX_XVFCVT ++ UNSPEC_LASX_XVFCVTH ++ UNSPEC_LASX_XVFCVTL ++ UNSPEC_LASX_XVFLOGB ++ UNSPEC_LASX_XVFRECIP ++ UNSPEC_LASX_XVFRINT ++ UNSPEC_LASX_XVFRSQRT ++ UNSPEC_LASX_XVFCMP_SAF ++ UNSPEC_LASX_XVFCMP_SEQ ++ UNSPEC_LASX_XVFCMP_SLE ++ UNSPEC_LASX_XVFCMP_SLT ++ UNSPEC_LASX_XVFCMP_SNE ++ UNSPEC_LASX_XVFCMP_SOR ++ UNSPEC_LASX_XVFCMP_SUEQ ++ UNSPEC_LASX_XVFCMP_SULE ++ UNSPEC_LASX_XVFCMP_SULT ++ UNSPEC_LASX_XVFCMP_SUN ++ UNSPEC_LASX_XVFCMP_SUNE ++ UNSPEC_LASX_XVFTINT_S ++ UNSPEC_LASX_XVFTINT_U ++ UNSPEC_LASX_XVCLO ++ UNSPEC_LASX_XVSAT_S ++ UNSPEC_LASX_XVSAT_U ++ UNSPEC_LASX_XVREPLVE0 ++ UNSPEC_LASX_XVREPL128VEI ++ UNSPEC_LASX_XVSRAR ++ UNSPEC_LASX_XVSRARI ++ UNSPEC_LASX_XVSRLR ++ UNSPEC_LASX_XVSRLRI ++ UNSPEC_LASX_XVSHUF ++ UNSPEC_LASX_XVSHUF_B ++ UNSPEC_LASX_BRANCH ++ UNSPEC_LASX_BRANCH_V ++ ++ UNSPEC_LASX_XVMUH_S ++ UNSPEC_LASX_XVMUH_U ++ UNSPEC_LASX_MXVEXTW_U ++ UNSPEC_LASX_XVSLLWIL_S ++ UNSPEC_LASX_XVSLLWIL_U ++ UNSPEC_LASX_XVSRAN ++ 
UNSPEC_LASX_XVSSRAN_S ++ UNSPEC_LASX_XVSSRAN_U ++ UNSPEC_LASX_XVSRARN ++ UNSPEC_LASX_XVSSRARN_S ++ UNSPEC_LASX_XVSSRARN_U ++ UNSPEC_LASX_XVSRLN ++ UNSPEC_LASX_XVSSRLN_U ++ UNSPEC_LASX_XVSRLRN ++ UNSPEC_LASX_XVSSRLRN_U ++ UNSPEC_LASX_XVFRSTPI ++ UNSPEC_LASX_XVFRSTP ++ UNSPEC_LASX_XVSHUF4I ++ UNSPEC_LASX_XVBSRL_V ++ UNSPEC_LASX_XVBSLL_V ++ UNSPEC_LASX_XVEXTRINS ++ UNSPEC_LASX_XVMSKLTZ ++ UNSPEC_LASX_XVSIGNCOV ++ UNSPEC_LASX_XVFTINTRNE_W_S ++ UNSPEC_LASX_XVFTINTRNE_L_D ++ UNSPEC_LASX_XVFTINTRP_W_S ++ UNSPEC_LASX_XVFTINTRP_L_D ++ UNSPEC_LASX_XVFTINTRM_W_S ++ UNSPEC_LASX_XVFTINTRM_L_D ++ UNSPEC_LASX_XVFTINT_W_D ++ UNSPEC_LASX_XVFFINT_S_L ++ UNSPEC_LASX_XVFTINTRZ_W_D ++ UNSPEC_LASX_XVFTINTRP_W_D ++ UNSPEC_LASX_XVFTINTRM_W_D ++ UNSPEC_LASX_XVFTINTRNE_W_D ++ UNSPEC_LASX_XVFTINTH_L_S ++ UNSPEC_LASX_XVFTINTL_L_S ++ UNSPEC_LASX_XVFFINTH_D_W ++ UNSPEC_LASX_XVFFINTL_D_W ++ UNSPEC_LASX_XVFTINTRZH_L_S ++ UNSPEC_LASX_XVFTINTRZL_L_S ++ UNSPEC_LASX_XVFTINTRPH_L_S ++ UNSPEC_LASX_XVFTINTRPL_L_S ++ UNSPEC_LASX_XVFTINTRMH_L_S ++ UNSPEC_LASX_XVFTINTRML_L_S ++ UNSPEC_LASX_XVFTINTRNEL_L_S ++ UNSPEC_LASX_XVFTINTRNEH_L_S ++ UNSPEC_LASX_XVFRINTRNE_S ++ UNSPEC_LASX_XVFRINTRNE_D ++ UNSPEC_LASX_XVFRINTRZ_S ++ UNSPEC_LASX_XVFRINTRZ_D ++ UNSPEC_LASX_XVFRINTRP_S ++ UNSPEC_LASX_XVFRINTRP_D ++ UNSPEC_LASX_XVFRINTRM_S ++ UNSPEC_LASX_XVFRINTRM_D ++ UNSPEC_LASX_XVREPLVE0_Q ++ UNSPEC_LASX_XVPERM_W ++ UNSPEC_LASX_XVPERMI_Q ++ UNSPEC_LASX_XVPERMI_D ++ ++ UNSPEC_LASX_XVADDWEV ++ UNSPEC_LASX_XVADDWEV2 ++ UNSPEC_LASX_XVADDWEV3 ++ UNSPEC_LASX_XVSUBWEV ++ UNSPEC_LASX_XVSUBWEV2 ++ UNSPEC_LASX_XVMULWEV ++ UNSPEC_LASX_XVMULWEV2 ++ UNSPEC_LASX_XVMULWEV3 ++ UNSPEC_LASX_XVADDWOD ++ UNSPEC_LASX_XVADDWOD2 ++ UNSPEC_LASX_XVADDWOD3 ++ UNSPEC_LASX_XVSUBWOD ++ UNSPEC_LASX_XVSUBWOD2 ++ UNSPEC_LASX_XVMULWOD ++ UNSPEC_LASX_XVMULWOD2 ++ UNSPEC_LASX_XVMULWOD3 ++ UNSPEC_LASX_XVMADDWEV ++ UNSPEC_LASX_XVMADDWEV2 ++ UNSPEC_LASX_XVMADDWEV3 ++ UNSPEC_LASX_XVMADDWOD ++ UNSPEC_LASX_XVMADDWOD2 ++ UNSPEC_LASX_XVMADDWOD3 ++ 
UNSPEC_LASX_XVHADDW_Q_D ++ UNSPEC_LASX_XVHSUBW_Q_D ++ UNSPEC_LASX_XVHADDW_QU_DU ++ UNSPEC_LASX_XVHSUBW_QU_DU ++ UNSPEC_LASX_XVROTR ++ UNSPEC_LASX_XVADD_Q ++ UNSPEC_LASX_XVSUB_Q ++ UNSPEC_LASX_XVREPLVE ++ UNSPEC_LASX_XVSHUF4 ++ UNSPEC_LASX_XVMSKGEZ ++ UNSPEC_LASX_XVMSKNZ ++ UNSPEC_LASX_XVEXTH_Q_D ++ UNSPEC_LASX_XVEXTH_QU_DU ++ UNSPEC_LASX_XVEXTL_Q_D ++ UNSPEC_LASX_XVSRLNI ++ UNSPEC_LASX_XVSRLRNI ++ UNSPEC_LASX_XVSSRLNI ++ UNSPEC_LASX_XVSSRLNI2 ++ UNSPEC_LASX_XVSSRLRNI ++ UNSPEC_LASX_XVSSRLRNI2 ++ UNSPEC_LASX_XVSRANI ++ UNSPEC_LASX_XVSRARNI ++ UNSPEC_LASX_XVSSRANI ++ UNSPEC_LASX_XVSSRANI2 ++ UNSPEC_LASX_XVSSRARNI ++ UNSPEC_LASX_XVSSRARNI2 ++ UNSPEC_LASX_XVPERMI ++ UNSPEC_LASX_XVINSVE0 ++ UNSPEC_LASX_XVPICKVE ++ UNSPEC_LASX_XVSSRLN ++ UNSPEC_LASX_XVSSRLRN ++ UNSPEC_LASX_XVEXTL_QU_DU ++ UNSPEC_LASX_XVLDI ++ UNSPEC_LASX_XVLDX ++ UNSPEC_LASX_XVSTX ++]) ++ ++;; All vector modes with 256 bits. ++(define_mode_iterator LASX [V4DF V8SF V4DI V8SI V16HI V32QI]) ++ ++;; Same as LASX. Used by vcond to iterate two modes. ++(define_mode_iterator LASX_2 [V4DF V8SF V4DI V8SI V16HI V32QI]) ++ ++;; Only used for splitting insert_d and copy_{u,s}.d. ++(define_mode_iterator LASX_D [V4DI V4DF]) ++ ++;; Only used for splitting insert_d and copy_{u,s}.d. ++(define_mode_iterator LASX_WD [V4DI V4DF V8SI V8SF]) ++ ++;; Only used for copy256_{u,s}.w. ++(define_mode_iterator LASX_W [V8SI V8SF]) ++ ++;; Only integer modes in LASX. ++(define_mode_iterator ILASX [V4DI V8SI V16HI V32QI]) ++ ++;; As ILASX but excludes V32QI. ++(define_mode_iterator ILASX_DWH [V4DI V8SI V16HI]) ++ ++;; As LASX but excludes V32QI. ++(define_mode_iterator LASX_DWH [V4DF V8SF V4DI V8SI V16HI]) ++ ++;; As ILASX but excludes V4DI. ++(define_mode_iterator ILASX_WHB [V8SI V16HI V32QI]) ++ ++;; Only integer modes equal or larger than a word. ++(define_mode_iterator ILASX_DW [V4DI V8SI]) ++ ++;; Only integer modes smaller than a word. ++(define_mode_iterator ILASX_HB [V16HI V32QI]) ++ ++;; Only floating-point modes in LASX. 
++(define_mode_iterator FLASX [V4DF V8SF]) ++ ++;; Only used for immediate set shuffle elements instruction. ++(define_mode_iterator LASX_WHB_W [V8SI V16HI V32QI V8SF]) ++ ++;; The attribute gives the integer vector mode with same size in Loongson ASX. ++(define_mode_attr VIMODE256 ++ [(V4DF "V4DI") ++ (V8SF "V8SI") ++ (V4DI "V4DI") ++ (V8SI "V8SI") ++ (V16HI "V16HI") ++ (V32QI "V32QI")]) ++ ++;;attribute gives half modes for vector modes. ++;;attribute gives half modes (Same Size) for vector modes. ++(define_mode_attr VHSMODE256 ++ [(V16HI "V32QI") ++ (V8SI "V16HI") ++ (V4DI "V8SI")]) ++ ++;;attribute gives half modes for vector modes. ++(define_mode_attr VHMODE256 ++ [(V32QI "V16QI") ++ (V16HI "V8HI") ++ (V8SI "V4SI") ++ (V4DI "V2DI")]) ++ ++;;attribute gives half float modes for vector modes. ++(define_mode_attr VFHMODE256 ++ [(V8SF "V4SF") ++ (V4DF "V2DF")]) ++ ++;; The attribute gives double modes for vector modes in LASX. ++(define_mode_attr VDMODE256 ++ [(V8SI "V4DI") ++ (V16HI "V8SI") ++ (V32QI "V16HI")]) ++ ++;; extended from VDMODE256 ++(define_mode_attr VDMODEEXD256 ++ [(V4DI "V4DI") ++ (V8SI "V4DI") ++ (V16HI "V8SI") ++ (V32QI "V16HI")]) ++ ++;; The attribute gives half modes with same number of elements for vector modes. ++(define_mode_attr VTRUNCMODE256 ++ [(V16HI "V16QI") ++ (V8SI "V8HI") ++ (V4DI "V4SI")]) ++ ++;; Double-sized Vector MODE with same elemet type. "Vector, Enlarged-MODE" ++(define_mode_attr VEMODE256 ++ [(V8SF "V16SF") ++ (V8SI "V16SI") ++ (V4DF "V8DF") ++ (V4DI "V8DI")]) ++ ++;; This attribute gives the mode of the result for "copy_s_b, copy_u_b" etc. ++(define_mode_attr VRES256 ++ [(V4DF "DF") ++ (V8SF "SF") ++ (V4DI "DI") ++ (V8SI "SI") ++ (V16HI "SI") ++ (V32QI "SI")]) ++ ++;; Only used with LASX_D iterator. ++(define_mode_attr lasx_d ++ [(V4DI "reg_or_0") ++ (V4DF "register")]) ++ ++;; This attribute gives the 256 bit integer vector mode with same size. 
++(define_mode_attr mode256_i ++ [(V4DF "v4di") ++ (V8SF "v8si") ++ (V4DI "v4di") ++ (V8SI "v8si") ++ (V16HI "v16hi") ++ (V32QI "v32qi")]) ++ ++ ++;; This attribute gives the 256 bit float vector mode with same size. ++(define_mode_attr mode256_f ++ [(V4DF "v4df") ++ (V8SF "v8sf") ++ (V4DI "v4df") ++ (V8SI "v8sf")]) ++ ++ ;; This attribute gives suffix for LASX instructions. HOW? ++(define_mode_attr lasxfmt ++ [(V4DF "d") ++ (V8SF "w") ++ (V4DI "d") ++ (V8SI "w") ++ (V16HI "h") ++ (V32QI "b")]) ++ ++(define_mode_attr flasxfmt ++ [(V4DF "d") ++ (V8SF "s")]) ++ ++(define_mode_attr lasxfmt_u ++ [(V4DF "du") ++ (V8SF "wu") ++ (V4DI "du") ++ (V8SI "wu") ++ (V16HI "hu") ++ (V32QI "bu")]) ++ ++(define_mode_attr ilasxfmt ++ [(V4DF "l") ++ (V8SF "w")]) ++ ++(define_mode_attr ilasxfmt_u ++ [(V4DF "lu") ++ (V8SF "wu")]) ++ ++;; This attribute gives suffix for integers in VHMODE256. ++(define_mode_attr hlasxfmt ++ [(V4DI "w") ++ (V8SI "h") ++ (V16HI "b")]) ++ ++(define_mode_attr hlasxfmt_u ++ [(V4DI "wu") ++ (V8SI "hu") ++ (V16HI "bu")]) ++ ++;; This attribute gives suffix for integers in VHSMODE256. ++(define_mode_attr hslasxfmt ++ [(V4DI "w") ++ (V8SI "h") ++ (V16HI "b")]) ++ ++;; This attribute gives define_insn suffix for LASX instructions that need ++;; distinction between integer and floating point. ++(define_mode_attr lasxfmt_f ++ [(V4DF "d_f") ++ (V8SF "w_f") ++ (V4DI "d") ++ (V8SI "w") ++ (V16HI "h") ++ (V32QI "b")]) ++ ++(define_mode_attr flasxfmt_f ++ [(V4DF "d_f") ++ (V8SF "s_f") ++ (V4DI "d") ++ (V8SI "w") ++ (V16HI "h") ++ (V32QI "b")]) ++ ++;; This attribute gives define_insn suffix for LASX instructions that need ++;; distinction between integer and floating point. ++(define_mode_attr lasxfmt_f_wd ++ [(V4DF "d_f") ++ (V8SF "w_f") ++ (V4DI "d") ++ (V8SI "w")]) ++ ++;; This attribute gives suffix for integers in VHMODE256. 
++(define_mode_attr dlasxfmt ++ [(V8SI "d") ++ (V16HI "w") ++ (V32QI "h")]) ++ ++(define_mode_attr dlasxfmt_u ++ [(V8SI "du") ++ (V16HI "wu") ++ (V32QI "hu")]) ++ ++;; for VDMODEEXD256 ++(define_mode_attr dlasxqfmt ++ [(V4DI "q") ++ (V8SI "d") ++ (V16HI "w") ++ (V32QI "h")]) ++ ++;; This is used to form an immediate operand constraint using ++;; "const__operand". ++(define_mode_attr indeximm256 ++ [(V4DF "0_to_3") ++ (V8SF "0_to_7") ++ (V4DI "0_to_3") ++ (V8SI "0_to_7") ++ (V16HI "uimm4") ++ (V32QI "uimm5")]) ++ ++;; This is used to form an immediate operand constraint using to ref high half ++;; "const__operand". ++(define_mode_attr indeximm_hi ++ [(V4DF "2_or_3") ++ (V8SF "4_to_7") ++ (V4DI "2_or_3") ++ (V8SI "4_to_7") ++ (V16HI "8_to_15") ++ (V32QI "16_to_31")]) ++ ++;; This is used to form an immediate operand constraint using to ref low half ++;; "const__operand". ++(define_mode_attr indeximm_lo ++ [(V4DF "0_or_1") ++ (V8SF "0_to_3") ++ (V4DI "0_or_1") ++ (V8SI "0_to_3") ++ (V16HI "uimm3") ++ (V32QI "uimm4")]) ++ ++;; This attribute represents bitmask needed for vec_merge using in lasx ++;; "const__operand". ++(define_mode_attr bitmask256 ++ [(V4DF "exp_4") ++ (V8SF "exp_8") ++ (V4DI "exp_4") ++ (V8SI "exp_8") ++ (V16HI "exp_16") ++ (V32QI "exp_32")]) ++ ++;; This attribute represents bitmask needed for vec_merge using to ref low half ++;; "const__operand". ++(define_mode_attr bitmask_lo ++ [(V4DF "exp_2") ++ (V8SF "exp_4") ++ (V4DI "exp_2") ++ (V8SI "exp_4") ++ (V16HI "exp_8") ++ (V32QI "exp_16")]) ++ ++ ++;; This attribute is used to form an immediate operand constraint using ++;; "const__operand". 
++(define_mode_attr bitimm256 ++ [(V32QI "uimm3") ++ (V16HI "uimm4") ++ (V8SI "uimm5") ++ (V4DI "uimm6")]) ++ ++ ++(define_mode_attr d2lasxfmt ++ [(V8SI "q") ++ (V16HI "d") ++ (V32QI "w")]) ++ ++(define_mode_attr d2lasxfmt_u ++ [(V8SI "qu") ++ (V16HI "du") ++ (V32QI "wu")]) ++ ++(define_mode_attr VD2MODE256 ++ [(V8SI "V4DI") ++ (V16HI "V4DI") ++ (V32QI "V8SI")]) ++ ++(define_mode_attr lasxfmt_wd ++ [(V4DI "d") ++ (V8SI "w") ++ (V16HI "w") ++ (V32QI "w")]) ++ ++(define_int_iterator FRINT256_S [UNSPEC_LASX_XVFRINTRP_S ++ UNSPEC_LASX_XVFRINTRZ_S ++ UNSPEC_LASX_XVFRINT ++ UNSPEC_LASX_XVFRINTRM_S]) ++ ++(define_int_iterator FRINT256_D [UNSPEC_LASX_XVFRINTRP_D ++ UNSPEC_LASX_XVFRINTRZ_D ++ UNSPEC_LASX_XVFRINT ++ UNSPEC_LASX_XVFRINTRM_D]) ++ ++(define_int_attr frint256_pattern_s ++ [(UNSPEC_LASX_XVFRINTRP_S "ceil") ++ (UNSPEC_LASX_XVFRINTRZ_S "btrunc") ++ (UNSPEC_LASX_XVFRINT "rint") ++ (UNSPEC_LASX_XVFRINTRM_S "floor")]) ++ ++(define_int_attr frint256_pattern_d ++ [(UNSPEC_LASX_XVFRINTRP_D "ceil") ++ (UNSPEC_LASX_XVFRINTRZ_D "btrunc") ++ (UNSPEC_LASX_XVFRINT "rint") ++ (UNSPEC_LASX_XVFRINTRM_D "floor")]) ++ ++(define_int_attr frint256_suffix ++ [(UNSPEC_LASX_XVFRINTRP_S "rp") ++ (UNSPEC_LASX_XVFRINTRP_D "rp") ++ (UNSPEC_LASX_XVFRINTRZ_S "rz") ++ (UNSPEC_LASX_XVFRINTRZ_D "rz") ++ (UNSPEC_LASX_XVFRINT "") ++ (UNSPEC_LASX_XVFRINTRM_S "rm") ++ (UNSPEC_LASX_XVFRINTRM_D "rm")]) ++ ++(define_expand "vec_init" ++ [(match_operand:LASX 0 "register_operand") ++ (match_operand:LASX 1 "")] ++ "ISA_HAS_LASX" ++{ ++ loongarch_expand_vector_init (operands[0], operands[1]); ++ DONE; ++}) ++ ++(define_expand "vec_initv32qiv16qi" ++ [(match_operand:V32QI 0 "register_operand") ++ (match_operand:V16QI 1 "")] ++ "ISA_HAS_LASX" ++{ ++ loongarch_expand_vector_group_init (operands[0], operands[1]); ++ DONE; ++}) ++ ++;; FIXME: Delete. 
++(define_insn "vec_pack_trunc_" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (vec_concat: ++ (truncate: ++ (match_operand:ILASX_DWH 1 "register_operand" "f")) ++ (truncate: ++ (match_operand:ILASX_DWH 2 "register_operand" "f"))))] ++ "ISA_HAS_LASX" ++ "xvpickev.\t%u0,%u2,%u1\n\txvpermi.d\t%u0,%u0,0xd8" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "") ++ (set_attr "length" "8")]) ++ ++(define_expand "vec_unpacks_hi_v8sf" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (float_extend:V4DF ++ (vec_select:V4SF ++ (match_operand:V8SF 1 "register_operand" "f") ++ (match_dup 2))))] ++ "ISA_HAS_LASX" ++{ ++ operands[2] = loongarch_lsx_vec_parallel_const_half (V8SFmode, ++ true/*high_p*/); ++}) ++ ++(define_expand "vec_unpacks_lo_v8sf" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (float_extend:V4DF ++ (vec_select:V4SF ++ (match_operand:V8SF 1 "register_operand" "f") ++ (match_dup 2))))] ++ "ISA_HAS_LASX" ++{ ++ operands[2] = loongarch_lsx_vec_parallel_const_half (V8SFmode, ++ false/*high_p*/); ++}) ++ ++(define_expand "vec_unpacks_hi_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:ILASX_WHB 1 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ loongarch_expand_vec_unpack (operands, false/*unsigned_p*/, ++ true/*high_p*/); ++ DONE; ++}) ++ ++(define_expand "vec_unpacks_lo_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:ILASX_WHB 1 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ loongarch_expand_vec_unpack (operands, false/*unsigned_p*/, false/*high_p*/); ++ DONE; ++}) ++ ++(define_expand "vec_unpacku_hi_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:ILASX_WHB 1 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ loongarch_expand_vec_unpack (operands, true/*unsigned_p*/, true/*high_p*/); ++ DONE; ++}) ++ ++(define_expand "vec_unpacku_lo_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:ILASX_WHB 1 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ loongarch_expand_vec_unpack (operands, 
true/*unsigned_p*/, false/*high_p*/); ++ DONE; ++}) ++ ++(define_insn "lasx_xvinsgr2vr_" ++ [(set (match_operand:ILASX_DW 0 "register_operand" "=f") ++ (vec_merge:ILASX_DW ++ (vec_duplicate:ILASX_DW ++ (match_operand: 1 "reg_or_0_operand" "rJ")) ++ (match_operand:ILASX_DW 2 "register_operand" "0") ++ (match_operand 3 "const__operand" "")))] ++ "ISA_HAS_LASX" ++{ ++#if 0 ++ if (!TARGET_64BIT && (mode == V4DImode || mode == V4DFmode)) ++ return "#"; ++ else ++#endif ++ return "xvinsgr2vr.\t%u0,%z1,%y3"; ++} ++ [(set_attr "type" "simd_insert") ++ (set_attr "mode" "")]) ++ ++(define_insn "vec_concatv4di" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (vec_concat:V4DI ++ (match_operand:V2DI 1 "register_operand" "0") ++ (match_operand:V2DI 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++{ ++ return "xvpermi.q\t%u0,%u2,0x20"; ++} ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "vec_concatv8si" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (vec_concat:V8SI ++ (match_operand:V4SI 1 "register_operand" "0") ++ (match_operand:V4SI 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++{ ++ return "xvpermi.q\t%u0,%u2,0x20"; ++} ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "vec_concatv16hi" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (vec_concat:V16HI ++ (match_operand:V8HI 1 "register_operand" "0") ++ (match_operand:V8HI 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++{ ++ return "xvpermi.q\t%u0,%u2,0x20"; ++} ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "vec_concatv32qi" ++ [(set (match_operand:V32QI 0 "register_operand" "=f") ++ (vec_concat:V32QI ++ (match_operand:V16QI 1 "register_operand" "0") ++ (match_operand:V16QI 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++{ ++ return "xvpermi.q\t%u0,%u2,0x20"; ++} ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "vec_concatv4df" ++ [(set 
(match_operand:V4DF 0 "register_operand" "=f") ++ (vec_concat:V4DF ++ (match_operand:V2DF 1 "register_operand" "0") ++ (match_operand:V2DF 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++{ ++ return "xvpermi.q\t%u0,%u2,0x20"; ++} ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "vec_concatv8sf" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (vec_concat:V8SF ++ (match_operand:V4SF 1 "register_operand" "0") ++ (match_operand:V4SF 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++{ ++ return "xvpermi.q\t%u0,%u2,0x20"; ++} ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "V4DI")]) ++ ++;; xshuf.w ++(define_insn "lasx_xvperm_" ++ [(set (match_operand:LASX_W 0 "register_operand" "=f") ++ (unspec:LASX_W ++ [(match_operand:LASX_W 1 "nonimmediate_operand" "f") ++ (match_operand:V8SI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVPERM_W))] ++ "ISA_HAS_LASX" ++ "xvperm.w\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "")]) ++ ++;; xvpermi.d ++(define_insn "lasx_xvpermi_d_" ++ [(set (match_operand:LASX 0 "register_operand" "=f") ++ (unspec:LASX ++ [(match_operand:LASX 1 "register_operand" "f") ++ (match_operand:SI 2 "const_uimm8_operand")] ++ UNSPEC_LASX_XVPERMI_D))] ++ "ISA_HAS_LASX" ++ "xvpermi.d\t%u0,%u1,%2" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvpermi_d__1" ++ [(set (match_operand:LASX_D 0 "register_operand" "=f") ++ (vec_select:LASX_D ++ (match_operand:LASX_D 1 "register_operand" "f") ++ (parallel [(match_operand 2 "const_0_to_3_operand") ++ (match_operand 3 "const_0_to_3_operand") ++ (match_operand 4 "const_0_to_3_operand") ++ (match_operand 5 "const_0_to_3_operand")])))] ++ "ISA_HAS_LASX" ++{ ++ int mask = 0; ++ mask |= INTVAL (operands[2]) << 0; ++ mask |= INTVAL (operands[3]) << 2; ++ mask |= INTVAL (operands[4]) << 4; ++ mask |= INTVAL (operands[5]) << 6; ++ operands[2] = GEN_INT (mask); ++ return "xvpermi.d\t%u0,%u1,%2"; ++} ++ [(set_attr "type" 
"simd_splat") ++ (set_attr "mode" "")]) ++ ++;; xvpermi.q ++(define_insn "lasx_xvpermi_q_" ++ [(set (match_operand:LASX 0 "register_operand" "=f") ++ (unspec:LASX ++ [(match_operand:LASX 1 "register_operand" "0") ++ (match_operand:LASX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand")] ++ UNSPEC_LASX_XVPERMI_Q))] ++ "ISA_HAS_LASX" ++ "xvpermi.q\t%u0,%u2,%3" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvpickve2gr_d" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (any_extend:DI ++ (vec_select:DI ++ (match_operand:V4DI 1 "register_operand" "f") ++ (parallel [(match_operand 2 "const_0_to_3_operand" "")]))))] ++ "ISA_HAS_LASX" ++ "xvpickve2gr.d\t%0,%u1,%2" ++ [(set_attr "type" "simd_copy") ++ (set_attr "mode" "V4DI")]) ++ ++(define_expand "vec_set" ++ [(match_operand:ILASX_DW 0 "register_operand") ++ (match_operand: 1 "reg_or_0_operand") ++ (match_operand 2 "const__operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx index = GEN_INT (1 << INTVAL (operands[2])); ++ emit_insn (gen_lasx_xvinsgr2vr_ (operands[0], operands[1], ++ operands[0], index)); ++ DONE; ++}) ++ ++(define_expand "vec_set" ++ [(match_operand:FLASX 0 "register_operand") ++ (match_operand: 1 "reg_or_0_operand") ++ (match_operand 2 "const__operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx index = GEN_INT (1 << INTVAL (operands[2])); ++ emit_insn (gen_lasx_xvinsve0__scalar (operands[0], operands[1], ++ operands[0], index)); ++ DONE; ++}) ++ ++(define_expand "vec_extract" ++ [(match_operand: 0 "register_operand") ++ (match_operand:LASX 1 "register_operand") ++ (match_operand 2 "const__operand")] ++ "ISA_HAS_LASX" ++{ ++ loongarch_expand_vector_extract (operands[0], operands[1], ++ INTVAL (operands[2])); ++ DONE; ++}) ++ ++(define_expand "vec_perm" ++ [(match_operand:LASX 0 "register_operand") ++ (match_operand:LASX 1 "register_operand") ++ (match_operand:LASX 2 "register_operand") ++ (match_operand: 3 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ 
loongarch_expand_vec_perm_1 (operands); ++ DONE; ++}) ++ ++;; FIXME: 256?? ++(define_expand "vcondu" ++ [(match_operand:LASX 0 "register_operand") ++ (match_operand:LASX 1 "reg_or_m1_operand") ++ (match_operand:LASX 2 "reg_or_0_operand") ++ (match_operator 3 "" ++ [(match_operand:ILASX 4 "register_operand") ++ (match_operand:ILASX 5 "register_operand")])] ++ "ISA_HAS_LASX ++ && (GET_MODE_NUNITS (mode) ++ == GET_MODE_NUNITS (mode))" ++{ ++ loongarch_expand_vec_cond_expr (mode, mode, ++ operands); ++ DONE; ++}) ++ ++;; FIXME: 256?? ++(define_expand "vcond" ++ [(match_operand:LASX 0 "register_operand") ++ (match_operand:LASX 1 "reg_or_m1_operand") ++ (match_operand:LASX 2 "reg_or_0_operand") ++ (match_operator 3 "" ++ [(match_operand:LASX_2 4 "register_operand") ++ (match_operand:LASX_2 5 "register_operand")])] ++ "ISA_HAS_LASX ++ && (GET_MODE_NUNITS (mode) ++ == GET_MODE_NUNITS (mode))" ++{ ++ loongarch_expand_vec_cond_expr (mode, mode, ++ operands); ++ DONE; ++}) ++ ++;; Same as vcond_ ++(define_expand "vcond_mask_" ++ [(match_operand:ILASX 0 "register_operand") ++ (match_operand:ILASX 1 "reg_or_m1_operand") ++ (match_operand:ILASX 2 "reg_or_0_operand") ++ (match_operand:ILASX 3 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ loongarch_expand_vec_cond_mask_expr (mode, ++ mode, operands); ++ DONE; ++}) ++ ++(define_expand "lasx_xvrepli" ++ [(match_operand:ILASX 0 "register_operand") ++ (match_operand 1 "const_imm10_operand")] ++ "ISA_HAS_LASX" ++{ ++ if (mode == V32QImode) ++ operands[1] = GEN_INT (trunc_int_for_mode (INTVAL (operands[1]), ++ mode)); ++ emit_move_insn (operands[0], ++ loongarch_gen_const_int_vector (mode, INTVAL (operands[1]))); ++ DONE; ++}) ++ ++(define_expand "mov" ++ [(set (match_operand:LASX 0) ++ (match_operand:LASX 1))] ++ "ISA_HAS_LASX" ++{ ++ if (loongarch_legitimize_move (mode, operands[0], operands[1])) ++ DONE; ++}) ++ ++ ++(define_expand "movmisalign" ++ [(set (match_operand:LASX 0) ++ (match_operand:LASX 1))] ++ "ISA_HAS_LASX" ++{ ++ if 
(loongarch_legitimize_move (mode, operands[0], operands[1])) ++ DONE; ++}) ++ ++;; 256-bit LASX modes can only exist in LASX registers or memory. ++(define_insn "mov_lasx" ++ [(set (match_operand:LASX 0 "nonimmediate_operand" "=f,f,R,*r,*f") ++ (match_operand:LASX 1 "move_operand" "fYGYI,R,f,*f,*r"))] ++ "ISA_HAS_LASX" ++ { return loongarch_output_move (operands[0], operands[1]); } ++ [(set_attr "type" "simd_move,simd_load,simd_store,simd_copy,simd_insert") ++ (set_attr "mode" "") ++ (set_attr "length" "8,4,4,4,4")]) ++ ++ ++(define_split ++ [(set (match_operand:LASX 0 "nonimmediate_operand") ++ (match_operand:LASX 1 "move_operand"))] ++ "reload_completed && ISA_HAS_LASX ++ && loongarch_split_move_insn_p (operands[0], operands[1])" ++ [(const_int 0)] ++{ ++ loongarch_split_move_insn (operands[0], operands[1], curr_insn); ++ DONE; ++}) ++ ++;; Offset load ++(define_expand "lasx_mxld_" ++ [(match_operand:LASX 0 "register_operand") ++ (match_operand 1 "pmode_register_operand") ++ (match_operand 2 "aq10_operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx addr = plus_constant (GET_MODE (operands[1]), operands[1], ++ INTVAL (operands[2])); ++ loongarch_emit_move (operands[0], gen_rtx_MEM (mode, addr)); ++ DONE; ++}) ++ ++;; Offset store ++(define_expand "lasx_mxst_" ++ [(match_operand:LASX 0 "register_operand") ++ (match_operand 1 "pmode_register_operand") ++ (match_operand 2 "aq10_operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx addr = plus_constant (GET_MODE (operands[1]), operands[1], ++ INTVAL (operands[2])); ++ loongarch_emit_move (gen_rtx_MEM (mode, addr), operands[0]); ++ DONE; ++}) ++ ++;; LASX ++(define_insn "add3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f,f,f") ++ (plus:ILASX ++ (match_operand:ILASX 1 "register_operand" "f,f,f") ++ (match_operand:ILASX 2 "reg_or_vector_same_ximm5_operand" "f,Unv5,Uuv5")))] ++ "ISA_HAS_LASX" ++{ ++ switch (which_alternative) ++ { ++ case 0: ++ return "xvadd.\t%u0,%u1,%u2"; ++ case 1: ++ { ++ HOST_WIDE_INT val = INTVAL (CONST_VECTOR_ELT 
(operands[2], 0)); ++ ++ operands[2] = GEN_INT (-val); ++ return "xvsubi.\t%u0,%u1,%d2"; ++ } ++ case 2: ++ return "xvaddi.\t%u0,%u1,%E2"; ++ default: ++ gcc_unreachable (); ++ } ++} ++ [(set_attr "alu_type" "simd_add") ++ (set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "sub3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f,f") ++ (minus:ILASX ++ (match_operand:ILASX 1 "register_operand" "f,f") ++ (match_operand:ILASX 2 "reg_or_vector_same_uimm5_operand" "f,Uuv5")))] ++ "ISA_HAS_LASX" ++ "@ ++ xvsub.\t%u0,%u1,%u2 ++ xvsubi.\t%u0,%u1,%E2" ++ [(set_attr "alu_type" "simd_add") ++ (set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "mul3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (mult:ILASX (match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvmul.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_mul") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvmadd_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (plus:ILASX (mult:ILASX (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand:ILASX 3 "register_operand" "f")) ++ (match_operand:ILASX 1 "register_operand" "0")))] ++ "ISA_HAS_LASX" ++ "xvmadd.\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_mul") ++ (set_attr "mode" "")]) ++ ++ ++ ++(define_insn "lasx_xvmsub_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (minus:ILASX (match_operand:ILASX 1 "register_operand" "0") ++ (mult:ILASX (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand:ILASX 3 "register_operand" "f"))))] ++ "ISA_HAS_LASX" ++ "xvmsub.\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_mul") ++ (set_attr "mode" "")]) ++ ++(define_insn "div3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (div:ILASX (match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++{ ++ return loongarch_lsx_output_division 
("xvdiv.\t%u0,%u1,%u2", ++ operands); ++} ++ [(set_attr "type" "simd_div") ++ (set_attr "mode" "")]) ++ ++(define_insn "udiv3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (udiv:ILASX (match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++{ ++ return loongarch_lsx_output_division ("xvdiv.\t%u0,%u1,%u2", ++ operands); ++} ++ [(set_attr "type" "simd_div") ++ (set_attr "mode" "")]) ++ ++(define_insn "mod3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (mod:ILASX (match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++{ ++ return loongarch_lsx_output_division ("xvmod.\t%u0,%u1,%u2", ++ operands); ++} ++ [(set_attr "type" "simd_div") ++ (set_attr "mode" "")]) ++ ++(define_insn "umod3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (umod:ILASX (match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++{ ++ return loongarch_lsx_output_division ("xvmod.\t%u0,%u1,%u2", ++ operands); ++} ++ [(set_attr "type" "simd_div") ++ (set_attr "mode" "")]) ++ ++(define_insn "xor3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f,f,f") ++ (xor:ILASX ++ (match_operand:ILASX 1 "register_operand" "f,f,f") ++ (match_operand:ILASX 2 "reg_or_vector_same_val_operand" "f,YC,Urv8")))] ++ "ISA_HAS_LASX" ++ "@ ++ xvxor.v\t%u0,%u1,%u2 ++ xvbitrevi.%v0\t%u0,%u1,%V2 ++ xvxori.b\t%u0,%u1,%B2" ++ [(set_attr "type" "simd_logic,simd_bit,simd_logic") ++ (set_attr "mode" "")]) ++ ++(define_insn "ior3" ++ [(set (match_operand:LASX 0 "register_operand" "=f,f,f") ++ (ior:LASX ++ (match_operand:LASX 1 "register_operand" "f,f,f") ++ (match_operand:LASX 2 "reg_or_vector_same_val_operand" "f,YC,Urv8")))] ++ "ISA_HAS_LASX" ++ "@ ++ xvor.v\t%u0,%u1,%u2 ++ xvbitseti.%v0\t%u0,%u1,%V2 ++ xvori.b\t%u0,%u1,%B2" ++ [(set_attr "type" "simd_logic,simd_bit,simd_logic") ++ (set_attr "mode" 
"")]) ++ ++(define_insn "and3" ++ [(set (match_operand:LASX 0 "register_operand" "=f,f,f") ++ (and:LASX ++ (match_operand:LASX 1 "register_operand" "f,f,f") ++ (match_operand:LASX 2 "reg_or_vector_same_val_operand" "f,YZ,Urv8")))] ++ "ISA_HAS_LASX" ++{ ++ switch (which_alternative) ++ { ++ case 0: ++ return "xvand.v\t%u0,%u1,%u2"; ++ case 1: ++ { ++ rtx elt0 = CONST_VECTOR_ELT (operands[2], 0); ++ unsigned HOST_WIDE_INT val = ~UINTVAL (elt0); ++ operands[2] = loongarch_gen_const_int_vector (mode, val & (-val)); ++ return "xvbitclri.%v0\t%u0,%u1,%V2"; ++ } ++ case 2: ++ return "xvandi.b\t%u0,%u1,%B2"; ++ default: ++ gcc_unreachable (); ++ } ++} ++ [(set_attr "type" "simd_logic,simd_bit,simd_logic") ++ (set_attr "mode" "")]) ++ ++(define_insn "one_cmpl2" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (not:ILASX (match_operand:ILASX 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvnor.v\t%u0,%u1,%u1" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "V32QI")]) ++ ++;; LASX ++(define_insn "vlshr3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f,f") ++ (lshiftrt:ILASX ++ (match_operand:ILASX 1 "register_operand" "f,f") ++ (match_operand:ILASX 2 "reg_or_vector_same_uimm6_operand" "f,Uuv6")))] ++ "ISA_HAS_LASX" ++ "@ ++ xvsrl.\t%u0,%u1,%u2 ++ xvsrli.\t%u0,%u1,%E2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++;; LASX ">>" ++(define_insn "vashr3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f,f") ++ (ashiftrt:ILASX ++ (match_operand:ILASX 1 "register_operand" "f,f") ++ (match_operand:ILASX 2 "reg_or_vector_same_uimm6_operand" "f,Uuv6")))] ++ "ISA_HAS_LASX" ++ "@ ++ xvsra.\t%u0,%u1,%u2 ++ xvsrai.\t%u0,%u1,%E2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++;; LASX "<<" ++(define_insn "vashl3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f,f") ++ (ashift:ILASX ++ (match_operand:ILASX 1 "register_operand" "f,f") ++ (match_operand:ILASX 2 "reg_or_vector_same_uimm6_operand" "f,Uuv6")))] ++ 
"ISA_HAS_LASX" ++ "@ ++ xvsll.\t%u0,%u1,%u2 ++ xvslli.\t%u0,%u1,%E2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++ ++(define_insn "add3" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (plus:FLASX (match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvfadd.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_fadd") ++ (set_attr "mode" "")]) ++ ++(define_insn "sub3" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (minus:FLASX (match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvfsub.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_fadd") ++ (set_attr "mode" "")]) ++ ++(define_insn "mul3" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (mult:FLASX (match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvfmul.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_fmul") ++ (set_attr "mode" "")]) ++ ++(define_insn "div3" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (div:FLASX (match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvfdiv.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_fdiv") ++ (set_attr "mode" "")]) ++ ++(define_insn "fma4" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (fma:FLASX (match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 2 "register_operand" "f") ++ (match_operand:FLASX 3 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvfmadd.\t%u0,%u1,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "")]) ++ ++(define_insn "fnma4" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (fma:FLASX (neg:FLASX (match_operand:FLASX 1 "register_operand" "f")) ++ (match_operand:FLASX 2 "register_operand" "f") ++ (match_operand:FLASX 3 "register_operand" "0")))] ++ "ISA_HAS_LASX" ++ 
"xvfnmsub.\t%u0,%u1,%u2,%u0" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "")]) ++ ++(define_insn "sqrt2" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (sqrt:FLASX (match_operand:FLASX 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvfsqrt.\t%u0,%u1" ++ [(set_attr "type" "simd_fdiv") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvadda_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (plus:ILASX (abs:ILASX (match_operand:ILASX 1 "register_operand" "f")) ++ (abs:ILASX (match_operand:ILASX 2 "register_operand" "f"))))] ++ "ISA_HAS_LASX" ++ "xvadda.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "ssadd3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (ss_plus:ILASX (match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvsadd.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "usadd3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (us_plus:ILASX (match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvsadd.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvabsd_s_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVABSD_S))] ++ "ISA_HAS_LASX" ++ "xvabsd.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvabsd_u_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVABSD_U))] ++ "ISA_HAS_LASX" ++ "xvabsd.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ 
++(define_insn "lasx_xvavg_s_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVAVG_S))] ++ "ISA_HAS_LASX" ++ "xvavg.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvavg_u_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVAVG_U))] ++ "ISA_HAS_LASX" ++ "xvavg.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvavgr_s_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVAVGR_S))] ++ "ISA_HAS_LASX" ++ "xvavgr.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvavgr_u_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVAVGR_U))] ++ "ISA_HAS_LASX" ++ "xvavgr.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvbitclr_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVBITCLR))] ++ "ISA_HAS_LASX" ++ "xvbitclr.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvbitclri_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LASX_XVBITCLRI))] ++ "ISA_HAS_LASX" ++ "xvbitclri.\t%u0,%u1,%2" ++ [(set_attr 
"type" "simd_bit") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvbitrev_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVBITREV))] ++ "ISA_HAS_LASX" ++ "xvbitrev.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvbitrevi_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LASX_XVBITREVI))] ++ "ISA_HAS_LASX" ++ "xvbitrevi.\t%u0,%u1,%2" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvbitsel_" ++ [(set (match_operand:LASX 0 "register_operand" "=f") ++ (ior:LASX (and:LASX (not:LASX ++ (match_operand:LASX 3 "register_operand" "f")) ++ (match_operand:LASX 1 "register_operand" "f")) ++ (and:LASX (match_dup 3) ++ (match_operand:LASX 2 "register_operand" "f"))))] ++ "ISA_HAS_LASX" ++ "xvbitsel.v\t%u0,%u1,%u2,%u3" ++ [(set_attr "type" "simd_bitmov") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvbitseli_b" ++ [(set (match_operand:V32QI 0 "register_operand" "=f") ++ (ior:V32QI (and:V32QI (not:V32QI ++ (match_operand:V32QI 1 "register_operand" "0")) ++ (match_operand:V32QI 2 "register_operand" "f")) ++ (and:V32QI (match_dup 1) ++ (match_operand:V32QI 3 "const_vector_same_val_operand" "Urv8"))))] ++ "ISA_HAS_LASX" ++ "xvbitseli.b\t%u0,%u2,%B3" ++ [(set_attr "type" "simd_bitmov") ++ (set_attr "mode" "V32QI")]) ++ ++(define_insn "lasx_xvbitset_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVBITSET))] ++ "ISA_HAS_LASX" ++ "xvbitset.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvbitseti_" ++ [(set (match_operand:ILASX 0 
"register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LASX_XVBITSETI))] ++ "ISA_HAS_LASX" ++ "xvbitseti.\t%u0,%u1,%2" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvs_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f,f") ++ (ICC:ILASX ++ (match_operand:ILASX 1 "register_operand" "f,f") ++ (match_operand:ILASX 2 "reg_or_vector_same_imm5_operand" "f,Uv5")))] ++ "ISA_HAS_LASX" ++ "@ ++ xvs.\t%u0,%u1,%u2 ++ xvs.\t%u0,%u1,%E2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_expand "vec_cmp" ++ [(set (match_operand: 0 "register_operand") ++ (match_operator 1 "" ++ [(match_operand:LASX 2 "register_operand") ++ (match_operand:LASX 3 "register_operand")]))] ++ "ISA_HAS_LASX" ++{ ++ bool ok = loongarch_expand_vec_cmp (operands); ++ gcc_assert (ok); ++ DONE; ++}) ++ ++(define_expand "vec_cmpu" ++ [(set (match_operand: 0 "register_operand") ++ (match_operator 1 "" ++ [(match_operand:ILASX 2 "register_operand") ++ (match_operand:ILASX 3 "register_operand")]))] ++ "ISA_HAS_LASX" ++{ ++ bool ok = loongarch_expand_vec_cmp (operands); ++ gcc_assert (ok); ++ DONE; ++}) ++ ++(define_insn "lasx_xvfclass_" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:FLASX 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFCLASS))] ++ "ISA_HAS_LASX" ++ "xvfclass.\t%u0,%u1" ++ [(set_attr "type" "simd_fclass") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvfcmp_caf_" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVFCMP_CAF))] ++ "ISA_HAS_LASX" ++ "xvfcmp.caf.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_fcmp") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvfcmp_cune_" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:FLASX 1 "register_operand" "f") ++ 
(match_operand:FLASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVFCMP_CUNE))] ++ "ISA_HAS_LASX" ++ "xvfcmp.cune.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_fcmp") ++ (set_attr "mode" "")]) ++ ++ ++ ++(define_int_iterator FSC256_UNS [UNSPEC_LASX_XVFCMP_SAF UNSPEC_LASX_XVFCMP_SUN ++ UNSPEC_LASX_XVFCMP_SOR UNSPEC_LASX_XVFCMP_SEQ ++ UNSPEC_LASX_XVFCMP_SNE UNSPEC_LASX_XVFCMP_SUEQ ++ UNSPEC_LASX_XVFCMP_SUNE UNSPEC_LASX_XVFCMP_SULE ++ UNSPEC_LASX_XVFCMP_SULT UNSPEC_LASX_XVFCMP_SLE ++ UNSPEC_LASX_XVFCMP_SLT]) ++ ++(define_int_attr fsc256 ++ [(UNSPEC_LASX_XVFCMP_SAF "saf") ++ (UNSPEC_LASX_XVFCMP_SUN "sun") ++ (UNSPEC_LASX_XVFCMP_SOR "sor") ++ (UNSPEC_LASX_XVFCMP_SEQ "seq") ++ (UNSPEC_LASX_XVFCMP_SNE "sne") ++ (UNSPEC_LASX_XVFCMP_SUEQ "sueq") ++ (UNSPEC_LASX_XVFCMP_SUNE "sune") ++ (UNSPEC_LASX_XVFCMP_SULE "sule") ++ (UNSPEC_LASX_XVFCMP_SULT "sult") ++ (UNSPEC_LASX_XVFCMP_SLE "sle") ++ (UNSPEC_LASX_XVFCMP_SLT "slt")]) ++ ++(define_insn "lasx_xvfcmp__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (vfcond: (match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvfcmp..\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_fcmp") ++ (set_attr "mode" "")]) ++ ++ ++(define_insn "lasx_xvfcmp__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 2 "register_operand" "f")] ++ FSC256_UNS))] ++ "ISA_HAS_LASX" ++ "xvfcmp..\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_fcmp") ++ (set_attr "mode" "")]) ++ ++ ++(define_mode_attr fint256 ++ [(V8SF "v8si") ++ (V4DF "v4di")]) ++ ++(define_mode_attr FINTCNV256 ++ [(V8SF "I2S") ++ (V4DF "I2D")]) ++ ++(define_mode_attr FINTCNV256_2 ++ [(V8SF "S2I") ++ (V4DF "D2I")]) ++ ++(define_insn "float2" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (float:FLASX (match_operand: 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvffint..\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "cnv_mode" 
"") ++ (set_attr "mode" "")]) ++ ++(define_insn "floatuns2" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (unsigned_float:FLASX ++ (match_operand: 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvffint..\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "cnv_mode" "") ++ (set_attr "mode" "")]) ++ ++(define_mode_attr FFQ256 ++ [(V4SF "V16HI") ++ (V2DF "V8SI")]) ++ ++(define_insn "lasx_xvreplgr2vr_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f,f") ++ (vec_duplicate:ILASX ++ (match_operand: 1 "reg_or_0_operand" "r,J")))] ++ "ISA_HAS_LASX" ++{ ++ if (which_alternative == 1) ++ return "xvldi.b\t%u0,0" ; ++ ++ if (!TARGET_64BIT && (mode == V2DImode || mode == V2DFmode)) ++ return "#"; ++ else ++ return "xvreplgr2vr.\t%u0,%z1"; ++} ++ [(set_attr "type" "simd_fill") ++ (set_attr "mode" "") ++ (set_attr "length" "8")]) ++ ++(define_insn "logb2" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFLOGB))] ++ "ISA_HAS_LASX" ++ "xvflogb.\t%u0,%u1" ++ [(set_attr "type" "simd_flog2") ++ (set_attr "mode" "")]) ++ ++ ++(define_insn "smax3" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (smax:FLASX (match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvfmax.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_fminmax") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvfmaxa_" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (if_then_else:FLASX ++ (gt (abs:FLASX (match_operand:FLASX 1 "register_operand" "f")) ++ (abs:FLASX (match_operand:FLASX 2 "register_operand" "f"))) ++ (match_dup 1) ++ (match_dup 2)))] ++ "ISA_HAS_LASX" ++ "xvfmaxa.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_fminmax") ++ (set_attr "mode" "")]) ++ ++(define_insn "smin3" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (smin:FLASX (match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 
2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvfmin.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_fminmax") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvfmina_" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (if_then_else:FLASX ++ (lt (abs:FLASX (match_operand:FLASX 1 "register_operand" "f")) ++ (abs:FLASX (match_operand:FLASX 2 "register_operand" "f"))) ++ (match_dup 1) ++ (match_dup 2)))] ++ "ISA_HAS_LASX" ++ "xvfmina.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_fminmax") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvfrecip_" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFRECIP))] ++ "ISA_HAS_LASX" ++ "xvfrecip.\t%u0,%u1" ++ [(set_attr "type" "simd_fdiv") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvfrint_" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFRINT))] ++ "ISA_HAS_LASX" ++ "xvfrint.\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvfrsqrt_" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFRSQRT))] ++ "ISA_HAS_LASX" ++ "xvfrsqrt.\t%u0,%u1" ++ [(set_attr "type" "simd_fdiv") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvftint_s__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:FLASX 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINT_S))] ++ "ISA_HAS_LASX" ++ "xvftint..\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "cnv_mode" "") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvftint_u__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:FLASX 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINT_U))] ++ "ISA_HAS_LASX" ++ "xvftint..\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "cnv_mode" "") ++ (set_attr "mode" "")]) 
++ ++ ++ ++(define_insn "fix_trunc2" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (fix: (match_operand:FLASX 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvftintrz..\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "cnv_mode" "") ++ (set_attr "mode" "")]) ++ ++ ++(define_insn "fixuns_trunc2" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unsigned_fix: (match_operand:FLASX 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvftintrz..\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "cnv_mode" "") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvhw_h_b" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (addsub:V16HI ++ (any_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 1 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15) ++ (const_int 17) (const_int 19) ++ (const_int 21) (const_int 23) ++ (const_int 25) (const_int 27) ++ (const_int 29) (const_int 31)]))) ++ (any_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14) ++ (const_int 16) (const_int 18) ++ (const_int 20) (const_int 22) ++ (const_int 24) (const_int 26) ++ (const_int 28) (const_int 30)])))))] ++ "ISA_HAS_LASX" ++ "xvhw.h.b\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V16HI")]) ++ ++(define_insn "lasx_xvhw_w_h" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (addsub:V8SI ++ (any_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 1 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)]))) ++ (any_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ 
(const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)])))))] ++ "ISA_HAS_LASX" ++ "xvhw.w.h\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V8SI")]) ++ ++(define_insn "lasx_xvhw_d_w" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (addsub:V4DI ++ (any_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 1 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)]))) ++ (any_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)])))))] ++ "ISA_HAS_LASX" ++ "xvhw.d.w\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "lasx_xvpackev_b" ++ [(set (match_operand:V32QI 0 "register_operand" "=f") ++ (vec_select:V32QI ++ (vec_concat:V64QI ++ (match_operand:V32QI 1 "register_operand" "f") ++ (match_operand:V32QI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 32) ++ (const_int 2) (const_int 34) ++ (const_int 4) (const_int 36) ++ (const_int 6) (const_int 38) ++ (const_int 8) (const_int 40) ++ (const_int 10) (const_int 42) ++ (const_int 12) (const_int 44) ++ (const_int 14) (const_int 46) ++ (const_int 16) (const_int 48) ++ (const_int 18) (const_int 50) ++ (const_int 20) (const_int 52) ++ (const_int 22) (const_int 54) ++ (const_int 24) (const_int 56) ++ (const_int 26) (const_int 58) ++ (const_int 28) (const_int 60) ++ (const_int 30) (const_int 62)])))] ++ "ISA_HAS_LASX" ++ "xvpackev.b\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V32QI")]) ++ ++ ++(define_insn "lasx_xvpackev_h" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (vec_select:V16HI ++ (vec_concat:V32HI ++ (match_operand:V16HI 1 "register_operand" "f") ++ (match_operand:V16HI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 16) ++ (const_int 2) (const_int 18) ++ (const_int 4) 
(const_int 20) ++ (const_int 6) (const_int 22) ++ (const_int 8) (const_int 24) ++ (const_int 10) (const_int 26) ++ (const_int 12) (const_int 28) ++ (const_int 14) (const_int 30)])))] ++ "ISA_HAS_LASX" ++ "xvpackev.h\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V16HI")]) ++ ++(define_insn "lasx_xvpackev_w" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (vec_select:V8SI ++ (vec_concat:V16SI ++ (match_operand:V8SI 1 "register_operand" "f") ++ (match_operand:V8SI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 8) ++ (const_int 2) (const_int 10) ++ (const_int 4) (const_int 12) ++ (const_int 6) (const_int 14)])))] ++ "ISA_HAS_LASX" ++ "xvpackev.w\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8SI")]) ++ ++(define_insn "lasx_xvpackev_w_f" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (vec_select:V8SF ++ (vec_concat:V16SF ++ (match_operand:V8SF 1 "register_operand" "f") ++ (match_operand:V8SF 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 8) ++ (const_int 2) (const_int 10) ++ (const_int 4) (const_int 12) ++ (const_int 6) (const_int 14)])))] ++ "ISA_HAS_LASX" ++ "xvpackev.w\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvilvh_b" ++ [(set (match_operand:V32QI 0 "register_operand" "=f") ++ (vec_select:V32QI ++ (vec_concat:V64QI ++ (match_operand:V32QI 1 "register_operand" "f") ++ (match_operand:V32QI 2 "register_operand" "f")) ++ (parallel [(const_int 8) (const_int 40) ++ (const_int 9) (const_int 41) ++ (const_int 10) (const_int 42) ++ (const_int 11) (const_int 43) ++ (const_int 12) (const_int 44) ++ (const_int 13) (const_int 45) ++ (const_int 14) (const_int 46) ++ (const_int 15) (const_int 47) ++ (const_int 24) (const_int 56) ++ (const_int 25) (const_int 57) ++ (const_int 26) (const_int 58) ++ (const_int 27) (const_int 59) ++ (const_int 28) (const_int 60) ++ (const_int 29) (const_int 61) ++ (const_int 
30) (const_int 62) ++ (const_int 31) (const_int 63)])))] ++ "ISA_HAS_LASX" ++ "xvilvh.b\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V32QI")]) ++ ++(define_insn "lasx_xvilvh_h" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (vec_select:V16HI ++ (vec_concat:V32HI ++ (match_operand:V16HI 1 "register_operand" "f") ++ (match_operand:V16HI 2 "register_operand" "f")) ++ (parallel [(const_int 4) (const_int 20) ++ (const_int 5) (const_int 21) ++ (const_int 6) (const_int 22) ++ (const_int 7) (const_int 23) ++ (const_int 12) (const_int 28) ++ (const_int 13) (const_int 29) ++ (const_int 14) (const_int 30) ++ (const_int 15) (const_int 31)])))] ++ "ISA_HAS_LASX" ++ "xvilvh.h\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V16HI")]) ++ ++(define_mode_attr xvilvh_suffix ++ [(V8SI "") (V8SF "_f") ++ (V4DI "") (V4DF "_f")]) ++ ++(define_insn "lasx_xvilvh_w" ++ [(set (match_operand:LASX_W 0 "register_operand" "=f") ++ (vec_select:LASX_W ++ (vec_concat: ++ (match_operand:LASX_W 1 "register_operand" "f") ++ (match_operand:LASX_W 2 "register_operand" "f")) ++ (parallel [(const_int 2) (const_int 10) ++ (const_int 3) (const_int 11) ++ (const_int 6) (const_int 14) ++ (const_int 7) (const_int 15)])))] ++ "ISA_HAS_LASX" ++ "xvilvh.w\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvilvh_d" ++ [(set (match_operand:LASX_D 0 "register_operand" "=f") ++ (vec_select:LASX_D ++ (vec_concat: ++ (match_operand:LASX_D 1 "register_operand" "f") ++ (match_operand:LASX_D 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 5) ++ (const_int 3) (const_int 7)])))] ++ "ISA_HAS_LASX" ++ "xvilvh.d\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvpackod_b" ++ [(set (match_operand:V32QI 0 "register_operand" "=f") ++ (vec_select:V32QI ++ (vec_concat:V64QI ++ (match_operand:V32QI 1 "register_operand" "f") ++ (match_operand:V32QI 2 
"register_operand" "f")) ++ (parallel [(const_int 1) (const_int 33) ++ (const_int 3) (const_int 35) ++ (const_int 5) (const_int 37) ++ (const_int 7) (const_int 39) ++ (const_int 9) (const_int 41) ++ (const_int 11) (const_int 43) ++ (const_int 13) (const_int 45) ++ (const_int 15) (const_int 47) ++ (const_int 17) (const_int 49) ++ (const_int 19) (const_int 51) ++ (const_int 21) (const_int 53) ++ (const_int 23) (const_int 55) ++ (const_int 25) (const_int 57) ++ (const_int 27) (const_int 59) ++ (const_int 29) (const_int 61) ++ (const_int 31) (const_int 63)])))] ++ "ISA_HAS_LASX" ++ "xvpackod.b\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V32QI")]) ++ ++ ++(define_insn "lasx_xvpackod_h" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (vec_select:V16HI ++ (vec_concat:V32HI ++ (match_operand:V16HI 1 "register_operand" "f") ++ (match_operand:V16HI 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 17) ++ (const_int 3) (const_int 19) ++ (const_int 5) (const_int 21) ++ (const_int 7) (const_int 23) ++ (const_int 9) (const_int 25) ++ (const_int 11) (const_int 27) ++ (const_int 13) (const_int 29) ++ (const_int 15) (const_int 31)])))] ++ "ISA_HAS_LASX" ++ "xvpackod.h\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V16HI")]) ++ ++ ++(define_insn "lasx_xvpackod_w" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (vec_select:V8SI ++ (vec_concat:V16SI ++ (match_operand:V8SI 1 "register_operand" "f") ++ (match_operand:V8SI 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 9) ++ (const_int 3) (const_int 11) ++ (const_int 5) (const_int 13) ++ (const_int 7) (const_int 15)])))] ++ "ISA_HAS_LASX" ++ "xvpackod.w\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8SI")]) ++ ++ ++(define_insn "lasx_xvpackod_w_f" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (vec_select:V8SF ++ (vec_concat:V16SF ++ (match_operand:V8SF 1 "register_operand" "f") ++ 
(match_operand:V8SF 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 9) ++ (const_int 3) (const_int 11) ++ (const_int 5) (const_int 13) ++ (const_int 7) (const_int 15)])))] ++ "ISA_HAS_LASX" ++ "xvpackod.w\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvilvl_b" ++ [(set (match_operand:V32QI 0 "register_operand" "=f") ++ (vec_select:V32QI ++ (vec_concat:V64QI ++ (match_operand:V32QI 1 "register_operand" "f") ++ (match_operand:V32QI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 32) ++ (const_int 1) (const_int 33) ++ (const_int 2) (const_int 34) ++ (const_int 3) (const_int 35) ++ (const_int 4) (const_int 36) ++ (const_int 5) (const_int 37) ++ (const_int 6) (const_int 38) ++ (const_int 7) (const_int 39) ++ (const_int 16) (const_int 48) ++ (const_int 17) (const_int 49) ++ (const_int 18) (const_int 50) ++ (const_int 19) (const_int 51) ++ (const_int 20) (const_int 52) ++ (const_int 21) (const_int 53) ++ (const_int 22) (const_int 54) ++ (const_int 23) (const_int 55)])))] ++ "ISA_HAS_LASX" ++ "xvilvl.b\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V32QI")]) ++ ++(define_insn "lasx_xvilvl_h" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (vec_select:V16HI ++ (vec_concat:V32HI ++ (match_operand:V16HI 1 "register_operand" "f") ++ (match_operand:V16HI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 16) ++ (const_int 1) (const_int 17) ++ (const_int 2) (const_int 18) ++ (const_int 3) (const_int 19) ++ (const_int 8) (const_int 24) ++ (const_int 9) (const_int 25) ++ (const_int 10) (const_int 26) ++ (const_int 11) (const_int 27)])))] ++ "ISA_HAS_LASX" ++ "xvilvl.h\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V16HI")]) ++ ++(define_insn "lasx_xvilvl_w" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (vec_select:V8SI ++ (vec_concat:V16SI ++ (match_operand:V8SI 1 "register_operand" "f") ++ 
(match_operand:V8SI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 8) ++ (const_int 1) (const_int 9) ++ (const_int 4) (const_int 12) ++ (const_int 5) (const_int 13)])))] ++ "ISA_HAS_LASX" ++ "xvilvl.w\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8SI")]) ++ ++(define_insn "lasx_xvilvl_w_f" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (vec_select:V8SF ++ (vec_concat:V16SF ++ (match_operand:V8SF 1 "register_operand" "f") ++ (match_operand:V8SF 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 8) ++ (const_int 1) (const_int 9) ++ (const_int 4) (const_int 12) ++ (const_int 5) (const_int 13)])))] ++ "ISA_HAS_LASX" ++ "xvilvl.w\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvilvl_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (vec_select:V4DI ++ (vec_concat:V8DI ++ (match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 4) ++ (const_int 2) (const_int 6)])))] ++ "ISA_HAS_LASX" ++ "xvilvl.d\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "lasx_xvilvl_d_f" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (vec_select:V4DF ++ (vec_concat:V8DF ++ (match_operand:V4DF 1 "register_operand" "f") ++ (match_operand:V4DF 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 4) ++ (const_int 2) (const_int 6)])))] ++ "ISA_HAS_LASX" ++ "xvilvl.d\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "smax3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f,f") ++ (smax:ILASX (match_operand:ILASX 1 "register_operand" "f,f") ++ (match_operand:ILASX 2 "reg_or_vector_same_simm5_operand" "f,Usv5")))] ++ "ISA_HAS_LASX" ++ "@ ++ xvmax.\t%u0,%u1,%u2 ++ xvmaxi.\t%u0,%u1,%E2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ 
++(define_insn "umax3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f,f") ++ (umax:ILASX (match_operand:ILASX 1 "register_operand" "f,f") ++ (match_operand:ILASX 2 "reg_or_vector_same_uimm5_operand" "f,Uuv5")))] ++ "ISA_HAS_LASX" ++ "@ ++ xvmax.\t%u0,%u1,%u2 ++ xvmaxi.\t%u0,%u1,%B2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "smin3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f,f") ++ (smin:ILASX (match_operand:ILASX 1 "register_operand" "f,f") ++ (match_operand:ILASX 2 "reg_or_vector_same_simm5_operand" "f,Usv5")))] ++ "ISA_HAS_LASX" ++ "@ ++ xvmin.\t%u0,%u1,%u2 ++ xvmini.\t%u0,%u1,%E2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "umin3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f,f") ++ (umin:ILASX (match_operand:ILASX 1 "register_operand" "f,f") ++ (match_operand:ILASX 2 "reg_or_vector_same_uimm5_operand" "f,Uuv5")))] ++ "ISA_HAS_LASX" ++ "@ ++ xvmin.\t%u0,%u1,%u2 ++ xvmini.\t%u0,%u1,%B2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvclo_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (clz:ILASX (not:ILASX (match_operand:ILASX 1 "register_operand" "f"))))] ++ "ISA_HAS_LASX" ++ "xvclo.\t%u0,%u1" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) ++ ++(define_insn "clz2" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (clz:ILASX (match_operand:ILASX 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvclz.\t%u0,%u1" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvnor_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f,f") ++ (and:ILASX (not:ILASX (match_operand:ILASX 1 "register_operand" "f,f")) ++ (not:ILASX (match_operand:ILASX 2 "reg_or_vector_same_val_operand" "f,Urv8"))))] ++ "ISA_HAS_LASX" ++ "@ ++ xvnor.v\t%u0,%u1,%u2 ++ xvnori.b\t%u0,%u1,%B2" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "")]) ++ ++(define_insn 
"lasx_xvpickev_b" ++ [(set (match_operand:V32QI 0 "register_operand" "=f") ++ (vec_select:V32QI ++ (vec_concat:V64QI ++ (match_operand:V32QI 1 "register_operand" "f") ++ (match_operand:V32QI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14) ++ (const_int 32) (const_int 34) ++ (const_int 36) (const_int 38) ++ (const_int 40) (const_int 42) ++ (const_int 44) (const_int 46) ++ (const_int 16) (const_int 18) ++ (const_int 20) (const_int 22) ++ (const_int 24) (const_int 26) ++ (const_int 28) (const_int 30) ++ (const_int 48) (const_int 50) ++ (const_int 52) (const_int 54) ++ (const_int 56) (const_int 58) ++ (const_int 60) (const_int 62)])))] ++ "ISA_HAS_LASX" ++ "xvpickev.b\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V32QI")]) ++ ++(define_insn "lasx_xvpickev_h" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (vec_select:V16HI ++ (vec_concat:V32HI ++ (match_operand:V16HI 1 "register_operand" "f") ++ (match_operand:V16HI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 16) (const_int 18) ++ (const_int 20) (const_int 22) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14) ++ (const_int 24) (const_int 26) ++ (const_int 28) (const_int 30)])))] ++ "ISA_HAS_LASX" ++ "xvpickev.h\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V16HI")]) ++ ++(define_insn "lasx_xvpickev_w" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (vec_select:V8SI ++ (vec_concat:V16SI ++ (match_operand:V8SI 1 "register_operand" "f") ++ (match_operand:V8SI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 8) (const_int 10) ++ (const_int 4) (const_int 6) ++ (const_int 12) (const_int 14)])))] ++ "ISA_HAS_LASX" ++ "xvpickev.w\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8SI")]) ++ 
++(define_insn "lasx_xvpickev_w_f" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (vec_select:V8SF ++ (vec_concat:V16SF ++ (match_operand:V8SF 1 "register_operand" "f") ++ (match_operand:V8SF 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 8) (const_int 10) ++ (const_int 4) (const_int 6) ++ (const_int 12) (const_int 14)])))] ++ "ISA_HAS_LASX" ++ "xvpickev.w\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvpickod_b" ++ [(set (match_operand:V32QI 0 "register_operand" "=f") ++ (vec_select:V32QI ++ (vec_concat:V64QI ++ (match_operand:V32QI 1 "register_operand" "f") ++ (match_operand:V32QI 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15) ++ (const_int 33) (const_int 35) ++ (const_int 37) (const_int 39) ++ (const_int 41) (const_int 43) ++ (const_int 45) (const_int 47) ++ (const_int 17) (const_int 19) ++ (const_int 21) (const_int 23) ++ (const_int 25) (const_int 27) ++ (const_int 29) (const_int 31) ++ (const_int 49) (const_int 51) ++ (const_int 53) (const_int 55) ++ (const_int 57) (const_int 59) ++ (const_int 61) (const_int 63)])))] ++ "ISA_HAS_LASX" ++ "xvpickod.b\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V32QI")]) ++ ++(define_insn "lasx_xvpickod_h" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (vec_select:V16HI ++ (vec_concat:V32HI ++ (match_operand:V16HI 1 "register_operand" "f") ++ (match_operand:V16HI 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 17) (const_int 19) ++ (const_int 21) (const_int 23) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15) ++ (const_int 25) (const_int 27) ++ (const_int 29) (const_int 31)])))] ++ "ISA_HAS_LASX" ++ "xvpickod.h\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" 
"V16HI")]) ++ ++(define_insn "lasx_xvpickod_w" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (vec_select:V8SI ++ (vec_concat:V16SI ++ (match_operand:V8SI 1 "register_operand" "f") ++ (match_operand:V8SI 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 9) (const_int 11) ++ (const_int 5) (const_int 7) ++ (const_int 13) (const_int 15)])))] ++ "ISA_HAS_LASX" ++ "xvpickod.w\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8SI")]) ++ ++(define_insn "lasx_xvpickod_w_f" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (vec_select:V8SF ++ (vec_concat:V16SF ++ (match_operand:V8SF 1 "register_operand" "f") ++ (match_operand:V8SF 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 9) (const_int 11) ++ (const_int 5) (const_int 7) ++ (const_int 13) (const_int 15)])))] ++ "ISA_HAS_LASX" ++ "xvpickod.w\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "popcount2" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (popcount:ILASX (match_operand:ILASX 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvpcnt.\t%u0,%u1" ++ [(set_attr "type" "simd_pcnt") ++ (set_attr "mode" "")]) ++ ++ ++(define_insn "lasx_xvsat_s_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LASX_XVSAT_S))] ++ "ISA_HAS_LASX" ++ "xvsat.\t%u0,%u1,%2" ++ [(set_attr "type" "simd_sat") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvsat_u_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LASX_XVSAT_U))] ++ "ISA_HAS_LASX" ++ "xvsat.\t%u0,%u1,%2" ++ [(set_attr "type" "simd_sat") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvshuf4i_" ++ [(set (match_operand:LASX_WHB_W 0 "register_operand" "=f") ++ 
(unspec:LASX_WHB_W [(match_operand:LASX_WHB_W 1 "register_operand" "f") ++ (match_operand 2 "const_uimm8_operand")] ++ UNSPEC_LASX_XVSHUF4I))] ++ "ISA_HAS_LASX" ++ "xvshuf4i.\t%u0,%u1,%2" ++ [(set_attr "type" "simd_shf") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvshuf4i__1" ++ [(set (match_operand:LASX_W 0 "register_operand" "=f") ++ (vec_select:LASX_W ++ (match_operand:LASX_W 1 "nonimmediate_operand" "f") ++ (parallel [(match_operand 2 "const_0_to_3_operand") ++ (match_operand 3 "const_0_to_3_operand") ++ (match_operand 4 "const_0_to_3_operand") ++ (match_operand 5 "const_0_to_3_operand") ++ (match_operand 6 "const_4_to_7_operand") ++ (match_operand 7 "const_4_to_7_operand") ++ (match_operand 8 "const_4_to_7_operand") ++ (match_operand 9 "const_4_to_7_operand")])))] ++ "ISA_HAS_LASX ++ && INTVAL (operands[2]) + 4 == INTVAL (operands[6]) ++ && INTVAL (operands[3]) + 4 == INTVAL (operands[7]) ++ && INTVAL (operands[4]) + 4 == INTVAL (operands[8]) ++ && INTVAL (operands[5]) + 4 == INTVAL (operands[9])" ++{ ++ int mask = 0; ++ mask |= INTVAL (operands[2]) << 0; ++ mask |= INTVAL (operands[3]) << 2; ++ mask |= INTVAL (operands[4]) << 4; ++ mask |= INTVAL (operands[5]) << 6; ++ operands[2] = GEN_INT (mask); ++ ++ return "xvshuf4i.w\t%u0,%u1,%2"; ++} ++ [(set_attr "type" "simd_shf") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvsrar_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSRAR))] ++ "ISA_HAS_LASX" ++ "xvsrar.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvsrari_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LASX_XVSRARI))] ++ "ISA_HAS_LASX" ++ "xvsrari.\t%u0,%u1,%2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) 
++ ++(define_insn "lasx_xvsrlr_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSRLR))] ++ "ISA_HAS_LASX" ++ "xvsrlr.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvsrlri_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LASX_XVSRLRI))] ++ "ISA_HAS_LASX" ++ "xvsrlri.\t%u0,%u1,%2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssub_s_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (ss_minus:ILASX (match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvssub.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssub_u_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (us_minus:ILASX (match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvssub.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvshuf_" ++ [(set (match_operand:LASX_DWH 0 "register_operand" "=f") ++ (unspec:LASX_DWH [(match_operand:LASX_DWH 1 "register_operand" "0") ++ (match_operand:LASX_DWH 2 "register_operand" "f") ++ (match_operand:LASX_DWH 3 "register_operand" "f")] ++ UNSPEC_LASX_XVSHUF))] ++ "ISA_HAS_LASX" ++ "xvshuf.\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_sld") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvshuf_b" ++ [(set (match_operand:V32QI 0 "register_operand" "=f") ++ (unspec:V32QI [(match_operand:V32QI 1 "register_operand" "f") ++ (match_operand:V32QI 2 "register_operand" "f") ++ (match_operand:V32QI 3 "register_operand" "f")] ++ UNSPEC_LASX_XVSHUF_B))] ++ 
"ISA_HAS_LASX" ++ "xvshuf.b\t%u0,%u1,%u2,%u3" ++ [(set_attr "type" "simd_sld") ++ (set_attr "mode" "V32QI")]) ++ ++(define_insn "lasx_xvreplve0_" ++ [(set (match_operand:LASX 0 "register_operand" "=f") ++ (vec_duplicate:LASX ++ (vec_select: ++ (match_operand:LASX 1 "register_operand" "f") ++ (parallel [(const_int 0)]))))] ++ "ISA_HAS_LASX" ++ "xvreplve0.\t%u0,%u1" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvrepl128vei_b_internal" ++ [(set (match_operand:V32QI 0 "register_operand" "=f") ++ (vec_duplicate:V32QI ++ (vec_select:V32QI ++ (match_operand:V32QI 1 "register_operand" "f") ++ (parallel [(match_operand 2 "const_uimm4_operand" "") ++ (match_dup 2) (match_dup 2) (match_dup 2) ++ (match_dup 2) (match_dup 2) (match_dup 2) ++ (match_dup 2) (match_dup 2) (match_dup 2) ++ (match_dup 2) (match_dup 2) (match_dup 2) ++ (match_dup 2) (match_dup 2) (match_dup 2) ++ (match_operand 3 "const_16_to_31_operand" "") ++ (match_dup 3) (match_dup 3) (match_dup 3) ++ (match_dup 3) (match_dup 3) (match_dup 3) ++ (match_dup 3) (match_dup 3) (match_dup 3) ++ (match_dup 3) (match_dup 3) (match_dup 3) ++ (match_dup 3) (match_dup 3) (match_dup 3)]))))] ++ "ISA_HAS_LASX && ((INTVAL (operands[3]) - INTVAL (operands[2])) == 16)" ++ "xvrepl128vei.b\t%u0,%u1,%2" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "V32QI")]) ++ ++(define_insn "lasx_xvrepl128vei_h_internal" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (vec_duplicate:V16HI ++ (vec_select:V16HI ++ (match_operand:V16HI 1 "register_operand" "f") ++ (parallel [(match_operand 2 "const_uimm3_operand" "") ++ (match_dup 2) (match_dup 2) (match_dup 2) ++ (match_dup 2) (match_dup 2) (match_dup 2) ++ (match_dup 2) ++ (match_operand 3 "const_8_to_15_operand" "") ++ (match_dup 3) (match_dup 3) (match_dup 3) ++ (match_dup 3) (match_dup 3) (match_dup 3) ++ (match_dup 3)]))))] ++ "ISA_HAS_LASX && ((INTVAL (operands[3]) - INTVAL (operands[2])) == 8)" ++ "xvrepl128vei.h\t%u0,%u1,%2" ++ 
[(set_attr "type" "simd_splat") ++ (set_attr "mode" "V16HI")]) ++ ++(define_insn "lasx_xvrepl128vei_w_internal" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (vec_duplicate:V8SI ++ (vec_select:V8SI ++ (match_operand:V8SI 1 "register_operand" "f") ++ (parallel [(match_operand 2 "const_0_to_3_operand" "") ++ (match_dup 2) (match_dup 2) (match_dup 2) ++ (match_operand 3 "const_4_to_7_operand" "") ++ (match_dup 3) (match_dup 3) (match_dup 3)]))))] ++ "ISA_HAS_LASX && ((INTVAL (operands[3]) - INTVAL (operands[2])) == 4)" ++ "xvrepl128vei.w\t%u0,%u1,%2" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "V8SI")]) ++ ++(define_insn "lasx_xvrepl128vei_d_internal" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (vec_duplicate:V4DI ++ (vec_select:V4DI ++ (match_operand:V4DI 1 "register_operand" "f") ++ (parallel [(match_operand 2 "const_0_or_1_operand" "") ++ (match_dup 2) ++ (match_operand 3 "const_2_or_3_operand" "") ++ (match_dup 3)]))))] ++ "ISA_HAS_LASX && ((INTVAL (operands[3]) - INTVAL (operands[2])) == 2)" ++ "xvrepl128vei.d\t%u0,%u1,%2" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "lasx_xvrepl128vei_" ++ [(set (match_operand:LASX 0 "register_operand" "=f") ++ (unspec:LASX [(match_operand:LASX 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LASX_XVREPL128VEI))] ++ "ISA_HAS_LASX" ++ "xvrepl128vei.\t%u0,%u1,%2" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvreplve0__scalar" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (vec_duplicate:FLASX ++ (match_operand: 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvreplve0.\t%u0,%u1" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvreplve0_q" ++ [(set (match_operand:V32QI 0 "register_operand" "=f") ++ (unspec:V32QI [(match_operand:V32QI 1 "register_operand" "f")] ++ UNSPEC_LASX_XVREPLVE0_Q))] ++ "ISA_HAS_LASX" ++ "xvreplve0.q\t%u0,%u1" ++ 
[(set_attr "type" "simd_splat") ++ (set_attr "mode" "V32QI")]) ++ ++(define_insn "lasx_xvfcvt_h_s" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (unspec:V16HI [(match_operand:V8SF 1 "register_operand" "f") ++ (match_operand:V8SF 2 "register_operand" "f")] ++ UNSPEC_LASX_XVFCVT))] ++ "ISA_HAS_LASX" ++ "xvfcvt.h.s\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V16HI")]) ++ ++(define_insn "lasx_xvfcvt_s_d" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (unspec:V8SF [(match_operand:V4DF 1 "register_operand" "f") ++ (match_operand:V4DF 2 "register_operand" "f")] ++ UNSPEC_LASX_XVFCVT))] ++ "ISA_HAS_LASX" ++ "xvfcvt.s.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "vec_pack_trunc_v4df" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (vec_concat:V8SF ++ (float_truncate:V4SF (match_operand:V4DF 1 "register_operand" "f")) ++ (float_truncate:V4SF (match_operand:V4DF 2 "register_operand" "f"))))] ++ "ISA_HAS_LASX" ++ "xvfcvt.s.d\t%u0,%u2,%u1\n\txvpermi.d\t%u0,%u0,0xd8" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V8SF") ++ (set_attr "length" "8")]) ++ ++;; Define for builtin function. ++(define_insn "lasx_xvfcvth_s_h" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (unspec:V8SF [(match_operand:V16HI 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFCVTH))] ++ "ISA_HAS_LASX" ++ "xvfcvth.s.h\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V8SF")]) ++ ++;; Define for builtin function. ++(define_insn "lasx_xvfcvth_d_s" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (float_extend:V4DF ++ (vec_select:V4SF ++ (match_operand:V8SF 1 "register_operand" "f") ++ (parallel [(const_int 2) (const_int 3) ++ (const_int 6) (const_int 7)]))))] ++ "ISA_HAS_LASX" ++ "xvfcvth.d.s\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V4DF") ++ (set_attr "length" "12")]) ++ ++;; Define for gen insn. 
++(define_insn "lasx_xvfcvth_d_insn" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (float_extend:V4DF ++ (vec_select:V4SF ++ (match_operand:V8SF 1 "register_operand" "f") ++ (parallel [(const_int 4) (const_int 5) ++ (const_int 6) (const_int 7)]))))] ++ "ISA_HAS_LASX" ++ "xvpermi.d\t%u0,%u1,0xfa\n\txvfcvtl.d.s\t%u0,%u0" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V4DF") ++ (set_attr "length" "12")]) ++ ++;; Define for builtin function. ++(define_insn "lasx_xvfcvtl_s_h" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (unspec:V8SF [(match_operand:V16HI 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFCVTL))] ++ "ISA_HAS_LASX" ++ "xvfcvtl.s.h\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V8SF")]) ++ ++;; Define for builtin function. ++(define_insn "lasx_xvfcvtl_d_s" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (float_extend:V4DF ++ (vec_select:V4SF ++ (match_operand:V8SF 1 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 1) ++ (const_int 4) (const_int 5)]))))] ++ "ISA_HAS_LASX" ++ "xvfcvtl.d.s\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V4DF") ++ (set_attr "length" "8")]) ++ ++;; Define for gen insn. 
++(define_insn "lasx_xvfcvtl_d_insn" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (float_extend:V4DF ++ (vec_select:V4SF ++ (match_operand:V8SF 1 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 1) ++ (const_int 2) (const_int 3)]))))] ++ "ISA_HAS_LASX" ++ "xvpermi.d\t%u0,%u1,0x50\n\txvfcvtl.d.s\t%u0,%u0" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V4DF") ++ (set_attr "length" "8")]) ++ ++(define_code_attr lasxbr ++ [(eq "xbz") ++ (ne "xbnz")]) ++ ++(define_code_attr lasxeq_v ++ [(eq "eqz") ++ (ne "nez")]) ++ ++(define_code_attr lasxne_v ++ [(eq "nez") ++ (ne "eqz")]) ++ ++(define_code_attr lasxeq ++ [(eq "anyeqz") ++ (ne "allnez")]) ++ ++(define_code_attr lasxne ++ [(eq "allnez") ++ (ne "anyeqz")]) ++ ++(define_insn "lasx__" ++ [(set (pc) ++ (if_then_else ++ (equality_op ++ (unspec:SI [(match_operand:LASX 1 "register_operand" "f")] ++ UNSPEC_LASX_BRANCH) ++ (match_operand:SI 2 "const_0_operand")) ++ (label_ref (match_operand 0)) ++ (pc))) ++ (clobber (match_scratch:FCC 3 "=z"))] ++ "ISA_HAS_LASX" ++{ ++ return loongarch_output_conditional_branch (insn, operands, ++ "xvset.\t%Z3%u1\n\tbcnez\t%Z3%0", ++ "xvset.\t%z3%u1\n\tbcnez\t%Z3%0"); ++} ++ [(set_attr "type" "simd_branch") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx__v_" ++ [(set (pc) ++ (if_then_else ++ (equality_op ++ (unspec:SI [(match_operand:LASX 1 "register_operand" "f")] ++ UNSPEC_LASX_BRANCH_V) ++ (match_operand:SI 2 "const_0_operand")) ++ (label_ref (match_operand 0)) ++ (pc))) ++ (clobber (match_scratch:FCC 3 "=z"))] ++ "ISA_HAS_LASX" ++{ ++ return loongarch_output_conditional_branch (insn, operands, ++ "xvset.v\t%Z3%u1\n\tbcnez\t%Z3%0", ++ "xvset.v\t%Z3%u1\n\tbcnez\t%Z3%0"); ++} ++ [(set_attr "type" "simd_branch") ++ (set_attr "mode" "")]) ++ ++;; loongson-asx. 
++(define_insn "lasx_vext2xv_h_b" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (any_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 1 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 1) ++ (const_int 2) (const_int 3) ++ (const_int 4) (const_int 5) ++ (const_int 6) (const_int 7) ++ (const_int 8) (const_int 9) ++ (const_int 10) (const_int 11) ++ (const_int 12) (const_int 13) ++ (const_int 14) (const_int 15)]))))] ++ "ISA_HAS_LASX" ++ "vext2xv.h.b\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V16HI")]) ++ ++(define_insn "lasx_vext2xv_w_h" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (any_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 1 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 1) ++ (const_int 2) (const_int 3) ++ (const_int 4) (const_int 5) ++ (const_int 6) (const_int 7)]))))] ++ "ISA_HAS_LASX" ++ "vext2xv.w.h\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SI")]) ++ ++(define_insn "lasx_vext2xv_d_w" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (any_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 1 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 1) ++ (const_int 2) (const_int 3)]))))] ++ "ISA_HAS_LASX" ++ "vext2xv.d.w\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "lasx_vext2xv_w_b" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (any_extend:V8SI ++ (vec_select:V8QI ++ (match_operand:V32QI 1 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 1) ++ (const_int 2) (const_int 3) ++ (const_int 4) (const_int 5) ++ (const_int 6) (const_int 7)]))))] ++ "ISA_HAS_LASX" ++ "vext2xv.w.b\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SI")]) ++ ++(define_insn "lasx_vext2xv_d_h" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (any_extend:V4DI ++ (vec_select:V4HI ++ (match_operand:V16HI 1 "register_operand" "f") ++ (parallel 
[(const_int 0) (const_int 1) ++ (const_int 2) (const_int 3)]))))] ++ "ISA_HAS_LASX" ++ "vext2xv.d.h\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "lasx_vext2xv_d_b" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (any_extend:V4DI ++ (vec_select:V4QI ++ (match_operand:V32QI 1 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 1) ++ (const_int 2) (const_int 3)]))))] ++ "ISA_HAS_LASX" ++ "vext2xv.d.b\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4DI")]) ++ ++;; Extend loongson-sx to loongson-asx. ++(define_insn "xvandn3" ++ [(set (match_operand:LASX 0 "register_operand" "=f") ++ (and:LASX (not:LASX (match_operand:LASX 1 "register_operand" "f")) ++ (match_operand:LASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvandn.v\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "")]) ++ ++(define_insn "abs2" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (abs:ILASX (match_operand:ILASX 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvsigncov.\t%u0,%u1,%u1" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "")]) ++ ++(define_insn "neg2" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (neg:ILASX (match_operand:ILASX 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvneg.\t%u0,%u1" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvmuh_s_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVMUH_S))] ++ "ISA_HAS_LASX" ++ "xvmuh.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvmuh_u_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVMUH_U))] ++ "ISA_HAS_LASX" ++ 
"xvmuh.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvsllwil_s__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILASX_WHB 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LASX_XVSLLWIL_S))] ++ "ISA_HAS_LASX" ++ "xvsllwil..\t%u0,%u1,%2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvsllwil_u__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILASX_WHB 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LASX_XVSLLWIL_U))] ++ "ISA_HAS_LASX" ++ "xvsllwil..\t%u0,%u1,%2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvsran__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") ++ (match_operand:ILASX_DWH 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSRAN))] ++ "ISA_HAS_LASX" ++ "xvsran..\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssran_s__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") ++ (match_operand:ILASX_DWH 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSSRAN_S))] ++ "ISA_HAS_LASX" ++ "xvssran..\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssran_u__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") ++ (match_operand:ILASX_DWH 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSSRAN_U))] ++ "ISA_HAS_LASX" ++ "xvssran..\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvsrarn__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") ++ (match_operand:ILASX_DWH 2 "register_operand" "f")] ++ 
UNSPEC_LASX_XVSRARN))] ++ "ISA_HAS_LASX" ++ "xvsrarn..\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssrarn_s__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") ++ (match_operand:ILASX_DWH 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSSRARN_S))] ++ "ISA_HAS_LASX" ++ "xvssrarn..\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssrarn_u__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") ++ (match_operand:ILASX_DWH 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSSRARN_U))] ++ "ISA_HAS_LASX" ++ "xvssrarn..\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvsrln__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") ++ (match_operand:ILASX_DWH 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSRLN))] ++ "ISA_HAS_LASX" ++ "xvsrln..\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssrln_u__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") ++ (match_operand:ILASX_DWH 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSSRLN_U))] ++ "ISA_HAS_LASX" ++ "xvssrln..\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvsrlrn__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") ++ (match_operand:ILASX_DWH 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSRLRN))] ++ "ISA_HAS_LASX" ++ "xvsrlrn..\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssrlrn_u__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILASX_DWH 1 
"register_operand" "f") ++ (match_operand:ILASX_DWH 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSSRLRN_U))] ++ "ISA_HAS_LASX" ++ "xvssrlrn..\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvfrstpi_" ++ [(set (match_operand:ILASX_HB 0 "register_operand" "=f") ++ (unspec:ILASX_HB [(match_operand:ILASX_HB 1 "register_operand" "0") ++ (match_operand:ILASX_HB 2 "register_operand" "f") ++ (match_operand 3 "const_uimm5_operand" "")] ++ UNSPEC_LASX_XVFRSTPI))] ++ "ISA_HAS_LASX" ++ "xvfrstpi.\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvfrstp_" ++ [(set (match_operand:ILASX_HB 0 "register_operand" "=f") ++ (unspec:ILASX_HB [(match_operand:ILASX_HB 1 "register_operand" "0") ++ (match_operand:ILASX_HB 2 "register_operand" "f") ++ (match_operand:ILASX_HB 3 "register_operand" "f")] ++ UNSPEC_LASX_XVFRSTP))] ++ "ISA_HAS_LASX" ++ "xvfrstp.\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvshuf4i_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "0") ++ (match_operand:V4DI 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand")] ++ UNSPEC_LASX_XVSHUF4I))] ++ "ISA_HAS_LASX" ++ "xvshuf4i.d\t%u0,%u2,%3" ++ [(set_attr "type" "simd_sld") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "lasx_xvbsrl_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand 2 "const_uimm5_operand" "")] ++ UNSPEC_LASX_XVBSRL_V))] ++ "ISA_HAS_LASX" ++ "xvbsrl.v\t%u0,%u1,%2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvbsll_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand 2 "const_uimm5_operand" "")] ++ UNSPEC_LASX_XVBSLL_V))] ++ "ISA_HAS_LASX" ++ 
"xvbsll.v\t%u0,%u1,%2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvextrins_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") ++ (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LASX_XVEXTRINS))] ++ "ISA_HAS_LASX" ++ "xvextrins.\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvmskltz_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f")] ++ UNSPEC_LASX_XVMSKLTZ))] ++ "ISA_HAS_LASX" ++ "xvmskltz.\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvsigncov_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSIGNCOV))] ++ "ISA_HAS_LASX" ++ "xvsigncov.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_expand "copysign3" ++ [(set (match_dup 4) ++ (and:FLASX ++ (not:FLASX (match_dup 3)) ++ (match_operand:FLASX 1 "register_operand"))) ++ (set (match_dup 5) ++ (and:FLASX (match_dup 3) ++ (match_operand:FLASX 2 "register_operand"))) ++ (set (match_operand:FLASX 0 "register_operand") ++ (ior:FLASX (match_dup 4) (match_dup 5)))] ++ "ISA_HAS_LASX" ++{ ++ operands[3] = loongarch_build_signbit_mask (mode, 1, 0); ++ ++ operands[4] = gen_reg_rtx (mode); ++ operands[5] = gen_reg_rtx (mode); ++}) ++ ++ ++(define_insn "absv4df2" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (abs:V4DF (match_operand:V4DF 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvbitclri.d\t%u0,%u1,63" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "absv8sf2" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (abs:V8SF (match_operand:V8SF 1 
"register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvbitclri.w\t%u0,%u1,31" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "negv4df2" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (neg:V4DF (match_operand:V4DF 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvbitrevi.d\t%u0,%u1,63" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "negv8sf2" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (neg:V8SF (match_operand:V8SF 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvbitrevi.w\t%u0,%u1,31" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "xvfmadd4" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (fma:FLASX (match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 2 "register_operand" "f") ++ (match_operand:FLASX 3 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvfmadd.\t%u0,%u1,$u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "")]) ++ ++(define_insn "fms4" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (fma:FLASX (match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 2 "register_operand" "f") ++ (neg:FLASX (match_operand:FLASX 3 "register_operand" "f"))))] ++ "ISA_HAS_LASX" ++ "xvfmsub.\t%u0,%u1,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "")]) ++ ++(define_insn "xvfnmsub4_nmsub4" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (neg:FLASX ++ (fma:FLASX ++ (match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 2 "register_operand" "f") ++ (neg:FLASX (match_operand:FLASX 3 "register_operand" "f")))))] ++ "ISA_HAS_LASX" ++ "xvfnmsub.\t%u0,%u1,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "")]) ++ ++ ++(define_insn "xvfnmadd4_nmadd4" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (neg:FLASX ++ (fma:FLASX ++ (match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 2 
"register_operand" "f") ++ (match_operand:FLASX 3 "register_operand" "f"))))] ++ "ISA_HAS_LASX" ++ "xvfnmadd.\t%u0,%u1,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvftintrne_w_s" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (unspec:V8SI [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRNE_W_S))] ++ "ISA_HAS_LASX" ++ "xvftintrne.w.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvftintrne_l_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRNE_L_D))] ++ "ISA_HAS_LASX" ++ "xvftintrne.l.d\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "lasx_xvftintrp_w_s" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (unspec:V8SI [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRP_W_S))] ++ "ISA_HAS_LASX" ++ "xvftintrp.w.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvftintrp_l_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRP_L_D))] ++ "ISA_HAS_LASX" ++ "xvftintrp.l.d\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "lasx_xvftintrm_w_s" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (unspec:V8SI [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRM_W_S))] ++ "ISA_HAS_LASX" ++ "xvftintrm.w.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvftintrm_l_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRM_L_D))] ++ "ISA_HAS_LASX" ++ "xvftintrm.l.d\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" 
"V4DF")]) ++ ++(define_insn "lasx_xvftint_w_d" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (unspec:V8SI [(match_operand:V4DF 1 "register_operand" "f") ++ (match_operand:V4DF 2 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINT_W_D))] ++ "ISA_HAS_LASX" ++ "xvftint.w.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "lasx_xvffint_s_l" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (unspec:V8SF [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVFFINT_S_L))] ++ "ISA_HAS_LASX" ++ "xvffint.s.l\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "lasx_xvftintrz_w_d" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (unspec:V8SI [(match_operand:V4DF 1 "register_operand" "f") ++ (match_operand:V4DF 2 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRZ_W_D))] ++ "ISA_HAS_LASX" ++ "xvftintrz.w.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "lasx_xvftintrp_w_d" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (unspec:V8SI [(match_operand:V4DF 1 "register_operand" "f") ++ (match_operand:V4DF 2 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRP_W_D))] ++ "ISA_HAS_LASX" ++ "xvftintrp.w.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "lasx_xvftintrm_w_d" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (unspec:V8SI [(match_operand:V4DF 1 "register_operand" "f") ++ (match_operand:V4DF 2 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRM_W_D))] ++ "ISA_HAS_LASX" ++ "xvftintrm.w.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "lasx_xvftintrne_w_d" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (unspec:V8SI [(match_operand:V4DF 1 "register_operand" "f") ++ (match_operand:V4DF 2 "register_operand" 
"f")] ++ UNSPEC_LASX_XVFTINTRNE_W_D))] ++ "ISA_HAS_LASX" ++ "xvftintrne.w.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "lasx_xvftinth_l_s" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTH_L_S))] ++ "ISA_HAS_LASX" ++ "xvftinth.l.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvftintl_l_s" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTL_L_S))] ++ "ISA_HAS_LASX" ++ "xvftintl.l.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvffinth_d_w" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (unspec:V4DF [(match_operand:V8SI 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFFINTH_D_W))] ++ "ISA_HAS_LASX" ++ "xvffinth.d.w\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SI")]) ++ ++(define_insn "lasx_xvffintl_d_w" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (unspec:V4DF [(match_operand:V8SI 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFFINTL_D_W))] ++ "ISA_HAS_LASX" ++ "xvffintl.d.w\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SI")]) ++ ++(define_insn "lasx_xvftintrzh_l_s" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRZH_L_S))] ++ "ISA_HAS_LASX" ++ "xvftintrzh.l.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvftintrzl_l_s" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRZL_L_S))] ++ "ISA_HAS_LASX" ++ "xvftintrzl.l.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn 
"lasx_xvftintrph_l_s" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRPH_L_S))] ++ "ISA_HAS_LASX" ++ "xvftintrph.l.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lasx_xvftintrpl_l_s" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRPL_L_S))] ++ "ISA_HAS_LASX" ++ "xvftintrpl.l.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvftintrmh_l_s" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRMH_L_S))] ++ "ISA_HAS_LASX" ++ "xvftintrmh.l.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvftintrml_l_s" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRML_L_S))] ++ "ISA_HAS_LASX" ++ "xvftintrml.l.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvftintrneh_l_s" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRNEH_L_S))] ++ "ISA_HAS_LASX" ++ "xvftintrneh.l.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvftintrnel_l_s" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRNEL_L_S))] ++ "ISA_HAS_LASX" ++ "xvftintrnel.l.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvfrintrne_s" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (unspec:V8SF [(match_operand:V8SF 1 "register_operand" "f")] ++ 
UNSPEC_LASX_XVFRINTRNE_S))] ++ "ISA_HAS_LASX" ++ "xvfrintrne.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvfrintrne_d" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (unspec:V4DF [(match_operand:V4DF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFRINTRNE_D))] ++ "ISA_HAS_LASX" ++ "xvfrintrne.d\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "lasx_xvfrintrz_s" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (unspec:V8SF [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFRINTRZ_S))] ++ "ISA_HAS_LASX" ++ "xvfrintrz.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvfrintrz_d" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (unspec:V4DF [(match_operand:V4DF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFRINTRZ_D))] ++ "ISA_HAS_LASX" ++ "xvfrintrz.d\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "lasx_xvfrintrp_s" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (unspec:V8SF [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFRINTRP_S))] ++ "ISA_HAS_LASX" ++ "xvfrintrp.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvfrintrp_d" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (unspec:V4DF [(match_operand:V4DF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFRINTRP_D))] ++ "ISA_HAS_LASX" ++ "xvfrintrp.d\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "lasx_xvfrintrm_s" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (unspec:V8SF [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFRINTRM_S))] ++ "ISA_HAS_LASX" ++ "xvfrintrm.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvfrintrm_d" ++ [(set (match_operand:V4DF 0 
"register_operand" "=f") ++ (unspec:V4DF [(match_operand:V4DF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFRINTRM_D))] ++ "ISA_HAS_LASX" ++ "xvfrintrm.d\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4DF")]) ++ ++;; Vector versions of the floating-point frint patterns. ++;; Expands to btrunc, ceil, floor, rint. ++(define_insn "v8sf2" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (unspec:V8SF [(match_operand:V8SF 1 "register_operand" "f")] ++ FRINT256_S))] ++ "ISA_HAS_LASX" ++ "xvfrint.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "v4df2" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (unspec:V4DF [(match_operand:V4DF 1 "register_operand" "f")] ++ FRINT256_D))] ++ "ISA_HAS_LASX" ++ "xvfrint.d\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4DF")]) ++ ++;; Expands to round. ++(define_insn "round2" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFRINT))] ++ "ISA_HAS_LASX" ++ "xvfrint.\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++;; Offset load and broadcast ++(define_expand "lasx_xvldrepl_" ++ [(match_operand:LASX 0 "register_operand") ++ (match_operand 2 "aq12_operand") ++ (match_operand 1 "pmode_register_operand")] ++ "ISA_HAS_LASX" ++{ ++ emit_insn (gen_lasx_xvldrepl__insn ++ (operands[0], operands[1], operands[2])); ++ DONE; ++}) ++ ++(define_insn "lasx_xvldrepl__insn" ++ [(set (match_operand:LASX 0 "register_operand" "=f") ++ (vec_duplicate:LASX ++ (mem: (plus:DI (match_operand:DI 1 "register_operand" "r") ++ (match_operand 2 "aq12_operand")))))] ++ "ISA_HAS_LASX" ++{ ++ return "xvldrepl.\t%u0,%1,%2"; ++} ++ [(set_attr "type" "simd_load") ++ (set_attr "mode" "") ++ (set_attr "length" "4")]) ++ ++;; Offset is "0" ++(define_insn "lasx_xvldrepl__insn_0" ++ [(set (match_operand:LASX 0 "register_operand" "=f") ++ (vec_duplicate:LASX ++ 
(mem: (match_operand:DI 1 "register_operand" "r"))))] ++ "ISA_HAS_LASX" ++{ ++ return "xvldrepl.\t%u0,%1,0"; ++} ++ [(set_attr "type" "simd_load") ++ (set_attr "mode" "") ++ (set_attr "length" "4")]) ++ ++;;XVADDWEV.H.B XVSUBWEV.H.B XVMULWEV.H.B ++;;XVADDWEV.H.BU XVSUBWEV.H.BU XVMULWEV.H.BU ++(define_insn "lasx_xvwev_h_b" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (addsubmul:V16HI ++ (any_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 1 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14) ++ (const_int 16) (const_int 18) ++ (const_int 20) (const_int 22) ++ (const_int 24) (const_int 26) ++ (const_int 28) (const_int 30)]))) ++ (any_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14) ++ (const_int 16) (const_int 18) ++ (const_int 20) (const_int 22) ++ (const_int 24) (const_int 26) ++ (const_int 28) (const_int 30)])))))] ++ "ISA_HAS_LASX" ++ "xvwev.h.b\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V16HI")]) ++ ++;;XVADDWEV.W.H XVSUBWEV.W.H XVMULWEV.W.H ++;;XVADDWEV.W.HU XVSUBWEV.W.HU XVMULWEV.W.HU ++(define_insn "lasx_xvwev_w_h" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (addsubmul:V8SI ++ (any_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 1 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)]))) ++ (any_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)])))))] ++ "ISA_HAS_LASX" ++ "xvwev.w.h\t%u0,%u1,%u2" ++ [(set_attr "type" 
"simd_int_arith") ++ (set_attr "mode" "V8SI")]) ++ ++;;XVADDWEV.D.W XVSUBWEV.D.W XVMULWEV.D.W ++;;XVADDWEV.D.WU XVSUBWEV.D.WU XVMULWEV.D.WU ++(define_insn "lasx_xvwev_d_w" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (addsubmul:V4DI ++ (any_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 1 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)]))) ++ (any_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)])))))] ++ "ISA_HAS_LASX" ++ "xvwev.d.w\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVADDWEV.Q.D ++;;TODO2 ++(define_insn "lasx_xvaddwev_q_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVADDWEV))] ++ "ISA_HAS_LASX" ++ "xvaddwev.q.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVSUBWEV.Q.D ++;;TODO2 ++(define_insn "lasx_xvsubwev_q_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSUBWEV))] ++ "ISA_HAS_LASX" ++ "xvsubwev.q.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMULWEV.Q.D ++;;TODO2 ++(define_insn "lasx_xvmulwev_q_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVMULWEV))] ++ "ISA_HAS_LASX" ++ "xvmulwev.q.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++ ++;;XVADDWOD.H.B XVSUBWOD.H.B XVMULWOD.H.B ++;;XVADDWOD.H.BU XVSUBWOD.H.BU XVMULWOD.H.BU ++(define_insn "lasx_xvwod_h_b" ++ [(set (match_operand:V16HI 0 
"register_operand" "=f") ++ (addsubmul:V16HI ++ (any_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 1 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15) ++ (const_int 17) (const_int 19) ++ (const_int 21) (const_int 23) ++ (const_int 25) (const_int 27) ++ (const_int 29) (const_int 31)]))) ++ (any_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 2 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15) ++ (const_int 17) (const_int 19) ++ (const_int 21) (const_int 23) ++ (const_int 25) (const_int 27) ++ (const_int 29) (const_int 31)])))))] ++ "ISA_HAS_LASX" ++ "xvwod.h.b\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V16HI")]) ++ ++;;XVADDWOD.W.H XVSUBWOD.W.H XVMULWOD.W.H ++;;XVADDWOD.W.HU XVSUBWOD.W.HU XVMULWOD.W.HU ++(define_insn "lasx_xvwod_w_h" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (addsubmul:V8SI ++ (any_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 1 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)]))) ++ (any_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 2 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)])))))] ++ "ISA_HAS_LASX" ++ "xvwod.w.h\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V8SI")]) ++ ++ ++;;XVADDWOD.D.W XVSUBWOD.D.W XVMULWOD.D.W ++;;XVADDWOD.D.WU XVSUBWOD.D.WU XVMULWOD.D.WU ++(define_insn "lasx_xvwod_d_w" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (addsubmul:V4DI ++ (any_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 1 "register_operand" "%f") ++ (parallel [(const_int 1) 
(const_int 3) ++ (const_int 5) (const_int 7)]))) ++ (any_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 2 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)])))))] ++ "ISA_HAS_LASX" ++ "xvwod.d.w\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVADDWOD.Q.D ++;;TODO2 ++(define_insn "lasx_xvaddwod_q_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVADDWOD))] ++ "ISA_HAS_LASX" ++ "xvaddwod.q.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVSUBWOD.Q.D ++;;TODO2 ++(define_insn "lasx_xvsubwod_q_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSUBWOD))] ++ "ISA_HAS_LASX" ++ "xvsubwod.q.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMULWOD.Q.D ++;;TODO2 ++(define_insn "lasx_xvmulwod_q_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVMULWOD))] ++ "ISA_HAS_LASX" ++ "xvmulwod.q.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVADDWEV.Q.DU ++;;TODO2 ++(define_insn "lasx_xvaddwev_q_du" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVADDWEV2))] ++ "ISA_HAS_LASX" ++ "xvaddwev.q.du\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVSUBWEV.Q.DU ++;;TODO2 ++(define_insn "lasx_xvsubwev_q_du" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI 
[(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSUBWEV2))] ++ "ISA_HAS_LASX" ++ "xvsubwev.q.du\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMULWEV.Q.DU ++;;TODO2 ++(define_insn "lasx_xvmulwev_q_du" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVMULWEV2))] ++ "ISA_HAS_LASX" ++ "xvmulwev.q.du\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVADDWOD.Q.DU ++;;TODO2 ++(define_insn "lasx_xvaddwod_q_du" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVADDWOD2))] ++ "ISA_HAS_LASX" ++ "xvaddwod.q.du\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVSUBWOD.Q.DU ++;;TODO2 ++(define_insn "lasx_xvsubwod_q_du" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSUBWOD2))] ++ "ISA_HAS_LASX" ++ "xvsubwod.q.du\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMULWOD.Q.DU ++;;TODO2 ++(define_insn "lasx_xvmulwod_q_du" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVMULWOD2))] ++ "ISA_HAS_LASX" ++ "xvmulwod.q.du\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVADDWEV.H.BU.B XVMULWEV.H.BU.B ++(define_insn "lasx_xvwev_h_bu_b" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (addmul:V16HI ++ (zero_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 1 
"register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14) ++ (const_int 16) (const_int 18) ++ (const_int 20) (const_int 22) ++ (const_int 24) (const_int 26) ++ (const_int 28) (const_int 30)]))) ++ (sign_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14) ++ (const_int 16) (const_int 18) ++ (const_int 20) (const_int 22) ++ (const_int 24) (const_int 26) ++ (const_int 28) (const_int 30)])))))] ++ "ISA_HAS_LASX" ++ "xvwev.h.bu.b\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V16HI")]) ++ ++;;XVADDWEV.W.HU.H XVMULWEV.W.HU.H ++(define_insn "lasx_xvwev_w_hu_h" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (addmul:V8SI ++ (zero_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 1 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)]))) ++ (sign_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)])))))] ++ "ISA_HAS_LASX" ++ "xvwev.w.hu.h\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V8SI")]) ++ ++;;XVADDWEV.D.WU.W XVMULWEV.D.WU.W ++(define_insn "lasx_xvwev_d_wu_w" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (addmul:V4DI ++ (zero_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 1 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)]))) ++ (sign_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 
6)])))))] ++ "ISA_HAS_LASX" ++ "xvwev.d.wu.w\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVADDWOD.H.BU.B XVMULWOD.H.BU.B ++(define_insn "lasx_xvwod_h_bu_b" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (addmul:V16HI ++ (zero_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 1 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15) ++ (const_int 17) (const_int 19) ++ (const_int 21) (const_int 23) ++ (const_int 25) (const_int 27) ++ (const_int 29) (const_int 31)]))) ++ (sign_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 2 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15) ++ (const_int 17) (const_int 19) ++ (const_int 21) (const_int 23) ++ (const_int 25) (const_int 27) ++ (const_int 29) (const_int 31)])))))] ++ "ISA_HAS_LASX" ++ "xvwod.h.bu.b\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V16HI")]) ++ ++;;XVADDWOD.W.HU.H XVMULWOD.W.HU.H ++(define_insn "lasx_xvwod_w_hu_h" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (addmul:V8SI ++ (zero_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 1 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)]))) ++ (sign_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 2 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)])))))] ++ "ISA_HAS_LASX" ++ "xvwod.w.hu.h\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V8SI")]) ++ ++;;XVADDWOD.D.WU.W XVMULWOD.D.WU.W ++(define_insn "lasx_xvwod_d_wu_w" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ 
(addmul:V4DI ++ (zero_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 1 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)]))) ++ (sign_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 2 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)])))))] ++ "ISA_HAS_LASX" ++ "xvwod.d.wu.w\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMADDWEV.H.B XVMADDWEV.H.BU ++(define_insn "lasx_xvmaddwev_h_b" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (plus:V16HI ++ (match_operand:V16HI 1 "register_operand" "0") ++ (mult:V16HI ++ (any_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 2 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14) ++ (const_int 16) (const_int 18) ++ (const_int 20) (const_int 22) ++ (const_int 24) (const_int 26) ++ (const_int 28) (const_int 30)]))) ++ (any_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 3 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14) ++ (const_int 16) (const_int 18) ++ (const_int 20) (const_int 22) ++ (const_int 24) (const_int 26) ++ (const_int 28) (const_int 30)]))))))] ++ "ISA_HAS_LASX" ++ "xvmaddwev.h.b\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V16HI")]) ++ ++;;XVMADDWEV.W.H XVMADDWEV.W.HU ++(define_insn "lasx_xvmaddwev_w_h" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (plus:V8SI ++ (match_operand:V8SI 1 "register_operand" "0") ++ (mult:V8SI ++ (any_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 2 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)]))) ++ (any_extend:V8SI ++ 
(vec_select:V8HI ++ (match_operand:V16HI 3 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)]))))))] ++ "ISA_HAS_LASX" ++ "xvmaddwev.w.h\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V8SI")]) ++ ++;;XVMADDWEV.D.W XVMADDWEV.D.WU ++(define_insn "lasx_xvmaddwev_d_w" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (plus:V4DI ++ (match_operand:V4DI 1 "register_operand" "0") ++ (mult:V4DI ++ (any_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 2 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)]))) ++ (any_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 3 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)]))))))] ++ "ISA_HAS_LASX" ++ "xvmaddwev.d.w\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMADDWEV.Q.D ++;;TODO2 ++(define_insn "lasx_xvmaddwev_q_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "0") ++ (match_operand:V4DI 2 "register_operand" "f") ++ (match_operand:V4DI 3 "register_operand" "f")] ++ UNSPEC_LASX_XVMADDWEV))] ++ "ISA_HAS_LASX" ++ "xvmaddwev.q.d\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMADDWOD.H.B XVMADDWOD.H.BU ++(define_insn "lasx_xvmaddwod_h_b" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (plus:V16HI ++ (match_operand:V16HI 1 "register_operand" "0") ++ (mult:V16HI ++ (any_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 2 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15) ++ (const_int 17) (const_int 19) ++ (const_int 21) (const_int 23) ++ (const_int 25) (const_int 27) ++ (const_int 29) (const_int 31)]))) ++ 
(any_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 3 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15) ++ (const_int 17) (const_int 19) ++ (const_int 21) (const_int 23) ++ (const_int 25) (const_int 27) ++ (const_int 29) (const_int 31)]))))))] ++ "ISA_HAS_LASX" ++ "xvmaddwod.h.b\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V16HI")]) ++ ++;;XVMADDWOD.W.H XVMADDWOD.W.HU ++(define_insn "lasx_xvmaddwod_w_h" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (plus:V8SI ++ (match_operand:V8SI 1 "register_operand" "0") ++ (mult:V8SI ++ (any_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 2 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)]))) ++ (any_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 3 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)]))))))] ++ "ISA_HAS_LASX" ++ "xvmaddwod.w.h\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V8SI")]) ++ ++;;XVMADDWOD.D.W XVMADDWOD.D.WU ++(define_insn "lasx_xvmaddwod_d_w" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (plus:V4DI ++ (match_operand:V4DI 1 "register_operand" "0") ++ (mult:V4DI ++ (any_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 2 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)]))) ++ (any_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 3 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)]))))))] ++ "ISA_HAS_LASX" ++ "xvmaddwod.d.w\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMADDWOD.Q.D ++;;TODO2 ++(define_insn "lasx_xvmaddwod_q_d" ++ 
[(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "0") ++ (match_operand:V4DI 2 "register_operand" "f") ++ (match_operand:V4DI 3 "register_operand" "f")] ++ UNSPEC_LASX_XVMADDWOD))] ++ "ISA_HAS_LASX" ++ "xvmaddwod.q.d\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMADDWEV.Q.DU ++;;TODO2 ++(define_insn "lasx_xvmaddwev_q_du" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "0") ++ (match_operand:V4DI 2 "register_operand" "f") ++ (match_operand:V4DI 3 "register_operand" "f")] ++ UNSPEC_LASX_XVMADDWEV2))] ++ "ISA_HAS_LASX" ++ "xvmaddwev.q.du\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMADDWOD.Q.DU ++;;TODO2 ++(define_insn "lasx_xvmaddwod_q_du" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "0") ++ (match_operand:V4DI 2 "register_operand" "f") ++ (match_operand:V4DI 3 "register_operand" "f")] ++ UNSPEC_LASX_XVMADDWOD2))] ++ "ISA_HAS_LASX" ++ "xvmaddwod.q.du\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMADDWEV.H.BU.B ++(define_insn "lasx_xvmaddwev_h_bu_b" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (plus:V16HI ++ (match_operand:V16HI 1 "register_operand" "0") ++ (mult:V16HI ++ (zero_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 2 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14) ++ (const_int 16) (const_int 18) ++ (const_int 20) (const_int 22) ++ (const_int 24) (const_int 26) ++ (const_int 28) (const_int 30)]))) ++ (sign_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 3 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) 
++ (const_int 12) (const_int 14) ++ (const_int 16) (const_int 18) ++ (const_int 20) (const_int 22) ++ (const_int 24) (const_int 26) ++ (const_int 28) (const_int 30)]))))))] ++ "ISA_HAS_LASX" ++ "xvmaddwev.h.bu.b\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V16HI")]) ++ ++;;XVMADDWEV.W.HU.H ++(define_insn "lasx_xvmaddwev_w_hu_h" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (plus:V8SI ++ (match_operand:V8SI 1 "register_operand" "0") ++ (mult:V8SI ++ (zero_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 2 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)]))) ++ (sign_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 3 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)]))))))] ++ "ISA_HAS_LASX" ++ "xvmaddwev.w.hu.h\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V8SI")]) ++ ++;;XVMADDWEV.D.WU.W ++(define_insn "lasx_xvmaddwev_d_wu_w" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (plus:V4DI ++ (match_operand:V4DI 1 "register_operand" "0") ++ (mult:V4DI ++ (zero_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 2 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)]))) ++ (sign_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 3 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)]))))))] ++ "ISA_HAS_LASX" ++ "xvmaddwev.d.wu.w\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMADDWEV.Q.DU.D ++;;TODO2 ++(define_insn "lasx_xvmaddwev_q_du_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "0") ++ (match_operand:V4DI 2 "register_operand" "f") ++ (match_operand:V4DI 3 
"register_operand" "f")] ++ UNSPEC_LASX_XVMADDWEV3))] ++ "ISA_HAS_LASX" ++ "xvmaddwev.q.du.d\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMADDWOD.H.BU.B ++(define_insn "lasx_xvmaddwod_h_bu_b" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (plus:V16HI ++ (match_operand:V16HI 1 "register_operand" "0") ++ (mult:V16HI ++ (zero_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 2 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15) ++ (const_int 17) (const_int 19) ++ (const_int 21) (const_int 23) ++ (const_int 25) (const_int 27) ++ (const_int 29) (const_int 31)]))) ++ (sign_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 3 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15) ++ (const_int 17) (const_int 19) ++ (const_int 21) (const_int 23) ++ (const_int 25) (const_int 27) ++ (const_int 29) (const_int 31)]))))))] ++ "ISA_HAS_LASX" ++ "xvmaddwod.h.bu.b\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V16HI")]) ++ ++;;XVMADDWOD.W.HU.H ++(define_insn "lasx_xvmaddwod_w_hu_h" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (plus:V8SI ++ (match_operand:V8SI 1 "register_operand" "0") ++ (mult:V8SI ++ (zero_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 2 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)]))) ++ (sign_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 3 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)]))))))] ++ "ISA_HAS_LASX" ++ "xvmaddwod.w.hu.h\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ 
(set_attr "mode" "V8SI")]) ++ ++;;XVMADDWOD.D.WU.W ++(define_insn "lasx_xvmaddwod_d_wu_w" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (plus:V4DI ++ (match_operand:V4DI 1 "register_operand" "0") ++ (mult:V4DI ++ (zero_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 2 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)]))) ++ (sign_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 3 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)]))))))] ++ "ISA_HAS_LASX" ++ "xvmaddwod.d.wu.w\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMADDWOD.Q.DU.D ++;;TODO2 ++(define_insn "lasx_xvmaddwod_q_du_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "0") ++ (match_operand:V4DI 2 "register_operand" "f") ++ (match_operand:V4DI 3 "register_operand" "f")] ++ UNSPEC_LASX_XVMADDWOD3))] ++ "ISA_HAS_LASX" ++ "xvmaddwod.q.du.d\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVHADDW.Q.D ++;;TODO2 ++(define_insn "lasx_xvhaddw_q_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVHADDW_Q_D))] ++ "ISA_HAS_LASX" ++ "xvhaddw.q.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVHSUBW.Q.D ++;;TODO2 ++(define_insn "lasx_xvhsubw_q_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVHSUBW_Q_D))] ++ "ISA_HAS_LASX" ++ "xvhsubw.q.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVHADDW.QU.DU ++;;TODO2 ++(define_insn "lasx_xvhaddw_qu_du" ++ [(set (match_operand:V4DI 0 "register_operand" 
"=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVHADDW_QU_DU))] ++ "ISA_HAS_LASX" ++ "xvhaddw.qu.du\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVHSUBW.QU.DU ++;;TODO2 ++(define_insn "lasx_xvhsubw_qu_du" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVHSUBW_QU_DU))] ++ "ISA_HAS_LASX" ++ "xvhsubw.qu.du\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVROTR.B XVROTR.H XVROTR.W XVROTR.D ++;;TODO-478 ++(define_insn "lasx_xvrotr_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVROTR))] ++ "ISA_HAS_LASX" ++ "xvrotr.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++;;XVADD.Q ++;;TODO2 ++(define_insn "lasx_xvadd_q" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVADD_Q))] ++ "ISA_HAS_LASX" ++ "xvadd.q\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVSUB.Q ++;;TODO2 ++(define_insn "lasx_xvsub_q" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSUB_Q))] ++ "ISA_HAS_LASX" ++ "xvsub.q\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVSSRLN.B.H XVSSRLN.H.W XVSSRLN.W.D ++(define_insn "lasx_xvssrln__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") ++ (match_operand:ILASX_DWH 2 
"register_operand" "f")] ++ UNSPEC_LASX_XVSSRLN))] ++ "ISA_HAS_LASX" ++ "xvssrln..\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++;;XVREPLVE.B XVREPLVE.H XVREPLVE.W XVREPLVE.D ++(define_insn "lasx_xvreplve_" ++ [(set (match_operand:LASX 0 "register_operand" "=f") ++ (unspec:LASX [(match_operand:LASX 1 "register_operand" "f") ++ (match_operand:SI 2 "register_operand" "r")] ++ UNSPEC_LASX_XVREPLVE))] ++ "ISA_HAS_LASX" ++ "xvreplve.\t%u0,%u1,%z2" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "")]) ++ ++;;XVADDWEV.Q.DU.D ++(define_insn "lasx_xvaddwev_q_du_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVADDWEV3))] ++ "ISA_HAS_LASX" ++ "xvaddwev.q.du.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVADDWOD.Q.DU.D ++(define_insn "lasx_xvaddwod_q_du_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVADDWOD3))] ++ "ISA_HAS_LASX" ++ "xvaddwod.q.du.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMULWEV.Q.DU.D ++(define_insn "lasx_xvmulwev_q_du_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVMULWEV3))] ++ "ISA_HAS_LASX" ++ "xvmulwev.q.du.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMULWOD.Q.DU.D ++(define_insn "lasx_xvmulwod_q_du_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVMULWOD3))] ++ "ISA_HAS_LASX" ++ "xvmulwod.q.du.d\t%u0,%u1,%u2" ++ 
[(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "lasx_xvpickve2gr_w" ++ [(set (match_operand:SI 0 "register_operand" "=r") ++ (any_extend:SI ++ (vec_select:SI ++ (match_operand:V8SI 1 "register_operand" "f") ++ (parallel [(match_operand 2 "const_0_to_7_operand" "")]))))] ++ "ISA_HAS_LASX" ++ "xvpickve2gr.w\t%0,%u1,%2" ++ [(set_attr "type" "simd_copy") ++ (set_attr "mode" "V8SI")]) ++ ++ ++(define_insn "lasx_xvmskgez_b" ++ [(set (match_operand:V32QI 0 "register_operand" "=f") ++ (unspec:V32QI [(match_operand:V32QI 1 "register_operand" "f")] ++ UNSPEC_LASX_XVMSKGEZ))] ++ "ISA_HAS_LASX" ++ "xvmskgez.b\t%u0,%u1" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "V32QI")]) ++ ++(define_insn "lasx_xvmsknz_b" ++ [(set (match_operand:V32QI 0 "register_operand" "=f") ++ (unspec:V32QI [(match_operand:V32QI 1 "register_operand" "f")] ++ UNSPEC_LASX_XVMSKNZ))] ++ "ISA_HAS_LASX" ++ "xvmsknz.b\t%u0,%u1" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "V32QI")]) ++ ++(define_insn "lasx_xvexth_h_b" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (any_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 1 "register_operand" "f") ++ (parallel [(const_int 16) (const_int 17) ++ (const_int 18) (const_int 19) ++ (const_int 20) (const_int 21) ++ (const_int 22) (const_int 23) ++ (const_int 24) (const_int 25) ++ (const_int 26) (const_int 27) ++ (const_int 28) (const_int 29) ++ (const_int 30) (const_int 31)]))))] ++ "ISA_HAS_LASX" ++ "xvexth.h.b\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V16HI")]) ++ ++(define_insn "lasx_xvexth_w_h" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (any_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 1 "register_operand" "f") ++ (parallel [(const_int 8) (const_int 9) ++ (const_int 10) (const_int 11) ++ (const_int 12) (const_int 13) ++ (const_int 14) (const_int 15)]))))] ++ "ISA_HAS_LASX" ++ "xvexth.w.h\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr 
"mode" "V8SI")]) ++ ++(define_insn "lasx_xvexth_d_w" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (any_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 1 "register_operand" "f") ++ (parallel [(const_int 4) (const_int 5) ++ (const_int 6) (const_int 7)]))))] ++ "ISA_HAS_LASX" ++ "xvexth.d.w\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "lasx_xvexth_q_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f")] ++ UNSPEC_LASX_XVEXTH_Q_D))] ++ "ISA_HAS_LASX" ++ "xvexth.q.d\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "lasx_xvexth_qu_du" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f")] ++ UNSPEC_LASX_XVEXTH_QU_DU))] ++ "ISA_HAS_LASX" ++ "xvexth.qu.du\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "lasx_xvrotri_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (rotatert:ILASX (match_operand:ILASX 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")))] ++ "ISA_HAS_LASX" ++ "xvrotri.\t%u0,%u1,%2" ++ [(set_attr "type" "simd_shf") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvextl_q_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f")] ++ UNSPEC_LASX_XVEXTL_Q_D))] ++ "ISA_HAS_LASX" ++ "xvextl.q.d\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "lasx_xvsrlni__" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") ++ (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LASX_XVSRLNI))] ++ "ISA_HAS_LASX" ++ "xvsrlni..\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvsrlrni__" ++ 
[(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") ++ (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LASX_XVSRLRNI))] ++ "ISA_HAS_LASX" ++ "xvsrlrni..\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssrlni__" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") ++ (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LASX_XVSSRLNI))] ++ "ISA_HAS_LASX" ++ "xvssrlni..\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssrlni__" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") ++ (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LASX_XVSSRLNI2))] ++ "ISA_HAS_LASX" ++ "xvssrlni..\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssrlrni__" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") ++ (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LASX_XVSSRLRNI))] ++ "ISA_HAS_LASX" ++ "xvssrlrni..\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssrlrni__" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") ++ (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LASX_XVSSRLRNI2))] ++ "ISA_HAS_LASX" ++ "xvssrlrni..\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvsrani__" ++ [(set (match_operand:ILASX 0 "register_operand" 
"=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") ++ (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LASX_XVSRANI))] ++ "ISA_HAS_LASX" ++ "xvsrani..\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvsrarni__" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") ++ (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LASX_XVSRARNI))] ++ "ISA_HAS_LASX" ++ "xvsrarni..\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssrani__" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") ++ (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LASX_XVSSRANI))] ++ "ISA_HAS_LASX" ++ "xvssrani..\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssrani__" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") ++ (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LASX_XVSSRANI2))] ++ "ISA_HAS_LASX" ++ "xvssrani..\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssrarni__" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") ++ (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LASX_XVSSRARNI))] ++ "ISA_HAS_LASX" ++ "xvssrarni..\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssrarni__" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 
"register_operand" "0") ++ (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LASX_XVSSRARNI2))] ++ "ISA_HAS_LASX" ++ "xvssrarni..\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_mode_attr VDOUBLEMODEW256 ++ [(V8SI "V16SI") ++ (V8SF "V16SF")]) ++ ++(define_insn "lasx_xvpermi_" ++ [(set (match_operand:LASX_W 0 "register_operand" "=f") ++ (unspec:LASX_W [(match_operand:LASX_W 1 "register_operand" "0") ++ (match_operand:LASX_W 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LASX_XVPERMI))] ++ "ISA_HAS_LASX" ++ "xvpermi.w\t%u0,%u2,%3" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvpermi__1" ++ [(set (match_operand:LASX_W 0 "register_operand" "=f") ++ (vec_select:LASX_W ++ (vec_concat: ++ (match_operand:LASX_W 1 "register_operand" "f") ++ (match_operand:LASX_W 2 "register_operand" "0")) ++ (parallel [(match_operand 3 "const_0_to_3_operand") ++ (match_operand 4 "const_0_to_3_operand" ) ++ (match_operand 5 "const_8_to_11_operand" ) ++ (match_operand 6 "const_8_to_11_operand" ) ++ (match_operand 7 "const_4_to_7_operand" ) ++ (match_operand 8 "const_4_to_7_operand" ) ++ (match_operand 9 "const_12_to_15_operand") ++ (match_operand 10 "const_12_to_15_operand")])))] ++ "ISA_HAS_LASX ++ && INTVAL (operands[3]) + 4 == INTVAL (operands[7]) ++ && INTVAL (operands[4]) + 4 == INTVAL (operands[8]) ++ && INTVAL (operands[5]) + 4 == INTVAL (operands[9]) ++ && INTVAL (operands[6]) + 4 == INTVAL (operands[10])" ++{ ++ int mask = 0; ++ mask |= INTVAL (operands[3]) << 0; ++ mask |= INTVAL (operands[4]) << 2; ++ mask |= (INTVAL (operands[5]) - 8) << 4; ++ mask |= (INTVAL (operands[6]) - 8) << 6; ++ operands[3] = GEN_INT (mask); ++ ++ return "xvpermi.w\t%u0,%u1,%3"; ++} ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) ++ ++(define_expand "lasx_xvld" ++ [(match_operand:V32QI 0 "register_operand") ++ (match_operand 1 
"pmode_register_operand") ++ (match_operand 2 "aq12b_operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx addr = plus_constant (GET_MODE (operands[1]), operands[1], ++ INTVAL (operands[2])); ++ loongarch_emit_move (operands[0], gen_rtx_MEM (V32QImode, addr)); ++ DONE; ++}) ++ ++(define_expand "lasx_xvst" ++ [(match_operand:V32QI 0 "register_operand") ++ (match_operand 1 "pmode_register_operand") ++ (match_operand 2 "aq12b_operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx addr = plus_constant (GET_MODE (operands[1]), operands[1], ++ INTVAL (operands[2])); ++ loongarch_emit_move (gen_rtx_MEM (V32QImode, addr), operands[0]); ++ DONE; ++}) ++ ++(define_expand "lasx_xvstelm_" ++ [(match_operand:LASX 0 "register_operand") ++ (match_operand 3 "const__operand") ++ (match_operand 2 "aq8_operand") ++ (match_operand 1 "pmode_register_operand")] ++ "ISA_HAS_LASX" ++{ ++ emit_insn (gen_lasx_xvstelm__insn ++ (operands[1], operands[2], operands[0], operands[3])); ++ DONE; ++}) ++ ++(define_insn "lasx_xvstelm__insn" ++ [(set (mem: (plus:DI (match_operand:DI 0 "register_operand" "r") ++ (match_operand 1 "aq8_operand"))) ++ (vec_select: ++ (match_operand:LASX 2 "register_operand" "f") ++ (parallel [(match_operand 3 "const__operand" "")])))] ++ "ISA_HAS_LASX" ++{ ++ return "xvstelm.\t%u2,%0,%1,%3"; ++} ++ [(set_attr "type" "simd_store") ++ (set_attr "mode" "") ++ (set_attr "length" "4")]) ++ ++;; Offset is "0" ++(define_insn "lasx_xvstelm__insn_0" ++ [(set (mem: (match_operand:DI 0 "register_operand" "r")) ++ (vec_select: ++ (match_operand:LASX_WD 1 "register_operand" "f") ++ (parallel [(match_operand:SI 2 "const__operand")])))] ++ "ISA_HAS_LASX" ++{ ++ return "xvstelm.\t%u1,%0,0,%2"; ++} ++ [(set_attr "type" "simd_store") ++ (set_attr "mode" "") ++ (set_attr "length" "4")]) ++ ++(define_insn "lasx_xvinsve0_" ++ [(set (match_operand:LASX_WD 0 "register_operand" "=f") ++ (unspec:LASX_WD [(match_operand:LASX_WD 1 "register_operand" "0") ++ (match_operand:LASX_WD 2 "register_operand" "f") ++ (match_operand 3 
"const__operand" "")] ++ UNSPEC_LASX_XVINSVE0))] ++ "ISA_HAS_LASX" ++ "xvinsve0.\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shf") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvinsve0__scalar" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (vec_merge:FLASX ++ (vec_duplicate:FLASX ++ (match_operand: 1 "register_operand" "f")) ++ (match_operand:FLASX 2 "register_operand" "0") ++ (match_operand 3 "const__operand" "")))] ++ "ISA_HAS_LASX" ++ "xvinsve0.\t%u0,%u1,%y3" ++ [(set_attr "type" "simd_insert") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvpickve_" ++ [(set (match_operand:LASX_WD 0 "register_operand" "=f") ++ (unspec:LASX_WD [(match_operand:LASX_WD 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LASX_XVPICKVE))] ++ "ISA_HAS_LASX" ++ "xvpickve.\t%u0,%u1,%2" ++ [(set_attr "type" "simd_shf") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvpickve__scalar" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (vec_select: ++ (match_operand:FLASX 1 "register_operand" "f") ++ (parallel [(match_operand 2 "const__operand" "")])))] ++ "ISA_HAS_LASX" ++ "xvpickve.\t%u0,%u1,%2" ++ [(set_attr "type" "simd_shf") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssrlrn__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") ++ (match_operand:ILASX_DWH 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSSRLRN))] ++ "ISA_HAS_LASX" ++ "xvssrlrn..\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "xvorn3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (ior:ILASX (not:ILASX (match_operand:ILASX 2 "register_operand" "f")) ++ (match_operand:ILASX 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvorn.v\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvextl_qu_du" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 
"register_operand" "f")] ++ UNSPEC_LASX_XVEXTL_QU_DU))] ++ "ISA_HAS_LASX" ++ "xvextl.qu.du\t%u0,%u1" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "lasx_xvldi" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI[(match_operand 1 "const_imm13_operand")] ++ UNSPEC_LASX_XVLDI))] ++ "ISA_HAS_LASX" ++{ ++ HOST_WIDE_INT val = INTVAL (operands[1]); ++ if (val < 0) ++ { ++ HOST_WIDE_INT modeVal = (val & 0xf00) >> 8; ++ if (modeVal < 13) ++ return "xvldi\t%u0,%1"; ++ else ++ { ++ sorry ("imm13 only support 0000 ~ 1100 in bits '12 ~ 9' when bit '13' is 1"); ++ return "#"; ++ } ++ } ++ else ++ return "xvldi\t%u0,%1"; ++} ++ [(set_attr "type" "simd_load") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "lasx_xvldx" ++ [(set (match_operand:V32QI 0 "register_operand" "=f") ++ (unspec:V32QI [(match_operand:DI 1 "register_operand" "r") ++ (match_operand:DI 2 "reg_or_0_operand" "rJ")] ++ UNSPEC_LASX_XVLDX))] ++ "ISA_HAS_LASX" ++{ ++ return "xvldx\t%u0,%1,%z2"; ++} ++ [(set_attr "type" "simd_load") ++ (set_attr "mode" "V32QI")]) ++ ++(define_insn "lasx_xvstx" ++ [(set (mem:V32QI (plus:DI (match_operand:DI 1 "register_operand" "r") ++ (match_operand:DI 2 "reg_or_0_operand" "rJ"))) ++ (unspec: V32QI[(match_operand:V32QI 0 "register_operand" "f")] ++ UNSPEC_LASX_XVSTX))] ++ ++ "ISA_HAS_LASX" ++{ ++ return "xvstx\t%u0,%1,%z2"; ++} ++ [(set_attr "type" "simd_store") ++ (set_attr "mode" "DI")]) ++ ++(define_insn "vec_widen_mult_even_v8si" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (mult:V4DI ++ (any_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 1 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)]))) ++ (any_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)])))))] ++ "ISA_HAS_LASX" ++ "xvmulwev.d.w\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ 
(set_attr "mode" "V4DI")]) ++ ++;; Vector reduction operation ++(define_expand "reduc_plus_scal_v4di" ++ [(match_operand:DI 0 "register_operand") ++ (match_operand:V4DI 1 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx tmp = gen_reg_rtx (V4DImode); ++ rtx tmp1 = gen_reg_rtx (V4DImode); ++ rtx vec_res = gen_reg_rtx (V4DImode); ++ emit_insn (gen_lasx_xvhaddw_q_d (tmp, operands[1], operands[1])); ++ emit_insn (gen_lasx_xvpermi_d_v4di (tmp1, tmp, GEN_INT (2))); ++ emit_insn (gen_addv4di3 (vec_res, tmp, tmp1)); ++ emit_insn (gen_vec_extractv4didi (operands[0], vec_res, const0_rtx)); ++ DONE; ++}) ++ ++(define_expand "reduc_plus_scal_v8si" ++ [(match_operand:SI 0 "register_operand") ++ (match_operand:V8SI 1 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx tmp = gen_reg_rtx (V4DImode); ++ rtx tmp1 = gen_reg_rtx (V4DImode); ++ rtx vec_res = gen_reg_rtx (V4DImode); ++ emit_insn (gen_lasx_xvhaddw_d_w (tmp, operands[1], operands[1])); ++ emit_insn (gen_lasx_xvhaddw_q_d (tmp1, tmp, tmp)); ++ emit_insn (gen_lasx_xvpermi_d_v4di (tmp, tmp1, GEN_INT (2))); ++ emit_insn (gen_addv4di3 (vec_res, tmp, tmp1)); ++ emit_insn (gen_vec_extractv8sisi (operands[0], gen_lowpart (V8SImode,vec_res), ++ const0_rtx)); ++ DONE; ++}) ++ ++(define_expand "reduc_plus_scal_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:FLASX 1 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx tmp = gen_reg_rtx (mode); ++ loongarch_expand_vector_reduc (gen_add3, tmp, operands[1]); ++ emit_insn (gen_vec_extract (operands[0], tmp, ++ const0_rtx)); ++ DONE; ++}) ++ ++(define_expand "reduc__scal_" ++ [(any_bitwise: ++ (match_operand: 0 "register_operand") ++ (match_operand:ILASX 1 "register_operand"))] ++ "ISA_HAS_LASX" ++{ ++ rtx tmp = gen_reg_rtx (mode); ++ loongarch_expand_vector_reduc (gen_3, tmp, operands[1]); ++ emit_insn (gen_vec_extract (operands[0], tmp, ++ const0_rtx)); ++ DONE; ++}) ++ ++(define_expand "reduc_smax_scal_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:LASX 1 
"register_operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx tmp = gen_reg_rtx (mode); ++ loongarch_expand_vector_reduc (gen_smax3, tmp, operands[1]); ++ emit_insn (gen_vec_extract (operands[0], tmp, ++ const0_rtx)); ++ DONE; ++}) ++ ++(define_expand "reduc_smin_scal_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:LASX 1 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx tmp = gen_reg_rtx (mode); ++ loongarch_expand_vector_reduc (gen_smin3, tmp, operands[1]); ++ emit_insn (gen_vec_extract (operands[0], tmp, ++ const0_rtx)); ++ DONE; ++}) ++ ++(define_expand "reduc_umax_scal_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:ILASX 1 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx tmp = gen_reg_rtx (mode); ++ loongarch_expand_vector_reduc (gen_umax3, tmp, operands[1]); ++ emit_insn (gen_vec_extract (operands[0], tmp, ++ const0_rtx)); ++ DONE; ++}) ++ ++(define_expand "reduc_umin_scal_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:ILASX 1 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx tmp = gen_reg_rtx (mode); ++ loongarch_expand_vector_reduc (gen_umin3, tmp, operands[1]); ++ emit_insn (gen_vec_extract (operands[0], tmp, ++ const0_rtx)); ++ DONE; ++}) +diff --git a/gcc/config/loongarch/loongarch-modes.def b/gcc/config/loongarch/loongarch-modes.def +index b69ad3d83..ac9ea3142 100644 +--- a/gcc/config/loongarch/loongarch-modes.def ++++ b/gcc/config/loongarch/loongarch-modes.def +@@ -33,6 +33,7 @@ VECTOR_MODES (FLOAT, 8); /* V4HF V2SF */ + VECTOR_MODES (INT, 16); /* V16QI V8HI V4SI V2DI */ + VECTOR_MODES (FLOAT, 16); /* V4SF V2DF */ + ++/* For LARCH LASX 256 bits. 
*/ + VECTOR_MODES (INT, 32); /* V32QI V16HI V8SI V4DI */ + VECTOR_MODES (FLOAT, 32); /* V8SF V4DF */ + +diff --git a/gcc/config/loongarch/loongarch-protos.h b/gcc/config/loongarch/loongarch-protos.h +index 24e42fa99..133ec9fa8 100644 +--- a/gcc/config/loongarch/loongarch-protos.h ++++ b/gcc/config/loongarch/loongarch-protos.h +@@ -89,6 +89,8 @@ extern bool loongarch_split_move_insn_p (rtx, rtx); + extern void loongarch_split_move_insn (rtx, rtx, rtx); + extern void loongarch_split_128bit_move (rtx, rtx); + extern bool loongarch_split_128bit_move_p (rtx, rtx); ++extern void loongarch_split_256bit_move (rtx, rtx); ++extern bool loongarch_split_256bit_move_p (rtx, rtx); + extern void loongarch_split_lsx_copy_d (rtx, rtx, rtx, rtx (*)(rtx, rtx, rtx)); + extern void loongarch_split_lsx_insert_d (rtx, rtx, rtx, rtx); + extern void loongarch_split_lsx_fill_d (rtx, rtx); +@@ -174,9 +176,11 @@ union loongarch_gen_fn_ptrs + extern void loongarch_expand_atomic_qihi (union loongarch_gen_fn_ptrs, + rtx, rtx, rtx, rtx, rtx); + ++extern void loongarch_expand_vector_group_init (rtx, rtx); + extern void loongarch_expand_vector_init (rtx, rtx); + extern void loongarch_expand_vec_unpack (rtx op[2], bool, bool); + extern void loongarch_expand_vec_perm (rtx, rtx, rtx, rtx); ++extern void loongarch_expand_vec_perm_1 (rtx[]); + extern void loongarch_expand_vector_extract (rtx, rtx, int); + extern void loongarch_expand_vector_reduc (rtx (*)(rtx, rtx, rtx), rtx, rtx); + +diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc +index 40b83d72b..dae35a479 100644 +--- a/gcc/config/loongarch/loongarch.cc ++++ b/gcc/config/loongarch/loongarch.cc +@@ -1927,7 +1927,7 @@ loongarch_symbol_insns (enum loongarch_symbol_type type, machine_mode mode) + { + /* LSX LD.* and ST.* cannot support loading symbols via an immediate + operand. 
*/ +- if (LSX_SUPPORTED_MODE_P (mode)) ++ if (LSX_SUPPORTED_MODE_P (mode) || LASX_SUPPORTED_MODE_P (mode)) + return 0; + + switch (type) +@@ -2060,6 +2060,11 @@ loongarch_valid_offset_p (rtx x, machine_mode mode) + loongarch_ldst_scaled_shift (mode))) + return false; + ++ /* LASX XVLD.B and XVST.B supports 10-bit signed offsets without shift. */ ++ if (LASX_SUPPORTED_MODE_P (mode) ++ && !loongarch_signed_immediate_p (INTVAL (x), 10, 0)) ++ return false; ++ + return true; + } + +@@ -2272,7 +2277,9 @@ loongarch_address_insns (rtx x, machine_mode mode, bool might_split_p) + { + struct loongarch_address_info addr; + int factor; +- bool lsx_p = !might_split_p && LSX_SUPPORTED_MODE_P (mode); ++ bool lsx_p = (!might_split_p ++ && (LSX_SUPPORTED_MODE_P (mode) ++ || LASX_SUPPORTED_MODE_P (mode))); + + if (!loongarch_classify_address (&addr, x, mode, false)) + return 0; +@@ -2418,7 +2425,8 @@ loongarch_const_insns (rtx x) + return loongarch_integer_cost (INTVAL (x)); + + case CONST_VECTOR: +- if (LSX_SUPPORTED_MODE_P (GET_MODE (x)) ++ if ((LSX_SUPPORTED_MODE_P (GET_MODE (x)) ++ || LASX_SUPPORTED_MODE_P (GET_MODE (x))) + && loongarch_const_vector_same_int_p (x, GET_MODE (x), -512, 511)) + return 1; + /* Fall through. */ +@@ -3257,10 +3265,11 @@ loongarch_legitimize_move (machine_mode mode, rtx dest, rtx src) + + /* Both src and dest are non-registers; one special case is supported where + the source is (const_int 0) and the store can source the zero register. +- LSX is never able to source the zero register directly in ++ LSX and LASX are never able to source the zero register directly in + memory operations. 
*/ + if (!register_operand (dest, mode) && !register_operand (src, mode) +- && (!const_0_operand (src, mode) || LSX_SUPPORTED_MODE_P (mode))) ++ && (!const_0_operand (src, mode) ++ || LSX_SUPPORTED_MODE_P (mode) || LASX_SUPPORTED_MODE_P (mode))) + { + loongarch_emit_move (dest, force_reg (mode, src)); + return true; +@@ -3842,6 +3851,7 @@ loongarch_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost, + int misalign ATTRIBUTE_UNUSED) + { + unsigned elements; ++ machine_mode mode = vectype != NULL ? TYPE_MODE (vectype) : DImode; + + switch (type_of_cost) + { +@@ -3858,7 +3868,8 @@ loongarch_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost, + return 1; + + case vec_perm: +- return 1; ++ return LASX_SUPPORTED_MODE_P (mode) ++ && !LSX_SUPPORTED_MODE_P (mode) ? 2 : 1; + + case unaligned_load: + case vector_gather_load: +@@ -3939,6 +3950,10 @@ loongarch_split_move_p (rtx dest, rtx src) + if (LSX_SUPPORTED_MODE_P (GET_MODE (dest))) + return loongarch_split_128bit_move_p (dest, src); + ++ /* Check if LASX moves need splitting. */ ++ if (LASX_SUPPORTED_MODE_P (GET_MODE (dest))) ++ return loongarch_split_256bit_move_p (dest, src); ++ + /* Otherwise split all multiword moves. 
*/ + return size > UNITS_PER_WORD; + } +@@ -3954,6 +3969,8 @@ loongarch_split_move (rtx dest, rtx src, rtx insn_) + gcc_checking_assert (loongarch_split_move_p (dest, src)); + if (LSX_SUPPORTED_MODE_P (GET_MODE (dest))) + loongarch_split_128bit_move (dest, src); ++ else if (LASX_SUPPORTED_MODE_P (GET_MODE (dest))) ++ loongarch_split_256bit_move (dest, src); + else if (FP_REG_RTX_P (dest) || FP_REG_RTX_P (src)) + { + if (!TARGET_64BIT && GET_MODE (dest) == DImode) +@@ -4119,7 +4136,7 @@ const char * + loongarch_output_move_index_float (rtx x, machine_mode mode, bool ldr) + { + int index = exact_log2 (GET_MODE_SIZE (mode)); +- if (!IN_RANGE (index, 2, 4)) ++ if (!IN_RANGE (index, 2, 5)) + return NULL; + + struct loongarch_address_info info; +@@ -4128,17 +4145,19 @@ loongarch_output_move_index_float (rtx x, machine_mode mode, bool ldr) + || !loongarch_legitimate_address_p (mode, x, false)) + return NULL; + +- const char *const insn[][3] = ++ const char *const insn[][4] = + { + { + "fstx.s\t%1,%0", + "fstx.d\t%1,%0", +- "vstx\t%w1,%0" ++ "vstx\t%w1,%0", ++ "xvstx\t%u1,%0" + }, + { + "fldx.s\t%0,%1", + "fldx.d\t%0,%1", +- "vldx\t%w0,%1" ++ "vldx\t%w0,%1", ++ "xvldx\t%u0,%1" + } + }; + +@@ -4172,6 +4191,34 @@ loongarch_split_128bit_move_p (rtx dest, rtx src) + return true; + } + ++/* Return true if a 256-bit move from SRC to DEST should be split. */ ++ ++bool ++loongarch_split_256bit_move_p (rtx dest, rtx src) ++{ ++ /* LSX-to-LSX moves can be done in a single instruction. */ ++ if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest)) ++ return false; ++ ++ /* Check for LSX loads and stores. */ ++ if (FP_REG_RTX_P (dest) && MEM_P (src)) ++ return false; ++ if (FP_REG_RTX_P (src) && MEM_P (dest)) ++ return false; ++ ++ /* Check for LSX set to an immediate const vector with valid replicated ++ element. */ ++ if (FP_REG_RTX_P (dest) ++ && loongarch_const_vector_same_int_p (src, GET_MODE (src), -512, 511)) ++ return false; ++ ++ /* Check for LSX load zero immediate. 
*/ ++ if (FP_REG_RTX_P (dest) && src == CONST0_RTX (GET_MODE (src))) ++ return false; ++ ++ return true; ++} ++ + /* Split a 128-bit move from SRC to DEST. */ + + void +@@ -4263,6 +4310,97 @@ loongarch_split_128bit_move (rtx dest, rtx src) + } + } + ++/* Split a 256-bit move from SRC to DEST. */ ++ ++void ++loongarch_split_256bit_move (rtx dest, rtx src) ++{ ++ int byte, index; ++ rtx low_dest, low_src, d, s; ++ ++ if (FP_REG_RTX_P (dest)) ++ { ++ gcc_assert (!MEM_P (src)); ++ ++ rtx new_dest = dest; ++ if (!TARGET_64BIT) ++ { ++ if (GET_MODE (dest) != V8SImode) ++ new_dest = simplify_gen_subreg (V8SImode, dest, GET_MODE (dest), 0); ++ } ++ else ++ { ++ if (GET_MODE (dest) != V4DImode) ++ new_dest = simplify_gen_subreg (V4DImode, dest, GET_MODE (dest), 0); ++ } ++ ++ for (byte = 0, index = 0; byte < GET_MODE_SIZE (GET_MODE (dest)); ++ byte += UNITS_PER_WORD, index++) ++ { ++ s = loongarch_subword_at_byte (src, byte); ++ if (!TARGET_64BIT) ++ emit_insn (gen_lasx_xvinsgr2vr_w (new_dest, s, new_dest, ++ GEN_INT (1 << index))); ++ else ++ emit_insn (gen_lasx_xvinsgr2vr_d (new_dest, s, new_dest, ++ GEN_INT (1 << index))); ++ } ++ } ++ else if (FP_REG_RTX_P (src)) ++ { ++ gcc_assert (!MEM_P (dest)); ++ ++ rtx new_src = src; ++ if (!TARGET_64BIT) ++ { ++ if (GET_MODE (src) != V8SImode) ++ new_src = simplify_gen_subreg (V8SImode, src, GET_MODE (src), 0); ++ } ++ else ++ { ++ if (GET_MODE (src) != V4DImode) ++ new_src = simplify_gen_subreg (V4DImode, src, GET_MODE (src), 0); ++ } ++ ++ for (byte = 0, index = 0; byte < GET_MODE_SIZE (GET_MODE (src)); ++ byte += UNITS_PER_WORD, index++) ++ { ++ d = loongarch_subword_at_byte (dest, byte); ++ if (!TARGET_64BIT) ++ emit_insn (gen_lsx_vpickve2gr_w (d, new_src, GEN_INT (index))); ++ else ++ emit_insn (gen_lsx_vpickve2gr_d (d, new_src, GEN_INT (index))); ++ } ++ } ++ else ++ { ++ low_dest = loongarch_subword_at_byte (dest, 0); ++ low_src = loongarch_subword_at_byte (src, 0); ++ gcc_assert (REG_P (low_dest) && REG_P (low_src)); ++ 
/* Make sure the source register is not written before reading. */ ++ if (REGNO (low_dest) <= REGNO (low_src)) ++ { ++ for (byte = 0; byte < GET_MODE_SIZE (TImode); ++ byte += UNITS_PER_WORD) ++ { ++ d = loongarch_subword_at_byte (dest, byte); ++ s = loongarch_subword_at_byte (src, byte); ++ loongarch_emit_move (d, s); ++ } ++ } ++ else ++ { ++ for (byte = GET_MODE_SIZE (TImode) - UNITS_PER_WORD; byte >= 0; ++ byte -= UNITS_PER_WORD) ++ { ++ d = loongarch_subword_at_byte (dest, byte); ++ s = loongarch_subword_at_byte (src, byte); ++ loongarch_emit_move (d, s); ++ } ++ } ++ } ++} ++ + + /* Split a COPY_S.D with operands DEST, SRC and INDEX. GEN is a function + used to generate subregs. */ +@@ -4350,11 +4488,12 @@ loongarch_output_move (rtx dest, rtx src) + machine_mode mode = GET_MODE (dest); + bool dbl_p = (GET_MODE_SIZE (mode) == 8); + bool lsx_p = LSX_SUPPORTED_MODE_P (mode); ++ bool lasx_p = LASX_SUPPORTED_MODE_P (mode); + + if (loongarch_split_move_p (dest, src)) + return "#"; + +- if ((lsx_p) ++ if ((lsx_p || lasx_p) + && dest_code == REG && FP_REG_P (REGNO (dest)) + && src_code == CONST_VECTOR + && CONST_INT_P (CONST_VECTOR_ELT (src, 0))) +@@ -4364,6 +4503,8 @@ loongarch_output_move (rtx dest, rtx src) + { + case 16: + return "vrepli.%v0\t%w0,%E1"; ++ case 32: ++ return "xvrepli.%v0\t%u0,%E1"; + default: gcc_unreachable (); + } + } +@@ -4378,13 +4519,15 @@ loongarch_output_move (rtx dest, rtx src) + + if (FP_REG_P (REGNO (dest))) + { +- if (lsx_p) ++ if (lsx_p || lasx_p) + { + gcc_assert (src == CONST0_RTX (GET_MODE (src))); + switch (GET_MODE_SIZE (mode)) + { + case 16: + return "vrepli.b\t%w0,0"; ++ case 32: ++ return "xvrepli.b\t%u0,0"; + default: + gcc_unreachable (); + } +@@ -4517,12 +4660,14 @@ loongarch_output_move (rtx dest, rtx src) + { + if (dest_code == REG && FP_REG_P (REGNO (dest))) + { +- if (lsx_p) ++ if (lsx_p || lasx_p) + { + switch (GET_MODE_SIZE (mode)) + { + case 16: + return "vori.b\t%w0,%w1,0"; ++ case 32: ++ return "xvori.b\t%u0,%u1,0"; 
+ default: + gcc_unreachable (); + } +@@ -4540,12 +4685,14 @@ loongarch_output_move (rtx dest, rtx src) + if (insn) + return insn; + +- if (lsx_p) ++ if (lsx_p || lasx_p) + { + switch (GET_MODE_SIZE (mode)) + { + case 16: + return "vst\t%w1,%0"; ++ case 32: ++ return "xvst\t%u1,%0"; + default: + gcc_unreachable (); + } +@@ -4566,12 +4713,14 @@ loongarch_output_move (rtx dest, rtx src) + if (insn) + return insn; + +- if (lsx_p) ++ if (lsx_p || lasx_p) + { + switch (GET_MODE_SIZE (mode)) + { + case 16: + return "vld\t%w0,%1"; ++ case 32: ++ return "xvld\t%u0,%1"; + default: + gcc_unreachable (); + } +@@ -5599,18 +5748,27 @@ loongarch_print_operand_reloc (FILE *file, rtx op, bool hi64_part, + 'T' Print 'f' for (eq:CC ...), 't' for (ne:CC ...), + 'z' for (eq:?I ...), 'n' for (ne:?I ...). + 't' Like 'T', but with the EQ/NE cases reversed +- 'V' Print exact log2 of CONST_INT OP element 0 of a replicated +- CONST_VECTOR in decimal. ++ 'F' Print the FPU branch condition for comparison OP. ++ 'W' Print the inverse of the FPU branch condition for comparison OP. ++ 'w' Print a LSX register. ++ 'u' Print a LASX register. ++ 'T' Print 'f' for (eq:CC ...), 't' for (ne:CC ...), ++ 'z' for (eq:?I ...), 'n' for (ne:?I ...). ++ 't' Like 'T', but with the EQ/NE cases reversed ++ 'Y' Print loongarch_fp_conditions[INTVAL (OP)] ++ 'Z' Print OP and a comma for 8CC, otherwise print nothing. ++ 'z' Print $0 if OP is zero, otherwise print OP normally. + 'v' Print the insn size suffix b, h, w or d for vector modes V16QI, V8HI, + V4SI, V2SI, and w, d for vector modes V4SF, V2DF respectively. ++ 'V' Print exact log2 of CONST_INT OP element 0 of a replicated ++ CONST_VECTOR in decimal. + 'W' Print the inverse of the FPU branch condition for comparison OP. +- 'w' Print a LSX register. + 'X' Print CONST_INT OP in hexadecimal format. + 'x' Print the low 16 bits of CONST_INT OP in hexadecimal format. + 'Y' Print loongarch_fp_conditions[INTVAL (OP)] + 'y' Print exact log2 of CONST_INT OP in decimal. 
+ 'Z' Print OP and a comma for 8CC, otherwise print nothing. +- 'z' Print $r0 if OP is zero, otherwise print OP normally. */ ++ 'z' Print $0 if OP is zero, otherwise print OP normally. */ + + static void + loongarch_print_operand (FILE *file, rtx op, int letter) +@@ -5752,46 +5910,11 @@ loongarch_print_operand (FILE *file, rtx op, int letter) + output_operand_lossage ("invalid use of '%%%c'", letter); + break; + +- case 'v': +- switch (GET_MODE (op)) +- { +- case E_V16QImode: +- case E_V32QImode: +- fprintf (file, "b"); +- break; +- case E_V8HImode: +- case E_V16HImode: +- fprintf (file, "h"); +- break; +- case E_V4SImode: +- case E_V4SFmode: +- case E_V8SImode: +- case E_V8SFmode: +- fprintf (file, "w"); +- break; +- case E_V2DImode: +- case E_V2DFmode: +- case E_V4DImode: +- case E_V4DFmode: +- fprintf (file, "d"); +- break; +- default: +- output_operand_lossage ("invalid use of '%%%c'", letter); +- } +- break; +- + case 'W': + loongarch_print_float_branch_condition (file, reverse_condition (code), + letter); + break; + +- case 'w': +- if (code == REG && LSX_REG_P (REGNO (op))) +- fprintf (file, "$vr%s", ®_names[REGNO (op)][2]); +- else +- output_operand_lossage ("invalid use of '%%%c'", letter); +- break; +- + case 'x': + if (CONST_INT_P (op)) + fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op) & 0xffff); +@@ -5833,6 +5956,48 @@ loongarch_print_operand (FILE *file, rtx op, int letter) + fputc (',', file); + break; + ++ case 'w': ++ if (code == REG && LSX_REG_P (REGNO (op))) ++ fprintf (file, "$vr%s", ®_names[REGNO (op)][2]); ++ else ++ output_operand_lossage ("invalid use of '%%%c'", letter); ++ break; ++ ++ case 'u': ++ if (code == REG && LASX_REG_P (REGNO (op))) ++ fprintf (file, "$xr%s", ®_names[REGNO (op)][2]); ++ else ++ output_operand_lossage ("invalid use of '%%%c'", letter); ++ break; ++ ++ case 'v': ++ switch (GET_MODE (op)) ++ { ++ case E_V16QImode: ++ case E_V32QImode: ++ fprintf (file, "b"); ++ break; ++ case E_V8HImode: ++ case E_V16HImode: ++ 
fprintf (file, "h"); ++ break; ++ case E_V4SImode: ++ case E_V4SFmode: ++ case E_V8SImode: ++ case E_V8SFmode: ++ fprintf (file, "w"); ++ break; ++ case E_V2DImode: ++ case E_V2DFmode: ++ case E_V4DImode: ++ case E_V4DFmode: ++ fprintf (file, "d"); ++ break; ++ default: ++ output_operand_lossage ("invalid use of '%%%c'", letter); ++ } ++ break; ++ + default: + switch (code) + { +@@ -6163,13 +6328,18 @@ loongarch_hard_regno_mode_ok_uncached (unsigned int regno, machine_mode mode) + size = GET_MODE_SIZE (mode); + mclass = GET_MODE_CLASS (mode); + +- if (GP_REG_P (regno) && !LSX_SUPPORTED_MODE_P (mode)) ++ if (GP_REG_P (regno) && !LSX_SUPPORTED_MODE_P (mode) ++ && !LASX_SUPPORTED_MODE_P (mode)) + return ((regno - GP_REG_FIRST) & 1) == 0 || size <= UNITS_PER_WORD; + + /* For LSX, allow TImode and 128-bit vector modes in all FPR. */ + if (FP_REG_P (regno) && LSX_SUPPORTED_MODE_P (mode)) + return true; + ++ /* FIXED ME: For LASX, allow TImode and 256-bit vector modes in all FPR. */ ++ if (FP_REG_P (regno) && LASX_SUPPORTED_MODE_P (mode)) ++ return true; ++ + if (FP_REG_P (regno)) + { + if (mclass == MODE_FLOAT +@@ -6222,6 +6392,9 @@ loongarch_hard_regno_nregs (unsigned int regno, machine_mode mode) + if (LSX_SUPPORTED_MODE_P (mode)) + return 1; + ++ if (LASX_SUPPORTED_MODE_P (mode)) ++ return 1; ++ + return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG; + } + +@@ -6251,7 +6424,10 @@ loongarch_class_max_nregs (enum reg_class rclass, machine_mode mode) + { + if (loongarch_hard_regno_mode_ok (FP_REG_FIRST, mode)) + { +- if (LSX_SUPPORTED_MODE_P (mode)) ++ /* Fixed me. 
*/ ++ if (LASX_SUPPORTED_MODE_P (mode)) ++ size = MIN (size, UNITS_PER_LASX_REG); ++ else if (LSX_SUPPORTED_MODE_P (mode)) + size = MIN (size, UNITS_PER_LSX_REG); + else + size = MIN (size, UNITS_PER_FPREG); +@@ -6269,6 +6445,10 @@ static bool + loongarch_can_change_mode_class (machine_mode from, machine_mode to, + reg_class_t rclass) + { ++ /* Allow conversions between different LSX/LASX vector modes. */ ++ if (LASX_SUPPORTED_MODE_P (from) && LASX_SUPPORTED_MODE_P (to)) ++ return true; ++ + /* Allow conversions between different LSX vector modes. */ + if (LSX_SUPPORTED_MODE_P (from) && LSX_SUPPORTED_MODE_P (to)) + return true; +@@ -6292,7 +6472,8 @@ loongarch_mode_ok_for_mov_fmt_p (machine_mode mode) + return TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT; + + default: +- return LSX_SUPPORTED_MODE_P (mode); ++ return ISA_HAS_LASX ? LASX_SUPPORTED_MODE_P (mode) ++ : LSX_SUPPORTED_MODE_P (mode); + } + } + +@@ -6494,7 +6675,8 @@ loongarch_valid_pointer_mode (scalar_int_mode mode) + static bool + loongarch_vector_mode_supported_p (machine_mode mode) + { +- return LSX_SUPPORTED_MODE_P (mode); ++ return ISA_HAS_LASX ? LASX_SUPPORTED_MODE_P (mode) ++ : LSX_SUPPORTED_MODE_P (mode); + } + + /* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */ +@@ -6520,19 +6702,19 @@ loongarch_preferred_simd_mode (scalar_mode mode) + switch (mode) + { + case E_QImode: +- return E_V16QImode; ++ return ISA_HAS_LASX ? E_V32QImode : E_V16QImode; + case E_HImode: +- return E_V8HImode; ++ return ISA_HAS_LASX ? E_V16HImode : E_V8HImode; + case E_SImode: +- return E_V4SImode; ++ return ISA_HAS_LASX ? E_V8SImode : E_V4SImode; + case E_DImode: +- return E_V2DImode; ++ return ISA_HAS_LASX ? E_V4DImode : E_V2DImode; + + case E_SFmode: +- return E_V4SFmode; ++ return ISA_HAS_LASX ? E_V8SFmode : E_V4SFmode; + + case E_DFmode: +- return E_V2DFmode; ++ return ISA_HAS_LASX ? 
E_V4DFmode : E_V2DFmode; + + default: + break; +@@ -6543,7 +6725,12 @@ loongarch_preferred_simd_mode (scalar_mode mode) + static unsigned int + loongarch_autovectorize_vector_modes (vector_modes *modes, bool) + { +- if (ISA_HAS_LSX) ++ if (ISA_HAS_LASX) ++ { ++ modes->safe_push (V32QImode); ++ modes->safe_push (V16QImode); ++ } ++ else if (ISA_HAS_LSX) + { + modes->safe_push (V16QImode); + } +@@ -6723,11 +6910,18 @@ const char * + loongarch_lsx_output_division (const char *division, rtx *operands) + { + const char *s; ++ machine_mode mode = GET_MODE (*operands); + + s = division; + if (TARGET_CHECK_ZERO_DIV) + { +- if (ISA_HAS_LSX) ++ if (ISA_HAS_LASX && GET_MODE_SIZE (mode) == 32) ++ { ++ output_asm_insn ("xvsetallnez.%v0\t$fcc7,%u2",operands); ++ output_asm_insn (s, operands); ++ output_asm_insn ("bcnez\t$fcc7,1f", operands); ++ } ++ else if (ISA_HAS_LSX) + { + output_asm_insn ("vsetallnez.%v0\t$fcc7,%w2",operands); + output_asm_insn (s, operands); +@@ -7566,7 +7760,7 @@ loongarch_expand_lsx_shuffle (struct expand_vec_perm_d *d) + rtx_insn *insn; + unsigned i; + +- if (!ISA_HAS_LSX) ++ if (!ISA_HAS_LSX && !ISA_HAS_LASX) + return false; + + for (i = 0; i < d->nelt; i++) +@@ -7590,40 +7784,484 @@ loongarch_expand_lsx_shuffle (struct expand_vec_perm_d *d) + return true; + } + +-void +-loongarch_expand_vec_perm (rtx target, rtx op0, rtx op1, rtx sel) ++/* Try to simplify a two vector permutation using 2 intra-lane interleave ++ insns and cross-lane shuffle for 32-byte vectors. 
*/ ++ ++static bool ++loongarch_expand_vec_perm_interleave (struct expand_vec_perm_d *d) + { +- machine_mode vmode = GET_MODE (target); ++ unsigned i, nelt; ++ rtx t1,t2,t3; ++ rtx (*gen_high) (rtx, rtx, rtx); ++ rtx (*gen_low) (rtx, rtx, rtx); ++ machine_mode mode = GET_MODE (d->target); + +- switch (vmode) ++ if (d->one_vector_p) ++ return false; ++ if (ISA_HAS_LASX && GET_MODE_SIZE (d->vmode) == 32) ++ ; ++ else ++ return false; ++ ++ nelt = d->nelt; ++ if (d->perm[0] != 0 && d->perm[0] != nelt / 2) ++ return false; ++ for (i = 0; i < nelt; i += 2) ++ if (d->perm[i] != d->perm[0] + i / 2 ++ || d->perm[i + 1] != d->perm[0] + i / 2 + nelt) ++ return false; ++ ++ if (d->testing_p) ++ return true; ++ ++ switch (d->vmode) + { +- case E_V16QImode: +- emit_insn (gen_lsx_vshuf_b (target, op1, op0, sel)); ++ case E_V32QImode: ++ gen_high = gen_lasx_xvilvh_b; ++ gen_low = gen_lasx_xvilvl_b; + break; +- case E_V2DFmode: +- emit_insn (gen_lsx_vshuf_d_f (target, sel, op1, op0)); ++ case E_V16HImode: ++ gen_high = gen_lasx_xvilvh_h; ++ gen_low = gen_lasx_xvilvl_h; + break; +- case E_V2DImode: +- emit_insn (gen_lsx_vshuf_d (target, sel, op1, op0)); ++ case E_V8SImode: ++ gen_high = gen_lasx_xvilvh_w; ++ gen_low = gen_lasx_xvilvl_w; + break; +- case E_V4SFmode: +- emit_insn (gen_lsx_vshuf_w_f (target, sel, op1, op0)); ++ case E_V4DImode: ++ gen_high = gen_lasx_xvilvh_d; ++ gen_low = gen_lasx_xvilvl_d; + break; +- case E_V4SImode: +- emit_insn (gen_lsx_vshuf_w (target, sel, op1, op0)); ++ case E_V8SFmode: ++ gen_high = gen_lasx_xvilvh_w_f; ++ gen_low = gen_lasx_xvilvl_w_f; + break; +- case E_V8HImode: +- emit_insn (gen_lsx_vshuf_h (target, sel, op1, op0)); ++ case E_V4DFmode: ++ gen_high = gen_lasx_xvilvh_d_f; ++ gen_low = gen_lasx_xvilvl_d_f; + break; + default: +- break; ++ gcc_unreachable (); ++ } ++ ++ t1 = gen_reg_rtx (mode); ++ t2 = gen_reg_rtx (mode); ++ emit_insn (gen_high (t1, d->op0, d->op1)); ++ emit_insn (gen_low (t2, d->op0, d->op1)); ++ if (mode == V4DFmode || mode 
== V8SFmode) ++ { ++ t3 = gen_reg_rtx (V4DFmode); ++ if (d->perm[0]) ++ emit_insn (gen_lasx_xvpermi_q_v4df (t3, gen_lowpart (V4DFmode, t1), ++ gen_lowpart (V4DFmode, t2), ++ GEN_INT (0x31))); ++ else ++ emit_insn (gen_lasx_xvpermi_q_v4df (t3, gen_lowpart (V4DFmode, t1), ++ gen_lowpart (V4DFmode, t2), ++ GEN_INT (0x20))); + } ++ else ++ { ++ t3 = gen_reg_rtx (V4DImode); ++ if (d->perm[0]) ++ emit_insn (gen_lasx_xvpermi_q_v4di (t3, gen_lowpart (V4DImode, t1), ++ gen_lowpart (V4DImode, t2), ++ GEN_INT (0x31))); ++ else ++ emit_insn (gen_lasx_xvpermi_q_v4di (t3, gen_lowpart (V4DImode, t1), ++ gen_lowpart (V4DImode, t2), ++ GEN_INT (0x20))); ++ } ++ emit_move_insn (d->target, gen_lowpart (mode, t3)); ++ return true; + } + ++/* Implement extract-even and extract-odd permutations. */ ++ + static bool +-loongarch_try_expand_lsx_vshuf_const (struct expand_vec_perm_d *d) ++loongarch_expand_vec_perm_even_odd_1 (struct expand_vec_perm_d *d, unsigned odd) + { +- int i; ++ rtx t1; ++ machine_mode mode = GET_MODE (d->target); ++ ++ if (d->testing_p) ++ return true; ++ ++ t1 = gen_reg_rtx (mode); ++ ++ switch (d->vmode) ++ { ++ case E_V4DFmode: ++ /* Shuffle the lanes around into { 0 4 2 6 } and { 1 5 3 7 }. */ ++ if (odd) ++ emit_insn (gen_lasx_xvilvh_d_f (t1, d->op0, d->op1)); ++ else ++ emit_insn (gen_lasx_xvilvl_d_f (t1, d->op0, d->op1)); ++ ++ /* Shuffle within the 256-bit lanes to produce the result required. ++ { 0 2 4 6 } | { 1 3 5 7 }. */ ++ emit_insn (gen_lasx_xvpermi_d_v4df (d->target, t1, GEN_INT (0xd8))); ++ break; ++ ++ case E_V4DImode: ++ if (odd) ++ emit_insn (gen_lasx_xvilvh_d (t1, d->op0, d->op1)); ++ else ++ emit_insn (gen_lasx_xvilvl_d (t1, d->op0, d->op1)); ++ ++ emit_insn (gen_lasx_xvpermi_d_v4di (d->target, t1, GEN_INT (0xd8))); ++ break; ++ ++ case E_V8SFmode: ++ /* Shuffle the lanes around into: ++ { 0 2 8 a 4 6 c e } | { 1 3 9 b 5 7 d f }. 
*/ ++ if (odd) ++ emit_insn (gen_lasx_xvpickod_w_f (t1, d->op0, d->op1)); ++ else ++ emit_insn (gen_lasx_xvpickev_w_f (t1, d->op0, d->op1)); ++ ++ /* Shuffle within the 256-bit lanes to produce the result required. ++ { 0 2 4 6 8 a c e } | { 1 3 5 7 9 b d f }. */ ++ emit_insn (gen_lasx_xvpermi_d_v8sf (d->target, t1, GEN_INT (0xd8))); ++ break; ++ ++ case E_V8SImode: ++ if (odd) ++ emit_insn (gen_lasx_xvpickod_w (t1, d->op0, d->op1)); ++ else ++ emit_insn (gen_lasx_xvpickev_w (t1, d->op0, d->op1)); ++ ++ emit_insn (gen_lasx_xvpermi_d_v8si (d->target, t1, GEN_INT (0xd8))); ++ break; ++ ++ case E_V16HImode: ++ if (odd) ++ emit_insn (gen_lasx_xvpickod_h (t1, d->op0, d->op1)); ++ else ++ emit_insn (gen_lasx_xvpickev_h (t1, d->op0, d->op1)); ++ ++ emit_insn (gen_lasx_xvpermi_d_v16hi (d->target, t1, GEN_INT (0xd8))); ++ break; ++ ++ case E_V32QImode: ++ if (odd) ++ emit_insn (gen_lasx_xvpickod_b (t1, d->op0, d->op1)); ++ else ++ emit_insn (gen_lasx_xvpickev_b (t1, d->op0, d->op1)); ++ ++ emit_insn (gen_lasx_xvpermi_d_v32qi (d->target, t1, GEN_INT (0xd8))); ++ break; ++ ++ default: ++ gcc_unreachable (); ++ } ++ ++ return true; ++} ++ ++/* Pattern match extract-even and extract-odd permutations. */ ++ ++static bool ++loongarch_expand_vec_perm_even_odd (struct expand_vec_perm_d *d) ++{ ++ unsigned i, odd, nelt = d->nelt; ++ if (!ISA_HAS_LASX) ++ return false; ++ ++ odd = d->perm[0]; ++ if (odd != 0 && odd != 1) ++ return false; ++ ++ for (i = 1; i < nelt; ++i) ++ if (d->perm[i] != 2 * i + odd) ++ return false; ++ ++ return loongarch_expand_vec_perm_even_odd_1 (d, odd); ++} ++ ++/* Expand a variable vector permutation for LASX. 
*/ ++ ++void ++loongarch_expand_vec_perm_1 (rtx operands[]) ++{ ++ rtx target = operands[0]; ++ rtx op0 = operands[1]; ++ rtx op1 = operands[2]; ++ rtx mask = operands[3]; ++ ++ bool one_operand_shuffle = rtx_equal_p (op0, op1); ++ rtx t1 = NULL; ++ rtx t2 = NULL; ++ rtx t3, t4, t5, t6, vt = NULL; ++ rtx vec[32] = {NULL}; ++ machine_mode mode = GET_MODE (op0); ++ machine_mode maskmode = GET_MODE (mask); ++ int w, i; ++ ++ /* Number of elements in the vector. */ ++ w = GET_MODE_NUNITS (mode); ++ ++ rtx round_data[MAX_VECT_LEN]; ++ rtx round_reg, round_data_rtx; ++ ++ if (mode != E_V32QImode) ++ { ++ for (int i = 0; i < w; i += 1) ++ { ++ round_data[i] = GEN_INT (0x1f); ++ } ++ ++ if (mode == E_V4DFmode) ++ { ++ round_data_rtx = gen_rtx_CONST_VECTOR (E_V4DImode, ++ gen_rtvec_v (w, round_data)); ++ round_reg = gen_reg_rtx (E_V4DImode); ++ } ++ else if (mode == E_V8SFmode) ++ { ++ ++ round_data_rtx = gen_rtx_CONST_VECTOR (E_V8SImode, ++ gen_rtvec_v (w, round_data)); ++ round_reg = gen_reg_rtx (E_V8SImode); ++ } ++ else ++ { ++ round_data_rtx = gen_rtx_CONST_VECTOR (mode, ++ gen_rtvec_v (w, round_data)); ++ round_reg = gen_reg_rtx (mode); ++ } ++ ++ emit_move_insn (round_reg, round_data_rtx); ++ switch (mode) ++ { ++ case E_V32QImode: ++ emit_insn (gen_andv32qi3 (mask, mask, round_reg)); ++ break; ++ case E_V16HImode: ++ emit_insn (gen_andv16hi3 (mask, mask, round_reg)); ++ break; ++ case E_V8SImode: ++ case E_V8SFmode: ++ emit_insn (gen_andv8si3 (mask, mask, round_reg)); ++ break; ++ case E_V4DImode: ++ case E_V4DFmode: ++ emit_insn (gen_andv4di3 (mask, mask, round_reg)); ++ break; ++ default: ++ gcc_unreachable (); ++ break; ++ } ++ } ++ ++ if (mode == V4DImode || mode == V4DFmode) ++ { ++ maskmode = mode = V8SImode; ++ w = 8; ++ t1 = gen_reg_rtx (maskmode); ++ ++ /* Replicate the low bits of the V4DImode mask into V8SImode: ++ mask = { A B C D } ++ t1 = { A A B B C C D D }. 
*/ ++ for (i = 0; i < w / 2; ++i) ++ vec[i*2 + 1] = vec[i*2] = GEN_INT (i * 2); ++ vt = gen_rtx_CONST_VECTOR (maskmode, gen_rtvec_v (w, vec)); ++ vt = force_reg (maskmode, vt); ++ mask = gen_lowpart (maskmode, mask); ++ emit_insn (gen_lasx_xvperm_w (t1, mask, vt)); ++ ++ /* Multiply the shuffle indicies by two. */ ++ t1 = expand_simple_binop (maskmode, PLUS, t1, t1, t1, 1, ++ OPTAB_DIRECT); ++ ++ /* Add one to the odd shuffle indicies: ++ t1 = { A*2, A*2+1, B*2, B*2+1, ... }. */ ++ for (i = 0; i < w / 2; ++i) ++ { ++ vec[i * 2] = const0_rtx; ++ vec[i * 2 + 1] = const1_rtx; ++ } ++ vt = gen_rtx_CONST_VECTOR (maskmode, gen_rtvec_v (w, vec)); ++ vt = validize_mem (force_const_mem (maskmode, vt)); ++ t1 = expand_simple_binop (maskmode, PLUS, t1, vt, t1, 1, ++ OPTAB_DIRECT); ++ ++ /* Continue as if V8SImode (resp. V32QImode) was used initially. */ ++ operands[3] = mask = t1; ++ target = gen_reg_rtx (mode); ++ op0 = gen_lowpart (mode, op0); ++ op1 = gen_lowpart (mode, op1); ++ } ++ ++ switch (mode) ++ { ++ case E_V8SImode: ++ if (one_operand_shuffle) ++ { ++ emit_insn (gen_lasx_xvperm_w (target, op0, mask)); ++ if (target != operands[0]) ++ emit_move_insn (operands[0], ++ gen_lowpart (GET_MODE (operands[0]), target)); ++ } ++ else ++ { ++ t1 = gen_reg_rtx (V8SImode); ++ t2 = gen_reg_rtx (V8SImode); ++ emit_insn (gen_lasx_xvperm_w (t1, op0, mask)); ++ emit_insn (gen_lasx_xvperm_w (t2, op1, mask)); ++ goto merge_two; ++ } ++ return; ++ ++ case E_V8SFmode: ++ mask = gen_lowpart (V8SImode, mask); ++ if (one_operand_shuffle) ++ emit_insn (gen_lasx_xvperm_w_f (target, op0, mask)); ++ else ++ { ++ t1 = gen_reg_rtx (V8SFmode); ++ t2 = gen_reg_rtx (V8SFmode); ++ emit_insn (gen_lasx_xvperm_w_f (t1, op0, mask)); ++ emit_insn (gen_lasx_xvperm_w_f (t2, op1, mask)); ++ goto merge_two; ++ } ++ return; ++ ++ case E_V16HImode: ++ if (one_operand_shuffle) ++ { ++ t1 = gen_reg_rtx (V16HImode); ++ t2 = gen_reg_rtx (V16HImode); ++ emit_insn (gen_lasx_xvpermi_d_v16hi (t1, op0, GEN_INT 
(0x44))); ++ emit_insn (gen_lasx_xvpermi_d_v16hi (t2, op0, GEN_INT (0xee))); ++ emit_insn (gen_lasx_xvshuf_h (target, mask, t2, t1)); ++ } ++ else ++ { ++ t1 = gen_reg_rtx (V16HImode); ++ t2 = gen_reg_rtx (V16HImode); ++ t3 = gen_reg_rtx (V16HImode); ++ t4 = gen_reg_rtx (V16HImode); ++ t5 = gen_reg_rtx (V16HImode); ++ t6 = gen_reg_rtx (V16HImode); ++ emit_insn (gen_lasx_xvpermi_d_v16hi (t3, op0, GEN_INT (0x44))); ++ emit_insn (gen_lasx_xvpermi_d_v16hi (t4, op0, GEN_INT (0xee))); ++ emit_insn (gen_lasx_xvshuf_h (t1, mask, t4, t3)); ++ emit_insn (gen_lasx_xvpermi_d_v16hi (t5, op1, GEN_INT (0x44))); ++ emit_insn (gen_lasx_xvpermi_d_v16hi (t6, op1, GEN_INT (0xee))); ++ emit_insn (gen_lasx_xvshuf_h (t2, mask, t6, t5)); ++ goto merge_two; ++ } ++ return; ++ ++ case E_V32QImode: ++ if (one_operand_shuffle) ++ { ++ t1 = gen_reg_rtx (V32QImode); ++ t2 = gen_reg_rtx (V32QImode); ++ emit_insn (gen_lasx_xvpermi_d_v32qi (t1, op0, GEN_INT (0x44))); ++ emit_insn (gen_lasx_xvpermi_d_v32qi (t2, op0, GEN_INT (0xee))); ++ emit_insn (gen_lasx_xvshuf_b (target, t2, t1, mask)); ++ } ++ else ++ { ++ t1 = gen_reg_rtx (V32QImode); ++ t2 = gen_reg_rtx (V32QImode); ++ t3 = gen_reg_rtx (V32QImode); ++ t4 = gen_reg_rtx (V32QImode); ++ t5 = gen_reg_rtx (V32QImode); ++ t6 = gen_reg_rtx (V32QImode); ++ emit_insn (gen_lasx_xvpermi_d_v32qi (t3, op0, GEN_INT (0x44))); ++ emit_insn (gen_lasx_xvpermi_d_v32qi (t4, op0, GEN_INT (0xee))); ++ emit_insn (gen_lasx_xvshuf_b (t1, t4, t3, mask)); ++ emit_insn (gen_lasx_xvpermi_d_v32qi (t5, op1, GEN_INT (0x44))); ++ emit_insn (gen_lasx_xvpermi_d_v32qi (t6, op1, GEN_INT (0xee))); ++ emit_insn (gen_lasx_xvshuf_b (t2, t6, t5, mask)); ++ goto merge_two; ++ } ++ return; ++ ++ default: ++ gcc_assert (GET_MODE_SIZE (mode) == 32); ++ break; ++ } ++ ++merge_two: ++ /* Then merge them together. The key is whether any given control ++ element contained a bit set that indicates the second word. 
*/ ++ rtx xops[6]; ++ mask = operands[3]; ++ vt = GEN_INT (w); ++ vt = gen_const_vec_duplicate (maskmode, vt); ++ vt = force_reg (maskmode, vt); ++ mask = expand_simple_binop (maskmode, AND, mask, vt, ++ NULL_RTX, 0, OPTAB_DIRECT); ++ if (GET_MODE (target) != mode) ++ target = gen_reg_rtx (mode); ++ xops[0] = target; ++ xops[1] = gen_lowpart (mode, t2); ++ xops[2] = gen_lowpart (mode, t1); ++ xops[3] = gen_rtx_EQ (maskmode, mask, vt); ++ xops[4] = mask; ++ xops[5] = vt; ++ ++ loongarch_expand_vec_cond_expr (mode, maskmode, xops); ++ if (target != operands[0]) ++ emit_move_insn (operands[0], ++ gen_lowpart (GET_MODE (operands[0]), target)); ++} ++ ++void ++loongarch_expand_vec_perm (rtx target, rtx op0, rtx op1, rtx sel) ++{ ++ machine_mode vmode = GET_MODE (target); ++ auto nelt = GET_MODE_NUNITS (vmode); ++ auto round_reg = gen_reg_rtx (vmode); ++ rtx round_data[MAX_VECT_LEN]; ++ ++ for (int i = 0; i < nelt; i += 1) ++ { ++ round_data[i] = GEN_INT (0x1f); ++ } ++ ++ rtx round_data_rtx = gen_rtx_CONST_VECTOR (vmode, gen_rtvec_v (nelt, round_data)); ++ emit_move_insn (round_reg, round_data_rtx); ++ ++ switch (vmode) ++ { ++ case E_V16QImode: ++ emit_insn (gen_andv16qi3 (sel, sel, round_reg)); ++ emit_insn (gen_lsx_vshuf_b (target, op1, op0, sel)); ++ break; ++ case E_V2DFmode: ++ emit_insn (gen_andv2di3 (sel, sel, round_reg)); ++ emit_insn (gen_lsx_vshuf_d_f (target, sel, op1, op0)); ++ break; ++ case E_V2DImode: ++ emit_insn (gen_andv2di3 (sel, sel, round_reg)); ++ emit_insn (gen_lsx_vshuf_d (target, sel, op1, op0)); ++ break; ++ case E_V4SFmode: ++ emit_insn (gen_andv4si3 (sel, sel, round_reg)); ++ emit_insn (gen_lsx_vshuf_w_f (target, sel, op1, op0)); ++ break; ++ case E_V4SImode: ++ emit_insn (gen_andv4si3 (sel, sel, round_reg)); ++ emit_insn (gen_lsx_vshuf_w (target, sel, op1, op0)); ++ break; ++ case E_V8HImode: ++ emit_insn (gen_andv8hi3 (sel, sel, round_reg)); ++ emit_insn (gen_lsx_vshuf_h (target, sel, op1, op0)); ++ break; ++ default: ++ break; ++ } ++} ++ 
++static bool ++loongarch_try_expand_lsx_vshuf_const (struct expand_vec_perm_d *d) ++{ ++ int i; + rtx target, op0, op1, sel, tmp; + rtx rperm[MAX_VECT_LEN]; + +@@ -7724,25 +8362,1302 @@ loongarch_expand_vec_perm_const_1 (struct expand_vec_perm_d *d) + return true; + } + +- if (loongarch_expand_lsx_shuffle (d)) +- return true; +- return false; +-} +- +-/* Implementation of constant vector permuatation. This function identifies +- * recognized pattern of permuation selector argument, and use one or more +- * instruction(s) to finish the permutation job correctly. For unsupported +- * patterns, it will return false. */ +- +-static bool +-loongarch_expand_vec_perm_const_2 (struct expand_vec_perm_d *d) +-{ +- /* Although we have the LSX vec_perm template, there's still some +- 128bit vector permuatation operations send to vectorize_vec_perm_const. +- In this case, we just simpliy wrap them by single vshuf.* instruction, +- because LSX vshuf.* instruction just have the same behavior that GCC +- expects. */ +- return loongarch_try_expand_lsx_vshuf_const (d); ++ if (loongarch_expand_lsx_shuffle (d)) ++ return true; ++ if (loongarch_expand_vec_perm_even_odd (d)) ++ return true; ++ if (loongarch_expand_vec_perm_interleave (d)) ++ return true; ++ return false; ++} ++ ++/* Following are the assist function for const vector permutation support. 
*/ ++static bool ++loongarch_is_quad_duplicate (struct expand_vec_perm_d *d) ++{ ++ if (d->perm[0] >= d->nelt / 2) ++ return false; ++ ++ bool result = true; ++ unsigned char lhs = d->perm[0]; ++ unsigned char rhs = d->perm[d->nelt / 2]; ++ ++ if ((rhs - lhs) != d->nelt / 2) ++ return false; ++ ++ for (int i = 1; i < d->nelt; i += 1) ++ { ++ if ((i < d->nelt / 2) && (d->perm[i] != lhs)) ++ { ++ result = false; ++ break; ++ } ++ if ((i > d->nelt / 2) && (d->perm[i] != rhs)) ++ { ++ result = false; ++ break; ++ } ++ } ++ ++ return result; ++} ++ ++static bool ++loongarch_is_double_duplicate (struct expand_vec_perm_d *d) ++{ ++ if (!d->one_vector_p) ++ return false; ++ ++ if (d->nelt < 8) ++ return false; ++ ++ bool result = true; ++ unsigned char buf = d->perm[0]; ++ ++ for (int i = 1; i < d->nelt; i += 2) ++ { ++ if (d->perm[i] != buf) ++ { ++ result = false; ++ break; ++ } ++ if (d->perm[i - 1] != d->perm[i]) ++ { ++ result = false; ++ break; ++ } ++ buf += d->nelt / 4; ++ } ++ ++ return result; ++} ++ ++static bool ++loongarch_is_odd_extraction (struct expand_vec_perm_d *d) ++{ ++ bool result = true; ++ unsigned char buf = 1; ++ ++ for (int i = 0; i < d->nelt; i += 1) ++ { ++ if (buf != d->perm[i]) ++ { ++ result = false; ++ break; ++ } ++ buf += 2; ++ } ++ ++ return result; ++} ++ ++static bool ++loongarch_is_even_extraction (struct expand_vec_perm_d *d) ++{ ++ bool result = true; ++ unsigned char buf = 0; ++ ++ for (int i = 0; i < d->nelt; i += 1) ++ { ++ if (buf != d->perm[i]) ++ { ++ result = false; ++ break; ++ } ++ buf += 1; ++ } ++ ++ return result; ++} ++ ++static bool ++loongarch_is_extraction_permutation (struct expand_vec_perm_d *d) ++{ ++ bool result = true; ++ unsigned char buf = d->perm[0]; ++ ++ if (buf != 0 || buf != d->nelt) ++ return false; ++ ++ for (int i = 0; i < d->nelt; i += 1) ++ { ++ if (buf != d->perm[i]) ++ { ++ result = false; ++ break; ++ } ++ buf += 2; ++ } ++ ++ return result; ++} ++ ++static bool ++loongarch_is_center_extraction 
(struct expand_vec_perm_d *d) ++{ ++ bool result = true; ++ unsigned buf = d->nelt / 2; ++ ++ for (int i = 0; i < d->nelt; i += 1) ++ { ++ if (buf != d->perm[i]) ++ { ++ result = false; ++ break; ++ } ++ buf += 1; ++ } ++ ++ return result; ++} ++ ++static bool ++loongarch_is_reversing_permutation (struct expand_vec_perm_d *d) ++{ ++ if (!d->one_vector_p) ++ return false; ++ ++ bool result = true; ++ unsigned char buf = d->nelt - 1; ++ ++ for (int i = 0; i < d->nelt; i += 1) ++ { ++ if (d->perm[i] != buf) ++ { ++ result = false; ++ break; ++ } ++ ++ buf -= 1; ++ } ++ ++ return result; ++} ++ ++static bool ++loongarch_is_di_misalign_extract (struct expand_vec_perm_d *d) ++{ ++ if (d->nelt != 4 && d->nelt != 8) ++ return false; ++ ++ bool result = true; ++ unsigned char buf; ++ ++ if (d->nelt == 4) ++ { ++ buf = 1; ++ for (int i = 0; i < d->nelt; i += 1) ++ { ++ if (buf != d->perm[i]) ++ { ++ result = false; ++ break; ++ } ++ ++ buf += 1; ++ } ++ } ++ else if (d->nelt == 8) ++ { ++ buf = 2; ++ for (int i = 0; i < d->nelt; i += 1) ++ { ++ if (buf != d->perm[i]) ++ { ++ result = false; ++ break; ++ } ++ ++ buf += 1; ++ } ++ } ++ ++ return result; ++} ++ ++static bool ++loongarch_is_si_misalign_extract (struct expand_vec_perm_d *d) ++{ ++ if (d->vmode != E_V8SImode && d->vmode != E_V8SFmode) ++ return false; ++ bool result = true; ++ unsigned char buf = 1; ++ ++ for (int i = 0; i < d->nelt; i += 1) ++ { ++ if (buf != d->perm[i]) ++ { ++ result = false; ++ break; ++ } ++ buf += 1; ++ } ++ ++ return result; ++} ++ ++static bool ++loongarch_is_lasx_lowpart_interleave (struct expand_vec_perm_d *d) ++{ ++ bool result = true; ++ unsigned char buf = 0; ++ ++ for (int i = 0;i < d->nelt; i += 2) ++ { ++ if (buf != d->perm[i]) ++ { ++ result = false; ++ break; ++ } ++ buf += 1; ++ } ++ ++ if (result) ++ { ++ buf = d->nelt; ++ for (int i = 1; i < d->nelt; i += 2) ++ { ++ if (buf != d->perm[i]) ++ { ++ result = false; ++ break; ++ } ++ buf += 1; ++ } ++ } ++ ++ return result; ++} ++ 
++static bool ++loongarch_is_lasx_lowpart_interleave_2 (struct expand_vec_perm_d *d) ++{ ++ if (d->vmode != E_V32QImode) ++ return false; ++ bool result = true; ++ unsigned char buf = 0; ++ ++#define COMPARE_SELECTOR(INIT, BEGIN, END) \ ++ buf = INIT; \ ++ for (int i = BEGIN; i < END && result; i += 1) \ ++ { \ ++ if (buf != d->perm[i]) \ ++ { \ ++ result = false; \ ++ break; \ ++ } \ ++ buf += 1; \ ++ } ++ ++ COMPARE_SELECTOR (0, 0, 8); ++ COMPARE_SELECTOR (32, 8, 16); ++ COMPARE_SELECTOR (8, 16, 24); ++ COMPARE_SELECTOR (40, 24, 32); ++ ++#undef COMPARE_SELECTOR ++ return result; ++} ++ ++static bool ++loongarch_is_lasx_lowpart_extract (struct expand_vec_perm_d *d) ++{ ++ bool result = true; ++ unsigned char buf = 0; ++ ++ for (int i = 0; i < d->nelt / 2; i += 1) ++ { ++ if (buf != d->perm[i]) ++ { ++ result = false; ++ break; ++ } ++ buf += 1; ++ } ++ ++ if (result) ++ { ++ buf = d->nelt; ++ for (int i = d->nelt / 2; i < d->nelt; i += 1) ++ { ++ if (buf != d->perm[i]) ++ { ++ result = false; ++ break; ++ } ++ buf += 1; ++ } ++ } ++ ++ return result; ++} ++ ++static bool ++loongarch_is_lasx_highpart_interleave (expand_vec_perm_d *d) ++{ ++ bool result = true; ++ unsigned char buf = d->nelt / 2; ++ ++ for (int i = 0; i < d->nelt; i += 2) ++ { ++ if (buf != d->perm[i]) ++ { ++ result = false; ++ break; ++ } ++ buf += 1; ++ } ++ ++ if (result) ++ { ++ buf = d->nelt + d->nelt / 2; ++ for (int i = 1; i < d->nelt;i += 2) ++ { ++ if (buf != d->perm[i]) ++ { ++ result = false; ++ break; ++ } ++ buf += 1; ++ } ++ } ++ ++ return result; ++} ++ ++static bool ++loongarch_is_lasx_highpart_interleave_2 (struct expand_vec_perm_d *d) ++{ ++ if (d->vmode != E_V32QImode) ++ return false; ++ ++ bool result = true; ++ unsigned char buf = 0; ++ ++#define COMPARE_SELECTOR(INIT, BEGIN, END) \ ++ buf = INIT; \ ++ for (int i = BEGIN; i < END && result; i += 1) \ ++ { \ ++ if (buf != d->perm[i]) \ ++ { \ ++ result = false; \ ++ break; \ ++ } \ ++ buf += 1; \ ++ } ++ ++ COMPARE_SELECTOR 
(16, 0, 8); ++ COMPARE_SELECTOR (48, 8, 16); ++ COMPARE_SELECTOR (24, 16, 24); ++ COMPARE_SELECTOR (56, 24, 32); ++ ++#undef COMPARE_SELECTOR ++ return result; ++} ++ ++static bool ++loongarch_is_elem_duplicate (struct expand_vec_perm_d *d) ++{ ++ bool result = true; ++ unsigned char buf = d->perm[0]; ++ ++ for (int i = 0; i < d->nelt; i += 1) ++ { ++ if (buf != d->perm[i]) ++ { ++ result = false; ++ break; ++ } ++ } ++ ++ return result; ++} ++ ++inline bool ++loongarch_is_op_reverse_perm (struct expand_vec_perm_d *d) ++{ ++ return (d->vmode == E_V4DFmode) ++ && d->perm[0] == 2 && d->perm[1] == 3 ++ && d->perm[2] == 0 && d->perm[3] == 1; ++} ++ ++static bool ++loongarch_is_single_op_perm (struct expand_vec_perm_d *d) ++{ ++ bool result = true; ++ ++ for (int i = 0; i < d->nelt; i += 1) ++ { ++ if (d->perm[i] >= d->nelt) ++ { ++ result = false; ++ break; ++ } ++ } ++ ++ return result; ++} ++ ++static bool ++loongarch_is_divisible_perm (struct expand_vec_perm_d *d) ++{ ++ bool result = true; ++ ++ for (int i = 0; i < d->nelt / 2; i += 1) ++ { ++ if (d->perm[i] >= d->nelt) ++ { ++ result = false; ++ break; ++ } ++ } ++ ++ if (result) ++ { ++ for (int i = d->nelt / 2; i < d->nelt; i += 1) ++ { ++ if (d->perm[i] < d->nelt) ++ { ++ result = false; ++ break; ++ } ++ } ++ } ++ ++ return result; ++} ++ ++inline bool ++loongarch_is_triple_stride_extract (struct expand_vec_perm_d *d) ++{ ++ return (d->vmode == E_V4DImode || d->vmode == E_V4DFmode) ++ && d->perm[0] == 1 && d->perm[1] == 4 ++ && d->perm[2] == 7 && d->perm[3] == 0; ++} ++ ++/* In LASX, some permutation insn does not have the behavior that gcc expects ++ * when compiler wants to emit a vector permutation. ++ * ++ * 1. What GCC provides via vectorize_vec_perm_const ()'s paramater: ++ * When GCC wants to performs a vector permutation, it provides two op ++ * reigster, one target register, and a selector. 
++ * In const vector permutation case, GCC provides selector as a char array ++ * that contains original value; in variable vector permuatation ++ * (performs via vec_perm insn template), it provides a vector register. ++ * We assume that nelt is the elements numbers inside single vector in current ++ * 256bit vector mode. ++ * ++ * 2. What GCC expects to perform: ++ * Two op registers (op0, op1) will "combine" into a 512bit temp vector storage ++ * that has 2*nelt elements inside it; the low 256bit is op0, and high 256bit ++ * is op1, then the elements are indexed as below: ++ * 0 ~ nelt - 1 nelt ~ 2 * nelt - 1 ++ * |-------------------------|-------------------------| ++ * Low 256bit (op0) High 256bit (op1) ++ * For example, the second element in op1 (V8SImode) will be indexed with 9. ++ * Selector is a vector that has the same mode and number of elements with ++ * op0,op1 and target, it's look like this: ++ * 0 ~ nelt - 1 ++ * |-------------------------| ++ * 256bit (selector) ++ * It describes which element from 512bit temp vector storage will fit into ++ * target's every element slot. ++ * GCC expects that every element in selector can be ANY indices of 512bit ++ * vector storage (Selector can pick literally any element from op0 and op1, and ++ * then fits into any place of target register). This is also what LSX 128bit ++ * vshuf.* instruction do similarly, so we can handle 128bit vector permutation ++ * by single instruction easily. ++ * ++ * 3. What LASX permutation instruction does: ++ * In short, it just execute two independent 128bit vector permuatation, and ++ * it's the reason that we need to do the jobs below. We will explain it. 
++ * op0, op1, target, and selector will be separate into high 128bit and low ++ * 128bit, and do permutation as the description below: ++ * ++ * a) op0's low 128bit and op1's low 128bit "combines" into a 256bit temp ++ * vector storage (TVS1), elements are indexed as below: ++ * 0 ~ nelt / 2 - 1 nelt / 2 ~ nelt - 1 ++ * |---------------------|---------------------| TVS1 ++ * op0's low 128bit op1's low 128bit ++ * op0's high 128bit and op1's high 128bit are "combined" into TVS2 in the ++ * same way. ++ * 0 ~ nelt / 2 - 1 nelt / 2 ~ nelt - 1 ++ * |---------------------|---------------------| TVS2 ++ * op0's high 128bit op1's high 128bit ++ * b) Selector's low 128bit describes which elements from TVS1 will fit into ++ * target vector's low 128bit. No TVS2 elements are allowed. ++ * c) Selector's high 128bit describes which elements from TVS2 will fit into ++ * target vector's high 128bit. No TVS1 elements are allowed. ++ * ++ * As we can see, if we want to handle vector permutation correctly, we can ++ * achieve it in three ways: ++ * a) Modify selector's elements, to make sure that every elements can inform ++ * correct value that will put into target vector. ++ b) Generate extra instruction before/after permutation instruction, for ++ adjusting op vector or target vector, to make sure target vector's value is ++ what GCC expects. ++ c) Use other instructions to process op and put correct result into target. ++ */ ++ ++/* Implementation of constant vector permuatation. This function identifies ++ * recognized pattern of permuation selector argument, and use one or more ++ * instruction(s) to finish the permutation job correctly. For unsupported ++ * patterns, it will return false. */ ++ ++static bool ++loongarch_expand_vec_perm_const_2 (struct expand_vec_perm_d *d) ++{ ++ /* Although we have the LSX vec_perm template, there's still some ++ 128bit vector permuatation operations send to vectorize_vec_perm_const. 
++ In this case, we just simpliy wrap them by single vshuf.* instruction, ++ because LSX vshuf.* instruction just have the same behavior that GCC ++ expects. */ ++ if (GET_MODE_SIZE (d->vmode) == 16) ++ return loongarch_try_expand_lsx_vshuf_const (d); ++ else ++ return false; ++ ++ bool ok = false, reverse_hi_lo = false, extract_ev_od = false, ++ use_alt_op = false; ++ unsigned char idx; ++ int i; ++ rtx target, op0, op1, sel, tmp; ++ rtx op0_alt = NULL_RTX, op1_alt = NULL_RTX; ++ rtx rperm[MAX_VECT_LEN]; ++ unsigned int remapped[MAX_VECT_LEN]; ++ ++ /* Try to figure out whether is a recognized permutation selector pattern, if ++ yes, we will reassign some elements with new value in selector argument, ++ and in some cases we will generate some assist insn to complete the ++ permutation. (Even in some cases, we use other insn to impl permutation ++ instead of xvshuf!) ++ ++ Make sure to check d->testing_p is false everytime if you want to emit new ++ insn, unless you want to crash into ICE directly. */ ++ if (loongarch_is_quad_duplicate (d)) ++ { ++ /* Selector example: E_V8SImode, { 0, 0, 0, 0, 4, 4, 4, 4 } ++ copy first elem from original selector to all elem in new selector. */ ++ idx = d->perm[0]; ++ for (i = 0; i < d->nelt; i += 1) ++ { ++ remapped[i] = idx; ++ } ++ /* Selector after: { 0, 0, 0, 0, 0, 0, 0, 0 }. */ ++ } ++ else if (loongarch_is_double_duplicate (d)) ++ { ++ /* Selector example: E_V8SImode, { 1, 1, 3, 3, 5, 5, 7, 7 } ++ one_vector_p == true. */ ++ for (i = 0; i < d->nelt / 2; i += 1) ++ { ++ idx = d->perm[i]; ++ remapped[i] = idx; ++ remapped[i + d->nelt / 2] = idx; ++ } ++ /* Selector after: { 1, 1, 3, 3, 1, 1, 3, 3 }. */ ++ } ++ else if (loongarch_is_odd_extraction (d) ++ || loongarch_is_even_extraction (d)) ++ { ++ /* Odd extraction selector sample: E_V4DImode, { 1, 3, 5, 7 } ++ Selector after: { 1, 3, 1, 3 }. ++ Even extraction selector sample: E_V4DImode, { 0, 2, 4, 6 } ++ Selector after: { 0, 2, 0, 2 }. 
*/ ++ for (i = 0; i < d->nelt / 2; i += 1) ++ { ++ idx = d->perm[i]; ++ remapped[i] = idx; ++ remapped[i + d->nelt / 2] = idx; ++ } ++ /* Additional insn is required for correct result. See codes below. */ ++ extract_ev_od = true; ++ } ++ else if (loongarch_is_extraction_permutation (d)) ++ { ++ /* Selector sample: E_V8SImode, { 0, 1, 2, 3, 4, 5, 6, 7 }. */ ++ if (d->perm[0] == 0) ++ { ++ for (i = 0; i < d->nelt / 2; i += 1) ++ { ++ remapped[i] = i; ++ remapped[i + d->nelt / 2] = i; ++ } ++ } ++ else ++ { ++ /* { 8, 9, 10, 11, 12, 13, 14, 15 }. */ ++ for (i = 0; i < d->nelt / 2; i += 1) ++ { ++ idx = i + d->nelt / 2; ++ remapped[i] = idx; ++ remapped[i + d->nelt / 2] = idx; ++ } ++ } ++ /* Selector after: { 0, 1, 2, 3, 0, 1, 2, 3 } ++ { 8, 9, 10, 11, 8, 9, 10, 11 } */ ++ } ++ else if (loongarch_is_center_extraction (d)) ++ { ++ /* sample: E_V4DImode, { 2, 3, 4, 5 } ++ In this condition, we can just copy high 128bit of op0 and low 128bit ++ of op1 to the target register by using xvpermi.q insn. */ ++ if (!d->testing_p) ++ { ++ emit_move_insn (d->target, d->op1); ++ switch (d->vmode) ++ { ++ case E_V4DImode: ++ emit_insn (gen_lasx_xvpermi_q_v4di (d->target, d->target, ++ d->op0, GEN_INT (0x21))); ++ break; ++ case E_V4DFmode: ++ emit_insn (gen_lasx_xvpermi_q_v4df (d->target, d->target, ++ d->op0, GEN_INT (0x21))); ++ break; ++ case E_V8SImode: ++ emit_insn (gen_lasx_xvpermi_q_v8si (d->target, d->target, ++ d->op0, GEN_INT (0x21))); ++ break; ++ case E_V8SFmode: ++ emit_insn (gen_lasx_xvpermi_q_v8sf (d->target, d->target, ++ d->op0, GEN_INT (0x21))); ++ break; ++ case E_V16HImode: ++ emit_insn (gen_lasx_xvpermi_q_v16hi (d->target, d->target, ++ d->op0, GEN_INT (0x21))); ++ break; ++ case E_V32QImode: ++ emit_insn (gen_lasx_xvpermi_q_v32qi (d->target, d->target, ++ d->op0, GEN_INT (0x21))); ++ break; ++ default: ++ break; ++ } ++ } ++ ok = true; ++ /* Finish the funtion directly. 
*/ ++ goto expand_perm_const_2_end; ++ } ++ else if (loongarch_is_reversing_permutation (d)) ++ { ++ /* Selector sample: E_V8SImode, { 7, 6, 5, 4, 3, 2, 1, 0 } ++ one_vector_p == true */ ++ idx = d->nelt / 2 - 1; ++ for (i = 0; i < d->nelt / 2; i += 1) ++ { ++ remapped[i] = idx; ++ remapped[i + d->nelt / 2] = idx; ++ idx -= 1; ++ } ++ /* Selector after: { 3, 2, 1, 0, 3, 2, 1, 0 } ++ Additional insn will be generated to swap hi and lo 128bit of target ++ register. */ ++ reverse_hi_lo = true; ++ } ++ else if (loongarch_is_di_misalign_extract (d) ++ || loongarch_is_si_misalign_extract (d)) ++ { ++ /* Selector Sample: ++ DI misalign: E_V4DImode, { 1, 2, 3, 4 } ++ SI misalign: E_V8SImode, { 1, 2, 3, 4, 5, 6, 7, 8 } */ ++ if (!d->testing_p) ++ { ++ /* Copy original op0/op1 value to new temp register. ++ In some cases, operand register may be used in multiple place, so ++ we need new regiter instead modify original one, to avoid runtime ++ crashing or wrong value after execution. */ ++ use_alt_op = true; ++ op1_alt = gen_reg_rtx (d->vmode); ++ emit_move_insn (op1_alt, d->op1); ++ ++ /* Adjust op1 for selecting correct value in high 128bit of target ++ register. ++ op1: E_V4DImode, { 4, 5, 6, 7 } -> { 2, 3, 4, 5 }. */ ++ rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, op1_alt, 0); ++ rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, d->op0, 0); ++ emit_insn (gen_lasx_xvpermi_q_v4di (conv_op1, conv_op1, ++ conv_op0, GEN_INT (0x21))); ++ ++ for (i = 0; i < d->nelt / 2; i += 1) ++ { ++ remapped[i] = d->perm[i]; ++ remapped[i + d->nelt / 2] = d->perm[i]; ++ } ++ /* Selector after: ++ DI misalign: { 1, 2, 1, 2 } ++ SI misalign: { 1, 2, 3, 4, 1, 2, 3, 4 } */ ++ } ++ } ++ else if (loongarch_is_lasx_lowpart_interleave (d)) ++ { ++ /* Elements from op0's low 18bit and op1's 128bit are inserted into ++ target register alternately. ++ sample: E_V4DImode, { 0, 4, 1, 5 } */ ++ if (!d->testing_p) ++ { ++ /* Prepare temp register instead of modify original op. 
*/ ++ use_alt_op = true; ++ op1_alt = gen_reg_rtx (d->vmode); ++ op0_alt = gen_reg_rtx (d->vmode); ++ emit_move_insn (op1_alt, d->op1); ++ emit_move_insn (op0_alt, d->op0); ++ ++ /* Generate subreg for fitting into insn gen function. */ ++ rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, op1_alt, 0); ++ rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, op0_alt, 0); ++ ++ /* Adjust op value in temp register. ++ op0 = {0,1,2,3}, op1 = {4,5,0,1} */ ++ emit_insn (gen_lasx_xvpermi_q_v4di (conv_op1, conv_op1, ++ conv_op0, GEN_INT (0x02))); ++ /* op0 = {0,1,4,5}, op1 = {4,5,0,1} */ ++ emit_insn (gen_lasx_xvpermi_q_v4di (conv_op0, conv_op0, ++ conv_op1, GEN_INT (0x01))); ++ ++ /* Remap indices in selector based on the location of index inside ++ selector, and vector element numbers in current vector mode. */ ++ ++ /* Filling low 128bit of new selector. */ ++ for (i = 0; i < d->nelt / 2; i += 1) ++ { ++ /* value in odd-indexed slot of low 128bit part of selector ++ vector. */ ++ remapped[i] = i % 2 != 0 ? d->perm[i] - d->nelt / 2 : d->perm[i]; ++ } ++ /* Then filling the high 128bit. */ ++ for (i = d->nelt / 2; i < d->nelt; i += 1) ++ { ++ /* value in even-indexed slot of high 128bit part of ++ selector vector. */ ++ remapped[i] = i % 2 == 0 ++ ? d->perm[i] + (d->nelt / 2) * 3 : d->perm[i]; ++ } ++ } ++ } ++ else if (loongarch_is_lasx_lowpart_interleave_2 (d)) ++ { ++ /* Special lowpart interleave case in V32QI vector mode. It does the same ++ thing as we can see in if branch that above this line. ++ Selector sample: E_V32QImode, ++ {0, 1, 2, 3, 4, 5, 6, 7, 32, 33, 34, 35, 36, 37, 38, 39, 8, ++ 9, 10, 11, 12, 13, 14, 15, 40, 41, 42, 43, 44, 45, 46, 47} */ ++ if (!d->testing_p) ++ { ++ /* Solution for this case in very simple - covert op into V4DI mode, ++ and do same thing as previous if branch. 
*/ ++ op1_alt = gen_reg_rtx (d->vmode); ++ op0_alt = gen_reg_rtx (d->vmode); ++ emit_move_insn (op1_alt, d->op1); ++ emit_move_insn (op0_alt, d->op0); ++ ++ rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, op1_alt, 0); ++ rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, op0_alt, 0); ++ rtx conv_target = gen_rtx_SUBREG (E_V4DImode, d->target, 0); ++ ++ emit_insn (gen_lasx_xvpermi_q_v4di (conv_op1, conv_op1, ++ conv_op0, GEN_INT (0x02))); ++ emit_insn (gen_lasx_xvpermi_q_v4di (conv_op0, conv_op0, ++ conv_op1, GEN_INT (0x01))); ++ remapped[0] = 0; ++ remapped[1] = 4; ++ remapped[2] = 1; ++ remapped[3] = 5; ++ ++ for (i = 0; i < d->nelt; i += 1) ++ { ++ rperm[i] = GEN_INT (remapped[i]); ++ } ++ ++ sel = gen_rtx_CONST_VECTOR (E_V4DImode, gen_rtvec_v (4, rperm)); ++ sel = force_reg (E_V4DImode, sel); ++ emit_insn (gen_lasx_xvshuf_d (conv_target, sel, ++ conv_op1, conv_op0)); ++ } ++ ++ ok = true; ++ goto expand_perm_const_2_end; ++ } ++ else if (loongarch_is_lasx_lowpart_extract (d)) ++ { ++ /* Copy op0's low 128bit to target's low 128bit, and copy op1's low ++ 128bit to target's high 128bit. ++ Selector sample: E_V4DImode, { 0, 1, 4 ,5 } */ ++ if (!d->testing_p) ++ { ++ rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, d->op1, 0); ++ rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, d->op0, 0); ++ rtx conv_target = gen_rtx_SUBREG (E_V4DImode, d->target, 0); ++ ++ /* We can achieve the expectation by using sinple xvpermi.q insn. */ ++ emit_move_insn (conv_target, conv_op1); ++ emit_insn (gen_lasx_xvpermi_q_v4di (conv_target, conv_target, ++ conv_op0, GEN_INT (0x20))); ++ } ++ ++ ok = true; ++ goto expand_perm_const_2_end; ++ } ++ else if (loongarch_is_lasx_highpart_interleave (d)) ++ { ++ /* Similar to lowpart interleave, elements from op0's high 128bit and ++ op1's high 128bit are inserted into target regiter alternately. ++ Selector sample: E_V8SImode, { 4, 12, 5, 13, 6, 14, 7, 15 } */ ++ if (!d->testing_p) ++ { ++ /* Prepare temp op register. 
*/ ++ use_alt_op = true; ++ op1_alt = gen_reg_rtx (d->vmode); ++ op0_alt = gen_reg_rtx (d->vmode); ++ emit_move_insn (op1_alt, d->op1); ++ emit_move_insn (op0_alt, d->op0); ++ ++ rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, op1_alt, 0); ++ rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, op0_alt, 0); ++ /* Adjust op value in temp regiter. ++ op0 = { 0, 1, 2, 3 }, op1 = { 6, 7, 2, 3 } */ ++ emit_insn (gen_lasx_xvpermi_q_v4di (conv_op1, conv_op1, ++ conv_op0, GEN_INT (0x13))); ++ /* op0 = { 2, 3, 6, 7 }, op1 = { 6, 7, 2, 3 } */ ++ emit_insn (gen_lasx_xvpermi_q_v4di (conv_op0, conv_op0, ++ conv_op1, GEN_INT (0x01))); ++ /* Remap indices in selector based on the location of index inside ++ selector, and vector element numbers in current vector mode. */ ++ ++ /* Filling low 128bit of new selector. */ ++ for (i = 0; i < d->nelt / 2; i += 1) ++ { ++ /* value in even-indexed slot of low 128bit part of selector ++ vector. */ ++ remapped[i] = i % 2 == 0 ? d->perm[i] - d->nelt / 2 : d->perm[i]; ++ } ++ /* Then filling the high 128bit. */ ++ for (i = d->nelt / 2; i < d->nelt; i += 1) ++ { ++ /* value in odd-indexed slot of high 128bit part of selector ++ vector. */ ++ remapped[i] = i % 2 != 0 ++ ? d->perm[i] - (d->nelt / 2) * 3 : d->perm[i]; ++ } ++ } ++ } ++ else if (loongarch_is_lasx_highpart_interleave_2 (d)) ++ { ++ /* Special highpart interleave case in V32QI vector mode. It does the ++ same thing as the normal version above. ++ Selector sample: E_V32QImode, ++ {16, 17, 18, 19, 20, 21, 22, 23, 48, 49, 50, 51, 52, 53, 54, 55, ++ 24, 25, 26, 27, 28, 29, 30, 31, 56, 57, 58, 59, 60, 61, 62, 63} ++ */ ++ if (!d->testing_p) ++ { ++ /* Convert op into V4DImode and do the things. 
*/ ++ op1_alt = gen_reg_rtx (d->vmode); ++ op0_alt = gen_reg_rtx (d->vmode); ++ emit_move_insn (op1_alt, d->op1); ++ emit_move_insn (op0_alt, d->op0); ++ ++ rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, op1_alt, 0); ++ rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, op0_alt, 0); ++ rtx conv_target = gen_rtx_SUBREG (E_V4DImode, d->target, 0); ++ ++ emit_insn (gen_lasx_xvpermi_q_v4di (conv_op1, conv_op1, ++ conv_op0, GEN_INT (0x13))); ++ emit_insn (gen_lasx_xvpermi_q_v4di (conv_op0, conv_op0, ++ conv_op1, GEN_INT (0x01))); ++ remapped[0] = 2; ++ remapped[1] = 6; ++ remapped[2] = 3; ++ remapped[3] = 7; ++ ++ for (i = 0; i < d->nelt; i += 1) ++ { ++ rperm[i] = GEN_INT (remapped[i]); ++ } ++ ++ sel = gen_rtx_CONST_VECTOR (E_V4DImode, gen_rtvec_v (4, rperm)); ++ sel = force_reg (E_V4DImode, sel); ++ emit_insn (gen_lasx_xvshuf_d (conv_target, sel, ++ conv_op1, conv_op0)); ++ } ++ ++ ok = true; ++ goto expand_perm_const_2_end; ++ } ++ else if (loongarch_is_elem_duplicate (d)) ++ { ++ /* Brocast single element (from op0 or op1) to all slot of target ++ register. ++ Selector sample:E_V8SImode, { 2, 2, 2, 2, 2, 2, 2, 2 } */ ++ if (!d->testing_p) ++ { ++ rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, d->op1, 0); ++ rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, d->op0, 0); ++ rtx temp_reg = gen_reg_rtx (d->vmode); ++ rtx conv_temp = gen_rtx_SUBREG (E_V4DImode, temp_reg, 0); ++ ++ emit_move_insn (temp_reg, d->op0); ++ ++ idx = d->perm[0]; ++ /* We will use xvrepl128vei.* insn to achieve the result, but we need ++ to make the high/low 128bit has the same contents that contain the ++ value that we need to broardcast, because xvrepl128vei does the ++ broardcast job from every 128bit of source register to ++ corresponded part of target register! (A deep sigh.) 
*/ ++ if (/*idx >= 0 &&*/ idx < d->nelt / 2) ++ { ++ emit_insn (gen_lasx_xvpermi_q_v4di (conv_temp, conv_temp, ++ conv_op0, GEN_INT (0x0))); ++ } ++ else if (idx >= d->nelt / 2 && idx < d->nelt) ++ { ++ emit_insn (gen_lasx_xvpermi_q_v4di (conv_temp, conv_temp, ++ conv_op0, GEN_INT (0x11))); ++ idx -= d->nelt / 2; ++ } ++ else if (idx >= d->nelt && idx < (d->nelt + d->nelt / 2)) ++ { ++ emit_insn (gen_lasx_xvpermi_q_v4di (conv_temp, conv_temp, ++ conv_op1, GEN_INT (0x0))); ++ } ++ else if (idx >= (d->nelt + d->nelt / 2) && idx < d->nelt * 2) ++ { ++ emit_insn (gen_lasx_xvpermi_q_v4di (conv_temp, conv_temp, ++ conv_op1, GEN_INT (0x11))); ++ idx -= d->nelt / 2; ++ } ++ ++ /* Then we can finally generate this insn. */ ++ switch (d->vmode) ++ { ++ case E_V4DImode: ++ emit_insn (gen_lasx_xvrepl128vei_d (d->target, temp_reg, ++ GEN_INT (idx))); ++ break; ++ case E_V4DFmode: ++ emit_insn (gen_lasx_xvrepl128vei_d_f (d->target, temp_reg, ++ GEN_INT (idx))); ++ break; ++ case E_V8SImode: ++ emit_insn (gen_lasx_xvrepl128vei_w (d->target, temp_reg, ++ GEN_INT (idx))); ++ break; ++ case E_V8SFmode: ++ emit_insn (gen_lasx_xvrepl128vei_w_f (d->target, temp_reg, ++ GEN_INT (idx))); ++ break; ++ case E_V16HImode: ++ emit_insn (gen_lasx_xvrepl128vei_h (d->target, temp_reg, ++ GEN_INT (idx))); ++ break; ++ case E_V32QImode: ++ emit_insn (gen_lasx_xvrepl128vei_b (d->target, temp_reg, ++ GEN_INT (idx))); ++ break; ++ default: ++ gcc_unreachable (); ++ break; ++ } ++ ++ /* finish func directly. */ ++ ok = true; ++ goto expand_perm_const_2_end; ++ } ++ } ++ else if (loongarch_is_op_reverse_perm (d)) ++ { ++ /* reverse high 128bit and low 128bit in op0. ++ Selector sample: E_V4DFmode, { 2, 3, 0, 1 } ++ Use xvpermi.q for doing this job. 
*/ ++ if (!d->testing_p) ++ { ++ if (d->vmode == E_V4DImode) ++ { ++ emit_insn (gen_lasx_xvpermi_q_v4di (d->target, d->target, d->op0, ++ GEN_INT (0x01))); ++ } ++ else if (d->vmode == E_V4DFmode) ++ { ++ emit_insn (gen_lasx_xvpermi_q_v4df (d->target, d->target, d->op0, ++ GEN_INT (0x01))); ++ } ++ else ++ { ++ gcc_unreachable (); ++ } ++ } ++ ++ ok = true; ++ goto expand_perm_const_2_end; ++ } ++ else if (loongarch_is_single_op_perm (d)) ++ { ++ /* Permutation that only select elements from op0. */ ++ if (!d->testing_p) ++ { ++ /* Prepare temp register instead of modify original op. */ ++ use_alt_op = true; ++ op0_alt = gen_reg_rtx (d->vmode); ++ op1_alt = gen_reg_rtx (d->vmode); ++ ++ emit_move_insn (op0_alt, d->op0); ++ emit_move_insn (op1_alt, d->op1); ++ ++ rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, d->op0, 0); ++ rtx conv_op0a = gen_rtx_SUBREG (E_V4DImode, op0_alt, 0); ++ rtx conv_op1a = gen_rtx_SUBREG (E_V4DImode, op1_alt, 0); ++ ++ /* Duplicate op0's low 128bit in op0, then duplicate high 128bit ++ in op1. After this, xvshuf.* insn's selector argument can ++ access all elements we need for correct permutation result. */ ++ emit_insn (gen_lasx_xvpermi_q_v4di (conv_op0a, conv_op0a, conv_op0, ++ GEN_INT (0x00))); ++ emit_insn (gen_lasx_xvpermi_q_v4di (conv_op1a, conv_op1a, conv_op0, ++ GEN_INT (0x11))); ++ ++ /* In this case, there's no need to remap selector's indices. */ ++ for (i = 0; i < d->nelt; i += 1) ++ { ++ remapped[i] = d->perm[i]; ++ } ++ } ++ } ++ else if (loongarch_is_divisible_perm (d)) ++ { ++ /* Divisible perm: ++ Low 128bit of selector only selects elements of op0, ++ and high 128bit of selector only selects elements of op1. */ ++ ++ if (!d->testing_p) ++ { ++ /* Prepare temp register instead of modify original op. 
*/ ++ use_alt_op = true; ++ op0_alt = gen_reg_rtx (d->vmode); ++ op1_alt = gen_reg_rtx (d->vmode); ++ ++ emit_move_insn (op0_alt, d->op0); ++ emit_move_insn (op1_alt, d->op1); ++ ++ rtx conv_op0a = gen_rtx_SUBREG (E_V4DImode, op0_alt, 0); ++ rtx conv_op1a = gen_rtx_SUBREG (E_V4DImode, op1_alt, 0); ++ rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, d->op0, 0); ++ rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, d->op1, 0); ++ ++ /* Reorganize op0's hi/lo 128bit and op1's hi/lo 128bit, to make sure ++ that selector's low 128bit can access all op0's elements, and ++ selector's high 128bit can access all op1's elements. */ ++ emit_insn (gen_lasx_xvpermi_q_v4di (conv_op0a, conv_op0a, conv_op1, ++ GEN_INT (0x02))); ++ emit_insn (gen_lasx_xvpermi_q_v4di (conv_op1a, conv_op1a, conv_op0, ++ GEN_INT (0x31))); ++ ++ /* No need to modify indices. */ ++ for (i = 0; i < d->nelt;i += 1) ++ { ++ remapped[i] = d->perm[i]; ++ } ++ } ++ } ++ else if (loongarch_is_triple_stride_extract (d)) ++ { ++ /* Selector sample: E_V4DFmode, { 1, 4, 7, 0 }. */ ++ if (!d->testing_p) ++ { ++ /* Resolve it with brute force modification. */ ++ remapped[0] = 1; ++ remapped[1] = 2; ++ remapped[2] = 3; ++ remapped[3] = 0; ++ } ++ } ++ else ++ { ++ /* When all of the detections above are failed, we will try last ++ strategy. ++ The for loop tries to detect following rules based on indices' value, ++ its position inside of selector vector ,and strange behavior of ++ xvshuf.* insn; Then we take corresponding action. (Replace with new ++ value, or give up whole permutation expansion.) */ ++ for (i = 0; i < d->nelt; i += 1) ++ { ++ /* % (2 * d->nelt) */ ++ idx = d->perm[i]; ++ ++ /* if index is located in low 128bit of selector vector. */ ++ if (i < d->nelt / 2) ++ { ++ /* Fail case 1: index tries to reach element that located in op0's ++ high 128bit. 
*/ ++ if (idx >= d->nelt / 2 && idx < d->nelt) ++ { ++ goto expand_perm_const_2_end; ++ } ++ /* Fail case 2: index tries to reach element that located in ++ op1's high 128bit. */ ++ if (idx >= (d->nelt + d->nelt / 2)) ++ { ++ goto expand_perm_const_2_end; ++ } ++ ++ /* Success case: index tries to reach elements that located in ++ op1's low 128bit. Apply - (nelt / 2) offset to original ++ value. */ ++ if (idx >= d->nelt && idx < (d->nelt + d->nelt / 2)) ++ { ++ idx -= d->nelt / 2; ++ } ++ } ++ /* if index is located in high 128bit of selector vector. */ ++ else ++ { ++ /* Fail case 1: index tries to reach element that located in ++ op1's low 128bit. */ ++ if (idx >= d->nelt && idx < (d->nelt + d->nelt / 2)) ++ { ++ goto expand_perm_const_2_end; ++ } ++ /* Fail case 2: index tries to reach element that located in ++ op0's low 128bit. */ ++ if (idx < (d->nelt / 2)) ++ { ++ goto expand_perm_const_2_end; ++ } ++ /* Success case: index tries to reach element that located in ++ op0's high 128bit. */ ++ if (idx >= d->nelt / 2 && idx < d->nelt) ++ { ++ idx -= d->nelt / 2; ++ } ++ } ++ /* No need to process other case that we did not mentioned. */ ++ ++ /* Assign with original or processed value. */ ++ remapped[i] = idx; ++ } ++ } ++ ++ ok = true; ++ /* If testing_p is true, compiler is trying to figure out that backend can ++ handle this permutation, but doesn't want to generate actual insn. So ++ if true, exit directly. */ ++ if (d->testing_p) ++ { ++ goto expand_perm_const_2_end; ++ } ++ ++ /* Convert remapped selector array to RTL array. */ ++ for (i = 0; i < d->nelt; i += 1) ++ { ++ rperm[i] = GEN_INT (remapped[i]); ++ } ++ ++ /* Copy selector vector from memory to vector regiter for later insn gen ++ function. ++ If vector's element in floating point value, we cannot fit selector ++ argument into insn gen function directly, because of the insn template ++ definition. 
As a solution, generate a integral mode subreg of target, ++ then copy selector vector (that is in integral mode) to this subreg. */ ++ switch (d->vmode) ++ { ++ case E_V4DFmode: ++ sel = gen_rtx_CONST_VECTOR (E_V4DImode, gen_rtvec_v (d->nelt, rperm)); ++ tmp = gen_rtx_SUBREG (E_V4DImode, d->target, 0); ++ emit_move_insn (tmp, sel); ++ break; ++ case E_V8SFmode: ++ sel = gen_rtx_CONST_VECTOR (E_V8SImode, gen_rtvec_v (d->nelt, rperm)); ++ tmp = gen_rtx_SUBREG (E_V8SImode, d->target, 0); ++ emit_move_insn (tmp, sel); ++ break; ++ default: ++ sel = gen_rtx_CONST_VECTOR (d->vmode, gen_rtvec_v (d->nelt, rperm)); ++ emit_move_insn (d->target, sel); ++ break; ++ } ++ ++ target = d->target; ++ /* If temp op registers are requested in previous if branch, then use temp ++ register intead of original one. */ ++ if (use_alt_op) ++ { ++ op0 = op0_alt != NULL_RTX ? op0_alt : d->op0; ++ op1 = op1_alt != NULL_RTX ? op1_alt : d->op1; ++ } ++ else ++ { ++ op0 = d->op0; ++ op1 = d->one_vector_p ? d->op0 : d->op1; ++ } ++ ++ /* We FINALLY can generate xvshuf.* insn. */ ++ switch (d->vmode) ++ { ++ case E_V4DFmode: ++ emit_insn (gen_lasx_xvshuf_d_f (target, target, op1, op0)); ++ break; ++ case E_V4DImode: ++ emit_insn (gen_lasx_xvshuf_d (target, target, op1, op0)); ++ break; ++ case E_V8SFmode: ++ emit_insn (gen_lasx_xvshuf_w_f (target, target, op1, op0)); ++ break; ++ case E_V8SImode: ++ emit_insn (gen_lasx_xvshuf_w (target, target, op1, op0)); ++ break; ++ case E_V16HImode: ++ emit_insn (gen_lasx_xvshuf_h (target, target, op1, op0)); ++ break; ++ case E_V32QImode: ++ emit_insn (gen_lasx_xvshuf_b (target, op1, op0, target)); ++ break; ++ default: ++ gcc_unreachable (); ++ break; ++ } ++ ++ /* Extra insn for swapping the hi/lo 128bit of target vector register. 
*/ ++ if (reverse_hi_lo) ++ { ++ switch (d->vmode) ++ { ++ case E_V4DFmode: ++ emit_insn (gen_lasx_xvpermi_q_v4df (d->target, d->target, ++ d->target, GEN_INT (0x1))); ++ break; ++ case E_V4DImode: ++ emit_insn (gen_lasx_xvpermi_q_v4di (d->target, d->target, ++ d->target, GEN_INT (0x1))); ++ break; ++ case E_V8SFmode: ++ emit_insn (gen_lasx_xvpermi_q_v8sf (d->target, d->target, ++ d->target, GEN_INT (0x1))); ++ break; ++ case E_V8SImode: ++ emit_insn (gen_lasx_xvpermi_q_v8si (d->target, d->target, ++ d->target, GEN_INT (0x1))); ++ break; ++ case E_V16HImode: ++ emit_insn (gen_lasx_xvpermi_q_v16hi (d->target, d->target, ++ d->target, GEN_INT (0x1))); ++ break; ++ case E_V32QImode: ++ emit_insn (gen_lasx_xvpermi_q_v32qi (d->target, d->target, ++ d->target, GEN_INT (0x1))); ++ break; ++ default: ++ break; ++ } ++ } ++ /* Extra insn required by odd/even extraction. Swapping the second and third ++ 64bit in target vector register. */ ++ else if (extract_ev_od) ++ { ++ rtx converted = gen_rtx_SUBREG (E_V4DImode, d->target, 0); ++ emit_insn (gen_lasx_xvpermi_d_v4di (converted, converted, ++ GEN_INT (0xD8))); ++ } ++ ++expand_perm_const_2_end: ++ return ok; + } + + /* Implement TARGET_VECTORIZE_VEC_PERM_CONST. */ +@@ -7813,6 +9728,12 @@ loongarch_vectorize_vec_perm_const (machine_mode vmode, machine_mode op_mode, + break; + } + ++ // Do rounding for selector to avoid vshuf undefined behavior. ++ for (i = 0; i < d.nelt; i += 1) ++ { ++ d.perm[i] %= (d.nelt * 2); ++ } ++ + if (d.testing_p) + { + d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1); +@@ -7865,7 +9786,7 @@ loongarch_cpu_sched_reassociation_width (struct loongarch_target *target, + case CPU_LOONGARCH64: + case CPU_LA464: + /* Vector part. */ +- if (LSX_SUPPORTED_MODE_P (mode)) ++ if (LSX_SUPPORTED_MODE_P (mode) || LASX_SUPPORTED_MODE_P (mode)) + { + /* Integer vector instructions execute in FP unit. + The width of integer/float-point vector instructions is 3. 
*/ +@@ -7915,6 +9836,44 @@ loongarch_expand_vector_extract (rtx target, rtx vec, int elt) + case E_V16QImode: + break; + ++ case E_V32QImode: ++ if (ISA_HAS_LASX) ++ { ++ if (elt >= 16) ++ { ++ tmp = gen_reg_rtx (V32QImode); ++ emit_insn (gen_lasx_xvpermi_d_v32qi (tmp, vec, GEN_INT (0xe))); ++ loongarch_expand_vector_extract (target, ++ gen_lowpart (V16QImode, tmp), ++ elt & 15); ++ } ++ else ++ loongarch_expand_vector_extract (target, ++ gen_lowpart (V16QImode, vec), ++ elt & 15); ++ return; ++ } ++ break; ++ ++ case E_V16HImode: ++ if (ISA_HAS_LASX) ++ { ++ if (elt >= 8) ++ { ++ tmp = gen_reg_rtx (V16HImode); ++ emit_insn (gen_lasx_xvpermi_d_v16hi (tmp, vec, GEN_INT (0xe))); ++ loongarch_expand_vector_extract (target, ++ gen_lowpart (V8HImode, tmp), ++ elt & 7); ++ } ++ else ++ loongarch_expand_vector_extract (target, ++ gen_lowpart (V8HImode, vec), ++ elt & 7); ++ return; ++ } ++ break; ++ + default: + break; + } +@@ -7953,6 +9912,31 @@ emit_reduc_half (rtx dest, rtx src, int i) + case E_V2DFmode: + tem = gen_lsx_vbsrl_d_f (dest, src, GEN_INT (8)); + break; ++ case E_V8SFmode: ++ if (i == 256) ++ tem = gen_lasx_xvpermi_d_v8sf (dest, src, GEN_INT (0xe)); ++ else ++ tem = gen_lasx_xvshuf4i_w_f (dest, src, ++ GEN_INT (i == 128 ? 
2 + (3 << 2) : 1)); ++ break; ++ case E_V4DFmode: ++ if (i == 256) ++ tem = gen_lasx_xvpermi_d_v4df (dest, src, GEN_INT (0xe)); ++ else ++ tem = gen_lasx_xvpermi_d_v4df (dest, src, const1_rtx); ++ break; ++ case E_V32QImode: ++ case E_V16HImode: ++ case E_V8SImode: ++ case E_V4DImode: ++ d = gen_reg_rtx (V4DImode); ++ if (i == 256) ++ tem = gen_lasx_xvpermi_d_v4di (d, gen_lowpart (V4DImode, src), ++ GEN_INT (0xe)); ++ else ++ tem = gen_lasx_xvbsrl_d (d, gen_lowpart (V4DImode, src), ++ GEN_INT (i/16)); ++ break; + case E_V16QImode: + case E_V8HImode: + case E_V4SImode: +@@ -8000,10 +9984,57 @@ loongarch_expand_vec_unpack (rtx operands[2], bool unsigned_p, bool high_p) + { + machine_mode imode = GET_MODE (operands[1]); + rtx (*unpack) (rtx, rtx, rtx); ++ rtx (*extend) (rtx, rtx); + rtx (*cmpFunc) (rtx, rtx, rtx); ++ rtx (*swap_hi_lo) (rtx, rtx, rtx, rtx); + rtx tmp, dest; + +- if (ISA_HAS_LSX) ++ if (ISA_HAS_LASX && GET_MODE_SIZE (imode) == 32) ++ { ++ switch (imode) ++ { ++ case E_V8SImode: ++ if (unsigned_p) ++ extend = gen_lasx_vext2xv_du_wu; ++ else ++ extend = gen_lasx_vext2xv_d_w; ++ swap_hi_lo = gen_lasx_xvpermi_q_v8si; ++ break; ++ ++ case E_V16HImode: ++ if (unsigned_p) ++ extend = gen_lasx_vext2xv_wu_hu; ++ else ++ extend = gen_lasx_vext2xv_w_h; ++ swap_hi_lo = gen_lasx_xvpermi_q_v16hi; ++ break; ++ ++ case E_V32QImode: ++ if (unsigned_p) ++ extend = gen_lasx_vext2xv_hu_bu; ++ else ++ extend = gen_lasx_vext2xv_h_b; ++ swap_hi_lo = gen_lasx_xvpermi_q_v32qi; ++ break; ++ ++ default: ++ gcc_unreachable (); ++ break; ++ } ++ ++ if (high_p) ++ { ++ tmp = gen_reg_rtx (imode); ++ emit_insn (swap_hi_lo (tmp, tmp, operands[1], const1_rtx)); ++ emit_insn (extend (operands[0], tmp)); ++ return; ++ } ++ ++ emit_insn (extend (operands[0], operands[1])); ++ return; ++ ++ } ++ else if (ISA_HAS_LSX) + { + switch (imode) + { +@@ -8104,8 +10135,17 @@ loongarch_gen_const_int_vector_shuffle (machine_mode mode, int val) + return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nunits, 
elts)); + } + ++ + /* Expand a vector initialization. */ + ++void ++loongarch_expand_vector_group_init (rtx target, rtx vals) ++{ ++ rtx ops[2] = { XVECEXP (vals, 0, 0), XVECEXP (vals, 0, 1) }; ++ emit_insn (gen_rtx_SET (target, gen_rtx_VEC_CONCAT (E_V32QImode, ops[0], ++ ops[1]))); ++} ++ + void + loongarch_expand_vector_init (rtx target, rtx vals) + { +@@ -8125,6 +10165,285 @@ loongarch_expand_vector_init (rtx target, rtx vals) + all_same = false; + } + ++ if (ISA_HAS_LASX && GET_MODE_SIZE (vmode) == 32) ++ { ++ if (all_same) ++ { ++ rtx same = XVECEXP (vals, 0, 0); ++ rtx temp, temp2; ++ ++ if (CONST_INT_P (same) && nvar == 0 ++ && loongarch_signed_immediate_p (INTVAL (same), 10, 0)) ++ { ++ switch (vmode) ++ { ++ case E_V32QImode: ++ case E_V16HImode: ++ case E_V8SImode: ++ case E_V4DImode: ++ temp = gen_rtx_CONST_VECTOR (vmode, XVEC (vals, 0)); ++ emit_move_insn (target, temp); ++ return; ++ ++ default: ++ gcc_unreachable (); ++ } ++ } ++ ++ temp = gen_reg_rtx (imode); ++ if (imode == GET_MODE (same)) ++ temp2 = same; ++ else if (GET_MODE_SIZE (imode) >= UNITS_PER_WORD) ++ { ++ if (GET_CODE (same) == MEM) ++ { ++ rtx reg_tmp = gen_reg_rtx (GET_MODE (same)); ++ loongarch_emit_move (reg_tmp, same); ++ temp2 = simplify_gen_subreg (imode, reg_tmp, ++ GET_MODE (reg_tmp), 0); ++ } ++ else ++ temp2 = simplify_gen_subreg (imode, same, ++ GET_MODE (same), 0); ++ } ++ else ++ { ++ if (GET_CODE (same) == MEM) ++ { ++ rtx reg_tmp = gen_reg_rtx (GET_MODE (same)); ++ loongarch_emit_move (reg_tmp, same); ++ temp2 = lowpart_subreg (imode, reg_tmp, ++ GET_MODE (reg_tmp)); ++ } ++ else ++ temp2 = lowpart_subreg (imode, same, GET_MODE (same)); ++ } ++ emit_move_insn (temp, temp2); ++ ++ switch (vmode) ++ { ++ case E_V32QImode: ++ case E_V16HImode: ++ case E_V8SImode: ++ case E_V4DImode: ++ loongarch_emit_move (target, ++ gen_rtx_VEC_DUPLICATE (vmode, temp)); ++ break; ++ ++ case E_V8SFmode: ++ emit_insn (gen_lasx_xvreplve0_w_f_scalar (target, temp)); ++ break; ++ ++ case 
E_V4DFmode: ++ emit_insn (gen_lasx_xvreplve0_d_f_scalar (target, temp)); ++ break; ++ ++ default: ++ gcc_unreachable (); ++ } ++ } ++ else ++ { ++ rtvec vec = shallow_copy_rtvec (XVEC (vals, 0)); ++ ++ for (i = 0; i < nelt; ++i) ++ RTVEC_ELT (vec, i) = CONST0_RTX (imode); ++ ++ emit_move_insn (target, gen_rtx_CONST_VECTOR (vmode, vec)); ++ ++ machine_mode half_mode = VOIDmode; ++ rtx target_hi, target_lo; ++ ++ switch (vmode) ++ { ++ case E_V32QImode: ++ half_mode=E_V16QImode; ++ target_hi = gen_reg_rtx (half_mode); ++ target_lo = gen_reg_rtx (half_mode); ++ for (i = 0; i < nelt/2; ++i) ++ { ++ rtx temp_hi = gen_reg_rtx (imode); ++ rtx temp_lo = gen_reg_rtx (imode); ++ emit_move_insn (temp_hi, XVECEXP (vals, 0, i+nelt/2)); ++ emit_move_insn (temp_lo, XVECEXP (vals, 0, i)); ++ if (i == 0) ++ { ++ emit_insn (gen_lsx_vreplvei_b_scalar (target_hi, ++ temp_hi)); ++ emit_insn (gen_lsx_vreplvei_b_scalar (target_lo, ++ temp_lo)); ++ } ++ else ++ { ++ emit_insn (gen_vec_setv16qi (target_hi, temp_hi, ++ GEN_INT (i))); ++ emit_insn (gen_vec_setv16qi (target_lo, temp_lo, ++ GEN_INT (i))); ++ } ++ } ++ emit_insn (gen_rtx_SET (target, ++ gen_rtx_VEC_CONCAT (vmode, target_hi, ++ target_lo))); ++ break; ++ ++ case E_V16HImode: ++ half_mode=E_V8HImode; ++ target_hi = gen_reg_rtx (half_mode); ++ target_lo = gen_reg_rtx (half_mode); ++ for (i = 0; i < nelt/2; ++i) ++ { ++ rtx temp_hi = gen_reg_rtx (imode); ++ rtx temp_lo = gen_reg_rtx (imode); ++ emit_move_insn (temp_hi, XVECEXP (vals, 0, i+nelt/2)); ++ emit_move_insn (temp_lo, XVECEXP (vals, 0, i)); ++ if (i == 0) ++ { ++ emit_insn (gen_lsx_vreplvei_h_scalar (target_hi, ++ temp_hi)); ++ emit_insn (gen_lsx_vreplvei_h_scalar (target_lo, ++ temp_lo)); ++ } ++ else ++ { ++ emit_insn (gen_vec_setv8hi (target_hi, temp_hi, ++ GEN_INT (i))); ++ emit_insn (gen_vec_setv8hi (target_lo, temp_lo, ++ GEN_INT (i))); ++ } ++ } ++ emit_insn (gen_rtx_SET (target, ++ gen_rtx_VEC_CONCAT (vmode, target_hi, ++ target_lo))); ++ break; ++ ++ case 
E_V8SImode: ++ half_mode=V4SImode; ++ target_hi = gen_reg_rtx (half_mode); ++ target_lo = gen_reg_rtx (half_mode); ++ for (i = 0; i < nelt/2; ++i) ++ { ++ rtx temp_hi = gen_reg_rtx (imode); ++ rtx temp_lo = gen_reg_rtx (imode); ++ emit_move_insn (temp_hi, XVECEXP (vals, 0, i+nelt/2)); ++ emit_move_insn (temp_lo, XVECEXP (vals, 0, i)); ++ if (i == 0) ++ { ++ emit_insn (gen_lsx_vreplvei_w_scalar (target_hi, ++ temp_hi)); ++ emit_insn (gen_lsx_vreplvei_w_scalar (target_lo, ++ temp_lo)); ++ } ++ else ++ { ++ emit_insn (gen_vec_setv4si (target_hi, temp_hi, ++ GEN_INT (i))); ++ emit_insn (gen_vec_setv4si (target_lo, temp_lo, ++ GEN_INT (i))); ++ } ++ } ++ emit_insn (gen_rtx_SET (target, ++ gen_rtx_VEC_CONCAT (vmode, target_hi, ++ target_lo))); ++ break; ++ ++ case E_V4DImode: ++ half_mode=E_V2DImode; ++ target_hi = gen_reg_rtx (half_mode); ++ target_lo = gen_reg_rtx (half_mode); ++ for (i = 0; i < nelt/2; ++i) ++ { ++ rtx temp_hi = gen_reg_rtx (imode); ++ rtx temp_lo = gen_reg_rtx (imode); ++ emit_move_insn (temp_hi, XVECEXP (vals, 0, i+nelt/2)); ++ emit_move_insn (temp_lo, XVECEXP (vals, 0, i)); ++ if (i == 0) ++ { ++ emit_insn (gen_lsx_vreplvei_d_scalar (target_hi, ++ temp_hi)); ++ emit_insn (gen_lsx_vreplvei_d_scalar (target_lo, ++ temp_lo)); ++ } ++ else ++ { ++ emit_insn (gen_vec_setv2di (target_hi, temp_hi, ++ GEN_INT (i))); ++ emit_insn (gen_vec_setv2di (target_lo, temp_lo, ++ GEN_INT (i))); ++ } ++ } ++ emit_insn (gen_rtx_SET (target, ++ gen_rtx_VEC_CONCAT (vmode, target_hi, ++ target_lo))); ++ break; ++ ++ case E_V8SFmode: ++ half_mode=E_V4SFmode; ++ target_hi = gen_reg_rtx (half_mode); ++ target_lo = gen_reg_rtx (half_mode); ++ for (i = 0; i < nelt/2; ++i) ++ { ++ rtx temp_hi = gen_reg_rtx (imode); ++ rtx temp_lo = gen_reg_rtx (imode); ++ emit_move_insn (temp_hi, XVECEXP (vals, 0, i+nelt/2)); ++ emit_move_insn (temp_lo, XVECEXP (vals, 0, i)); ++ if (i == 0) ++ { ++ emit_insn (gen_lsx_vreplvei_w_f_scalar (target_hi, ++ temp_hi)); ++ emit_insn 
(gen_lsx_vreplvei_w_f_scalar (target_lo, ++ temp_lo)); ++ } ++ else ++ { ++ emit_insn (gen_vec_setv4sf (target_hi, temp_hi, ++ GEN_INT (i))); ++ emit_insn (gen_vec_setv4sf (target_lo, temp_lo, ++ GEN_INT (i))); ++ } ++ } ++ emit_insn (gen_rtx_SET (target, ++ gen_rtx_VEC_CONCAT (vmode, target_hi, ++ target_lo))); ++ break; ++ ++ case E_V4DFmode: ++ half_mode=E_V2DFmode; ++ target_hi = gen_reg_rtx (half_mode); ++ target_lo = gen_reg_rtx (half_mode); ++ for (i = 0; i < nelt/2; ++i) ++ { ++ rtx temp_hi = gen_reg_rtx (imode); ++ rtx temp_lo = gen_reg_rtx (imode); ++ emit_move_insn (temp_hi, XVECEXP (vals, 0, i+nelt/2)); ++ emit_move_insn (temp_lo, XVECEXP (vals, 0, i)); ++ if (i == 0) ++ { ++ emit_insn (gen_lsx_vreplvei_d_f_scalar (target_hi, ++ temp_hi)); ++ emit_insn (gen_lsx_vreplvei_d_f_scalar (target_lo, ++ temp_lo)); ++ } ++ else ++ { ++ emit_insn (gen_vec_setv2df (target_hi, temp_hi, ++ GEN_INT (i))); ++ emit_insn (gen_vec_setv2df (target_lo, temp_lo, ++ GEN_INT (i))); ++ } ++ } ++ emit_insn (gen_rtx_SET (target, ++ gen_rtx_VEC_CONCAT (vmode, target_hi, ++ target_lo))); ++ break; ++ ++ default: ++ gcc_unreachable (); ++ } ++ ++ } ++ return; ++ } ++ + if (ISA_HAS_LSX) + { + if (all_same) +@@ -8372,6 +10691,38 @@ loongarch_expand_lsx_cmp (rtx dest, enum rtx_code cond, rtx op0, rtx op1) + } + break; + ++ case E_V8SFmode: ++ case E_V4DFmode: ++ switch (cond) ++ { ++ case UNORDERED: ++ case ORDERED: ++ case EQ: ++ case NE: ++ case UNEQ: ++ case UNLE: ++ case UNLT: ++ break; ++ case LTGT: cond = NE; break; ++ case UNGE: cond = UNLE; std::swap (op0, op1); break; ++ case UNGT: cond = UNLT; std::swap (op0, op1); break; ++ case LE: unspec = UNSPEC_LASX_XVFCMP_SLE; break; ++ case LT: unspec = UNSPEC_LASX_XVFCMP_SLT; break; ++ case GE: unspec = UNSPEC_LASX_XVFCMP_SLE; std::swap (op0, op1); break; ++ case GT: unspec = UNSPEC_LASX_XVFCMP_SLT; std::swap (op0, op1); break; ++ default: ++ gcc_unreachable (); ++ } ++ if (unspec < 0) ++ loongarch_emit_binary (cond, dest, op0, op1); 
++ else ++ { ++ rtx x = gen_rtx_UNSPEC (GET_MODE (dest), ++ gen_rtvec (2, op0, op1), unspec); ++ emit_insn (gen_rtx_SET (dest, x)); ++ } ++ break; ++ + default: + gcc_unreachable (); + break; +@@ -8709,7 +11060,7 @@ loongarch_builtin_support_vector_misalignment (machine_mode mode, + int misalignment, + bool is_packed) + { +- if (ISA_HAS_LSX && STRICT_ALIGNMENT) ++ if ((ISA_HAS_LSX || ISA_HAS_LASX) && STRICT_ALIGNMENT) + { + if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing) + return false; +diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h +index c3ebea2f2..b2295c589 100644 +--- a/gcc/config/loongarch/loongarch.h ++++ b/gcc/config/loongarch/loongarch.h +@@ -186,6 +186,11 @@ along with GCC; see the file COPYING3. If not see + /* Width of a LSX vector register in bits. */ + #define BITS_PER_LSX_REG (UNITS_PER_LSX_REG * BITS_PER_UNIT) + ++/* Width of a LASX vector register in bytes. */ ++#define UNITS_PER_LASX_REG 32 ++/* Width of a LASX vector register in bits. */ ++#define BITS_PER_LASX_REG (UNITS_PER_LASX_REG * BITS_PER_UNIT) ++ + /* For LARCH, width of a floating point register. */ + #define UNITS_PER_FPREG (TARGET_DOUBLE_FLOAT ? 8 : 4) + +@@ -248,10 +253,11 @@ along with GCC; see the file COPYING3. If not see + #define STRUCTURE_SIZE_BOUNDARY 8 + + /* There is no point aligning anything to a rounder boundary than +- LONG_DOUBLE_TYPE_SIZE, unless under LSX the bigggest alignment is +- BITS_PER_LSX_REG/.. */ ++ LONG_DOUBLE_TYPE_SIZE, unless under LSX/LASX the bigggest alignment is ++ BITS_PER_LSX_REG/BITS_PER_LASX_REG/.. */ + #define BIGGEST_ALIGNMENT \ +- (ISA_HAS_LSX ? BITS_PER_LSX_REG : LONG_DOUBLE_TYPE_SIZE) ++ (ISA_HAS_LASX? BITS_PER_LASX_REG \ ++ : (ISA_HAS_LSX ? BITS_PER_LSX_REG : LONG_DOUBLE_TYPE_SIZE)) + + /* All accesses must be aligned. */ + #define STRICT_ALIGNMENT (TARGET_STRICT_ALIGN) +@@ -391,6 +397,10 @@ along with GCC; see the file COPYING3. 
If not see + #define LSX_REG_LAST FP_REG_LAST + #define LSX_REG_NUM FP_REG_NUM + ++#define LASX_REG_FIRST FP_REG_FIRST ++#define LASX_REG_LAST FP_REG_LAST ++#define LASX_REG_NUM FP_REG_NUM ++ + /* The DWARF 2 CFA column which tracks the return address from a + signal handler context. This means that to maintain backwards + compatibility, no hard register can be assigned this column if it +@@ -409,9 +419,12 @@ along with GCC; see the file COPYING3. If not see + ((unsigned int) ((int) (REGNO) - FCC_REG_FIRST) < FCC_REG_NUM) + #define LSX_REG_P(REGNO) \ + ((unsigned int) ((int) (REGNO) - LSX_REG_FIRST) < LSX_REG_NUM) ++#define LASX_REG_P(REGNO) \ ++ ((unsigned int) ((int) (REGNO) - LASX_REG_FIRST) < LASX_REG_NUM) + + #define FP_REG_RTX_P(X) (REG_P (X) && FP_REG_P (REGNO (X))) + #define LSX_REG_RTX_P(X) (REG_P (X) && LSX_REG_P (REGNO (X))) ++#define LASX_REG_RTX_P(X) (REG_P (X) && LASX_REG_P (REGNO (X))) + + /* Select a register mode required for caller save of hard regno REGNO. */ + #define HARD_REGNO_CALLER_SAVE_MODE(REGNO, NREGS, MODE) \ +@@ -733,6 +746,13 @@ enum reg_class + && (GET_MODE_CLASS (MODE) == MODE_VECTOR_INT \ + || GET_MODE_CLASS (MODE) == MODE_VECTOR_FLOAT)) + ++#define LASX_SUPPORTED_MODE_P(MODE) \ ++ (ISA_HAS_LASX \ ++ && (GET_MODE_SIZE (MODE) == UNITS_PER_LSX_REG \ ++ ||GET_MODE_SIZE (MODE) == UNITS_PER_LASX_REG) \ ++ && (GET_MODE_CLASS (MODE) == MODE_VECTOR_INT \ ++ || GET_MODE_CLASS (MODE) == MODE_VECTOR_FLOAT)) ++ + /* 1 if N is a possible register number for function argument passing. + We have no FP argument registers when soft-float. 
*/ + +@@ -985,7 +1005,39 @@ typedef struct { + { "vr28", 28 + FP_REG_FIRST }, \ + { "vr29", 29 + FP_REG_FIRST }, \ + { "vr30", 30 + FP_REG_FIRST }, \ +- { "vr31", 31 + FP_REG_FIRST } \ ++ { "vr31", 31 + FP_REG_FIRST }, \ ++ { "xr0", 0 + FP_REG_FIRST }, \ ++ { "xr1", 1 + FP_REG_FIRST }, \ ++ { "xr2", 2 + FP_REG_FIRST }, \ ++ { "xr3", 3 + FP_REG_FIRST }, \ ++ { "xr4", 4 + FP_REG_FIRST }, \ ++ { "xr5", 5 + FP_REG_FIRST }, \ ++ { "xr6", 6 + FP_REG_FIRST }, \ ++ { "xr7", 7 + FP_REG_FIRST }, \ ++ { "xr8", 8 + FP_REG_FIRST }, \ ++ { "xr9", 9 + FP_REG_FIRST }, \ ++ { "xr10", 10 + FP_REG_FIRST }, \ ++ { "xr11", 11 + FP_REG_FIRST }, \ ++ { "xr12", 12 + FP_REG_FIRST }, \ ++ { "xr13", 13 + FP_REG_FIRST }, \ ++ { "xr14", 14 + FP_REG_FIRST }, \ ++ { "xr15", 15 + FP_REG_FIRST }, \ ++ { "xr16", 16 + FP_REG_FIRST }, \ ++ { "xr17", 17 + FP_REG_FIRST }, \ ++ { "xr18", 18 + FP_REG_FIRST }, \ ++ { "xr19", 19 + FP_REG_FIRST }, \ ++ { "xr20", 20 + FP_REG_FIRST }, \ ++ { "xr21", 21 + FP_REG_FIRST }, \ ++ { "xr22", 22 + FP_REG_FIRST }, \ ++ { "xr23", 23 + FP_REG_FIRST }, \ ++ { "xr24", 24 + FP_REG_FIRST }, \ ++ { "xr25", 25 + FP_REG_FIRST }, \ ++ { "xr26", 26 + FP_REG_FIRST }, \ ++ { "xr27", 27 + FP_REG_FIRST }, \ ++ { "xr28", 28 + FP_REG_FIRST }, \ ++ { "xr29", 29 + FP_REG_FIRST }, \ ++ { "xr30", 30 + FP_REG_FIRST }, \ ++ { "xr31", 31 + FP_REG_FIRST } \ + } + + /* Globalizing directive for a label. */ +diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md +index fb3828262..3dde0ceb1 100644 +--- a/gcc/config/loongarch/loongarch.md ++++ b/gcc/config/loongarch/loongarch.md +@@ -163,7 +163,7 @@ + + ;; Main data type used by the insn + (define_attr "mode" "unknown,none,QI,HI,SI,DI,TI,SF,DF,TF,FCC, +- V2DI,V4SI,V8HI,V16QI,V2DF,V4SF" ++ V2DI,V4SI,V8HI,V16QI,V2DF,V4SF,V4DI,V8SI,V16HI,V32QI,V4DF,V8SF" + (const_string "unknown")) + + ;; True if the main data type is twice the size of a word. +@@ -422,12 +422,14 @@ + ;; floating-point mode or vector mode. 
+ (define_mode_attr UNITMODE [(SF "SF") (DF "DF") (V2SF "SF") (V4SF "SF") + (V16QI "QI") (V8HI "HI") (V4SI "SI") (V2DI "DI") +- (V2DF "DF")]) ++ (V2DF "DF")(V8SF "SF")(V32QI "QI")(V16HI "HI")(V8SI "SI")(V4DI "DI")(V4DF "DF")]) + + ;; As above, but in lower case. + (define_mode_attr unitmode [(SF "sf") (DF "df") (V2SF "sf") (V4SF "sf") + (V16QI "qi") (V8QI "qi") (V8HI "hi") (V4HI "hi") +- (V4SI "si") (V2SI "si") (V2DI "di") (V2DF "df")]) ++ (V4SI "si") (V2SI "si") (V2DI "di") (V2DF "df") ++ (V8SI "si") (V4DI "di") (V32QI "qi") (V16HI "hi") ++ (V8SF "sf") (V4DF "df")]) + + ;; This attribute gives the integer mode that has half the size of + ;; the controlling mode. +@@ -711,16 +713,17 @@ + [(set_attr "alu_type" "sub") + (set_attr "mode" "")]) + ++ + (define_insn "*subsi3_extended" +- [(set (match_operand:DI 0 "register_operand" "= r") ++ [(set (match_operand:DI 0 "register_operand" "=r") + (sign_extend:DI +- (minus:SI (match_operand:SI 1 "reg_or_0_operand" " rJ") +- (match_operand:SI 2 "register_operand" " r"))))] ++ (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ") ++ (match_operand:SI 2 "register_operand" "r"))))] + "TARGET_64BIT" + "sub.w\t%0,%z1,%2" + [(set_attr "type" "arith") + (set_attr "mode" "SI")]) +- ++ + ;; + ;; .................... + ;; +@@ -3638,6 +3641,9 @@ + ; The LoongArch SX Instructions. + (include "lsx.md") + ++; The LoongArch ASX Instructions. 
++(include "lasx.md") ++ + (define_c_enum "unspec" [ + UNSPEC_ADDRESS_FIRST + ]) +-- +2.33.0 + diff --git a/LoongArch-Add-Loongson-ASX-directive-builtin-functio.patch b/LoongArch-Add-Loongson-ASX-directive-builtin-functio.patch new file mode 100644 index 0000000000000000000000000000000000000000..2cc656da753ba5c5b40259bb93ad91b3a6785bbc --- /dev/null +++ b/LoongArch-Add-Loongson-ASX-directive-builtin-functio.patch @@ -0,0 +1,7458 @@ +From 6871a6a4ef5f10bc75a9dd76fff37302057cf528 Mon Sep 17 00:00:00 2001 +From: Lulu Cheng +Date: Fri, 25 Nov 2022 11:09:49 +0800 +Subject: [PATCH 066/124] LoongArch: Add Loongson ASX directive builtin + function support. + +gcc/ChangeLog: + + * config.gcc: Export the header file lasxintrin.h. + * config/loongarch/loongarch-builtins.cc (enum loongarch_builtin_type): + Add Loongson ASX builtin functions support. + (AVAIL_ALL): Ditto. + (LASX_BUILTIN): Ditto. + (LASX_NO_TARGET_BUILTIN): Ditto. + (LASX_BUILTIN_TEST_BRANCH): Ditto. + (CODE_FOR_lasx_xvsadd_b): Ditto. + (CODE_FOR_lasx_xvsadd_h): Ditto. + (CODE_FOR_lasx_xvsadd_w): Ditto. + (CODE_FOR_lasx_xvsadd_d): Ditto. + (CODE_FOR_lasx_xvsadd_bu): Ditto. + (CODE_FOR_lasx_xvsadd_hu): Ditto. + (CODE_FOR_lasx_xvsadd_wu): Ditto. + (CODE_FOR_lasx_xvsadd_du): Ditto. + (CODE_FOR_lasx_xvadd_b): Ditto. + (CODE_FOR_lasx_xvadd_h): Ditto. + (CODE_FOR_lasx_xvadd_w): Ditto. + (CODE_FOR_lasx_xvadd_d): Ditto. + (CODE_FOR_lasx_xvaddi_bu): Ditto. + (CODE_FOR_lasx_xvaddi_hu): Ditto. + (CODE_FOR_lasx_xvaddi_wu): Ditto. + (CODE_FOR_lasx_xvaddi_du): Ditto. + (CODE_FOR_lasx_xvand_v): Ditto. + (CODE_FOR_lasx_xvandi_b): Ditto. + (CODE_FOR_lasx_xvbitsel_v): Ditto. + (CODE_FOR_lasx_xvseqi_b): Ditto. + (CODE_FOR_lasx_xvseqi_h): Ditto. + (CODE_FOR_lasx_xvseqi_w): Ditto. + (CODE_FOR_lasx_xvseqi_d): Ditto. + (CODE_FOR_lasx_xvslti_b): Ditto. + (CODE_FOR_lasx_xvslti_h): Ditto. + (CODE_FOR_lasx_xvslti_w): Ditto. + (CODE_FOR_lasx_xvslti_d): Ditto. + (CODE_FOR_lasx_xvslti_bu): Ditto. + (CODE_FOR_lasx_xvslti_hu): Ditto. 
+ (CODE_FOR_lasx_xvslti_wu): Ditto. + (CODE_FOR_lasx_xvslti_du): Ditto. + (CODE_FOR_lasx_xvslei_b): Ditto. + (CODE_FOR_lasx_xvslei_h): Ditto. + (CODE_FOR_lasx_xvslei_w): Ditto. + (CODE_FOR_lasx_xvslei_d): Ditto. + (CODE_FOR_lasx_xvslei_bu): Ditto. + (CODE_FOR_lasx_xvslei_hu): Ditto. + (CODE_FOR_lasx_xvslei_wu): Ditto. + (CODE_FOR_lasx_xvslei_du): Ditto. + (CODE_FOR_lasx_xvdiv_b): Ditto. + (CODE_FOR_lasx_xvdiv_h): Ditto. + (CODE_FOR_lasx_xvdiv_w): Ditto. + (CODE_FOR_lasx_xvdiv_d): Ditto. + (CODE_FOR_lasx_xvdiv_bu): Ditto. + (CODE_FOR_lasx_xvdiv_hu): Ditto. + (CODE_FOR_lasx_xvdiv_wu): Ditto. + (CODE_FOR_lasx_xvdiv_du): Ditto. + (CODE_FOR_lasx_xvfadd_s): Ditto. + (CODE_FOR_lasx_xvfadd_d): Ditto. + (CODE_FOR_lasx_xvftintrz_w_s): Ditto. + (CODE_FOR_lasx_xvftintrz_l_d): Ditto. + (CODE_FOR_lasx_xvftintrz_wu_s): Ditto. + (CODE_FOR_lasx_xvftintrz_lu_d): Ditto. + (CODE_FOR_lasx_xvffint_s_w): Ditto. + (CODE_FOR_lasx_xvffint_d_l): Ditto. + (CODE_FOR_lasx_xvffint_s_wu): Ditto. + (CODE_FOR_lasx_xvffint_d_lu): Ditto. + (CODE_FOR_lasx_xvfsub_s): Ditto. + (CODE_FOR_lasx_xvfsub_d): Ditto. + (CODE_FOR_lasx_xvfmul_s): Ditto. + (CODE_FOR_lasx_xvfmul_d): Ditto. + (CODE_FOR_lasx_xvfdiv_s): Ditto. + (CODE_FOR_lasx_xvfdiv_d): Ditto. + (CODE_FOR_lasx_xvfmax_s): Ditto. + (CODE_FOR_lasx_xvfmax_d): Ditto. + (CODE_FOR_lasx_xvfmin_s): Ditto. + (CODE_FOR_lasx_xvfmin_d): Ditto. + (CODE_FOR_lasx_xvfsqrt_s): Ditto. + (CODE_FOR_lasx_xvfsqrt_d): Ditto. + (CODE_FOR_lasx_xvflogb_s): Ditto. + (CODE_FOR_lasx_xvflogb_d): Ditto. + (CODE_FOR_lasx_xvmax_b): Ditto. + (CODE_FOR_lasx_xvmax_h): Ditto. + (CODE_FOR_lasx_xvmax_w): Ditto. + (CODE_FOR_lasx_xvmax_d): Ditto. + (CODE_FOR_lasx_xvmaxi_b): Ditto. + (CODE_FOR_lasx_xvmaxi_h): Ditto. + (CODE_FOR_lasx_xvmaxi_w): Ditto. + (CODE_FOR_lasx_xvmaxi_d): Ditto. + (CODE_FOR_lasx_xvmax_bu): Ditto. + (CODE_FOR_lasx_xvmax_hu): Ditto. + (CODE_FOR_lasx_xvmax_wu): Ditto. + (CODE_FOR_lasx_xvmax_du): Ditto. + (CODE_FOR_lasx_xvmaxi_bu): Ditto. + (CODE_FOR_lasx_xvmaxi_hu): Ditto. 
+ (CODE_FOR_lasx_xvmaxi_wu): Ditto. + (CODE_FOR_lasx_xvmaxi_du): Ditto. + (CODE_FOR_lasx_xvmin_b): Ditto. + (CODE_FOR_lasx_xvmin_h): Ditto. + (CODE_FOR_lasx_xvmin_w): Ditto. + (CODE_FOR_lasx_xvmin_d): Ditto. + (CODE_FOR_lasx_xvmini_b): Ditto. + (CODE_FOR_lasx_xvmini_h): Ditto. + (CODE_FOR_lasx_xvmini_w): Ditto. + (CODE_FOR_lasx_xvmini_d): Ditto. + (CODE_FOR_lasx_xvmin_bu): Ditto. + (CODE_FOR_lasx_xvmin_hu): Ditto. + (CODE_FOR_lasx_xvmin_wu): Ditto. + (CODE_FOR_lasx_xvmin_du): Ditto. + (CODE_FOR_lasx_xvmini_bu): Ditto. + (CODE_FOR_lasx_xvmini_hu): Ditto. + (CODE_FOR_lasx_xvmini_wu): Ditto. + (CODE_FOR_lasx_xvmini_du): Ditto. + (CODE_FOR_lasx_xvmod_b): Ditto. + (CODE_FOR_lasx_xvmod_h): Ditto. + (CODE_FOR_lasx_xvmod_w): Ditto. + (CODE_FOR_lasx_xvmod_d): Ditto. + (CODE_FOR_lasx_xvmod_bu): Ditto. + (CODE_FOR_lasx_xvmod_hu): Ditto. + (CODE_FOR_lasx_xvmod_wu): Ditto. + (CODE_FOR_lasx_xvmod_du): Ditto. + (CODE_FOR_lasx_xvmul_b): Ditto. + (CODE_FOR_lasx_xvmul_h): Ditto. + (CODE_FOR_lasx_xvmul_w): Ditto. + (CODE_FOR_lasx_xvmul_d): Ditto. + (CODE_FOR_lasx_xvclz_b): Ditto. + (CODE_FOR_lasx_xvclz_h): Ditto. + (CODE_FOR_lasx_xvclz_w): Ditto. + (CODE_FOR_lasx_xvclz_d): Ditto. + (CODE_FOR_lasx_xvnor_v): Ditto. + (CODE_FOR_lasx_xvor_v): Ditto. + (CODE_FOR_lasx_xvori_b): Ditto. + (CODE_FOR_lasx_xvnori_b): Ditto. + (CODE_FOR_lasx_xvpcnt_b): Ditto. + (CODE_FOR_lasx_xvpcnt_h): Ditto. + (CODE_FOR_lasx_xvpcnt_w): Ditto. + (CODE_FOR_lasx_xvpcnt_d): Ditto. + (CODE_FOR_lasx_xvxor_v): Ditto. + (CODE_FOR_lasx_xvxori_b): Ditto. + (CODE_FOR_lasx_xvsll_b): Ditto. + (CODE_FOR_lasx_xvsll_h): Ditto. + (CODE_FOR_lasx_xvsll_w): Ditto. + (CODE_FOR_lasx_xvsll_d): Ditto. + (CODE_FOR_lasx_xvslli_b): Ditto. + (CODE_FOR_lasx_xvslli_h): Ditto. + (CODE_FOR_lasx_xvslli_w): Ditto. + (CODE_FOR_lasx_xvslli_d): Ditto. + (CODE_FOR_lasx_xvsra_b): Ditto. + (CODE_FOR_lasx_xvsra_h): Ditto. + (CODE_FOR_lasx_xvsra_w): Ditto. + (CODE_FOR_lasx_xvsra_d): Ditto. + (CODE_FOR_lasx_xvsrai_b): Ditto. 
+ (CODE_FOR_lasx_xvsrai_h): Ditto. + (CODE_FOR_lasx_xvsrai_w): Ditto. + (CODE_FOR_lasx_xvsrai_d): Ditto. + (CODE_FOR_lasx_xvsrl_b): Ditto. + (CODE_FOR_lasx_xvsrl_h): Ditto. + (CODE_FOR_lasx_xvsrl_w): Ditto. + (CODE_FOR_lasx_xvsrl_d): Ditto. + (CODE_FOR_lasx_xvsrli_b): Ditto. + (CODE_FOR_lasx_xvsrli_h): Ditto. + (CODE_FOR_lasx_xvsrli_w): Ditto. + (CODE_FOR_lasx_xvsrli_d): Ditto. + (CODE_FOR_lasx_xvsub_b): Ditto. + (CODE_FOR_lasx_xvsub_h): Ditto. + (CODE_FOR_lasx_xvsub_w): Ditto. + (CODE_FOR_lasx_xvsub_d): Ditto. + (CODE_FOR_lasx_xvsubi_bu): Ditto. + (CODE_FOR_lasx_xvsubi_hu): Ditto. + (CODE_FOR_lasx_xvsubi_wu): Ditto. + (CODE_FOR_lasx_xvsubi_du): Ditto. + (CODE_FOR_lasx_xvpackod_d): Ditto. + (CODE_FOR_lasx_xvpackev_d): Ditto. + (CODE_FOR_lasx_xvpickod_d): Ditto. + (CODE_FOR_lasx_xvpickev_d): Ditto. + (CODE_FOR_lasx_xvrepli_b): Ditto. + (CODE_FOR_lasx_xvrepli_h): Ditto. + (CODE_FOR_lasx_xvrepli_w): Ditto. + (CODE_FOR_lasx_xvrepli_d): Ditto. + (CODE_FOR_lasx_xvandn_v): Ditto. + (CODE_FOR_lasx_xvorn_v): Ditto. + (CODE_FOR_lasx_xvneg_b): Ditto. + (CODE_FOR_lasx_xvneg_h): Ditto. + (CODE_FOR_lasx_xvneg_w): Ditto. + (CODE_FOR_lasx_xvneg_d): Ditto. + (CODE_FOR_lasx_xvbsrl_v): Ditto. + (CODE_FOR_lasx_xvbsll_v): Ditto. + (CODE_FOR_lasx_xvfmadd_s): Ditto. + (CODE_FOR_lasx_xvfmadd_d): Ditto. + (CODE_FOR_lasx_xvfmsub_s): Ditto. + (CODE_FOR_lasx_xvfmsub_d): Ditto. + (CODE_FOR_lasx_xvfnmadd_s): Ditto. + (CODE_FOR_lasx_xvfnmadd_d): Ditto. + (CODE_FOR_lasx_xvfnmsub_s): Ditto. + (CODE_FOR_lasx_xvfnmsub_d): Ditto. + (CODE_FOR_lasx_xvpermi_q): Ditto. + (CODE_FOR_lasx_xvpermi_d): Ditto. + (CODE_FOR_lasx_xbnz_v): Ditto. + (CODE_FOR_lasx_xbz_v): Ditto. + (CODE_FOR_lasx_xvssub_b): Ditto. + (CODE_FOR_lasx_xvssub_h): Ditto. + (CODE_FOR_lasx_xvssub_w): Ditto. + (CODE_FOR_lasx_xvssub_d): Ditto. + (CODE_FOR_lasx_xvssub_bu): Ditto. + (CODE_FOR_lasx_xvssub_hu): Ditto. + (CODE_FOR_lasx_xvssub_wu): Ditto. + (CODE_FOR_lasx_xvssub_du): Ditto. + (CODE_FOR_lasx_xvabsd_b): Ditto. 
+ (CODE_FOR_lasx_xvabsd_h): Ditto. + (CODE_FOR_lasx_xvabsd_w): Ditto. + (CODE_FOR_lasx_xvabsd_d): Ditto. + (CODE_FOR_lasx_xvabsd_bu): Ditto. + (CODE_FOR_lasx_xvabsd_hu): Ditto. + (CODE_FOR_lasx_xvabsd_wu): Ditto. + (CODE_FOR_lasx_xvabsd_du): Ditto. + (CODE_FOR_lasx_xvavg_b): Ditto. + (CODE_FOR_lasx_xvavg_h): Ditto. + (CODE_FOR_lasx_xvavg_w): Ditto. + (CODE_FOR_lasx_xvavg_d): Ditto. + (CODE_FOR_lasx_xvavg_bu): Ditto. + (CODE_FOR_lasx_xvavg_hu): Ditto. + (CODE_FOR_lasx_xvavg_wu): Ditto. + (CODE_FOR_lasx_xvavg_du): Ditto. + (CODE_FOR_lasx_xvavgr_b): Ditto. + (CODE_FOR_lasx_xvavgr_h): Ditto. + (CODE_FOR_lasx_xvavgr_w): Ditto. + (CODE_FOR_lasx_xvavgr_d): Ditto. + (CODE_FOR_lasx_xvavgr_bu): Ditto. + (CODE_FOR_lasx_xvavgr_hu): Ditto. + (CODE_FOR_lasx_xvavgr_wu): Ditto. + (CODE_FOR_lasx_xvavgr_du): Ditto. + (CODE_FOR_lasx_xvmuh_b): Ditto. + (CODE_FOR_lasx_xvmuh_h): Ditto. + (CODE_FOR_lasx_xvmuh_w): Ditto. + (CODE_FOR_lasx_xvmuh_d): Ditto. + (CODE_FOR_lasx_xvmuh_bu): Ditto. + (CODE_FOR_lasx_xvmuh_hu): Ditto. + (CODE_FOR_lasx_xvmuh_wu): Ditto. + (CODE_FOR_lasx_xvmuh_du): Ditto. + (CODE_FOR_lasx_xvssran_b_h): Ditto. + (CODE_FOR_lasx_xvssran_h_w): Ditto. + (CODE_FOR_lasx_xvssran_w_d): Ditto. + (CODE_FOR_lasx_xvssran_bu_h): Ditto. + (CODE_FOR_lasx_xvssran_hu_w): Ditto. + (CODE_FOR_lasx_xvssran_wu_d): Ditto. + (CODE_FOR_lasx_xvssrarn_b_h): Ditto. + (CODE_FOR_lasx_xvssrarn_h_w): Ditto. + (CODE_FOR_lasx_xvssrarn_w_d): Ditto. + (CODE_FOR_lasx_xvssrarn_bu_h): Ditto. + (CODE_FOR_lasx_xvssrarn_hu_w): Ditto. + (CODE_FOR_lasx_xvssrarn_wu_d): Ditto. + (CODE_FOR_lasx_xvssrln_bu_h): Ditto. + (CODE_FOR_lasx_xvssrln_hu_w): Ditto. + (CODE_FOR_lasx_xvssrln_wu_d): Ditto. + (CODE_FOR_lasx_xvssrlrn_bu_h): Ditto. + (CODE_FOR_lasx_xvssrlrn_hu_w): Ditto. + (CODE_FOR_lasx_xvssrlrn_wu_d): Ditto. + (CODE_FOR_lasx_xvftint_w_s): Ditto. + (CODE_FOR_lasx_xvftint_l_d): Ditto. + (CODE_FOR_lasx_xvftint_wu_s): Ditto. + (CODE_FOR_lasx_xvftint_lu_d): Ditto. + (CODE_FOR_lasx_xvsllwil_h_b): Ditto. 
+ (CODE_FOR_lasx_xvsllwil_w_h): Ditto. + (CODE_FOR_lasx_xvsllwil_d_w): Ditto. + (CODE_FOR_lasx_xvsllwil_hu_bu): Ditto. + (CODE_FOR_lasx_xvsllwil_wu_hu): Ditto. + (CODE_FOR_lasx_xvsllwil_du_wu): Ditto. + (CODE_FOR_lasx_xvsat_b): Ditto. + (CODE_FOR_lasx_xvsat_h): Ditto. + (CODE_FOR_lasx_xvsat_w): Ditto. + (CODE_FOR_lasx_xvsat_d): Ditto. + (CODE_FOR_lasx_xvsat_bu): Ditto. + (CODE_FOR_lasx_xvsat_hu): Ditto. + (CODE_FOR_lasx_xvsat_wu): Ditto. + (CODE_FOR_lasx_xvsat_du): Ditto. + (loongarch_builtin_vectorized_function): Ditto. + (loongarch_expand_builtin_insn): Ditto. + (loongarch_expand_builtin): Ditto. + * config/loongarch/loongarch-ftypes.def (1): Ditto. + (2): Ditto. + (3): Ditto. + (4): Ditto. + * config/loongarch/lasxintrin.h: New file. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config.gcc | 2 +- + gcc/config/loongarch/lasxintrin.h | 5338 ++++++++++++++++++++ + gcc/config/loongarch/loongarch-builtins.cc | 1180 ++++- + gcc/config/loongarch/loongarch-ftypes.def | 271 +- + 4 files changed, 6788 insertions(+), 3 deletions(-) + create mode 100644 gcc/config/loongarch/lasxintrin.h + +diff --git a/gcc/config.gcc b/gcc/config.gcc +index 4e149e0ef..19f584344 100644 +--- a/gcc/config.gcc ++++ b/gcc/config.gcc +@@ -456,7 +456,7 @@ mips*-*-*) + ;; + loongarch*-*-*) + cpu_type=loongarch +- extra_headers="larchintrin.h lsxintrin.h" ++ extra_headers="larchintrin.h lsxintrin.h lasxintrin.h" + extra_objs="loongarch-c.o loongarch-builtins.o loongarch-cpu.o loongarch-opts.o loongarch-def.o" + extra_gcc_objs="loongarch-driver.o loongarch-cpu.o loongarch-opts.o loongarch-def.o" + extra_options="${extra_options} g.opt fused-madd.opt" +diff --git a/gcc/config/loongarch/lasxintrin.h b/gcc/config/loongarch/lasxintrin.h +new file mode 100644 +index 000000000..d39379927 +--- /dev/null ++++ b/gcc/config/loongarch/lasxintrin.h +@@ -0,0 +1,5338 @@ ++/* LARCH Loongson ASX intrinsics include file. ++ ++ Copyright (C) 2018 Free Software Foundation, Inc. 
++ ++ This file is part of GCC. ++ ++ GCC is free software; you can redistribute it and/or modify it ++ under the terms of the GNU General Public License as published ++ by the Free Software Foundation; either version 3, or (at your ++ option) any later version. ++ ++ GCC is distributed in the hope that it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY ++ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public ++ License for more details. ++ ++ Under Section 7 of GPL version 3, you are granted additional ++ permissions described in the GCC Runtime Library Exception, version ++ 3.1, as published by the Free Software Foundation. ++ ++ You should have received a copy of the GNU General Public License and ++ a copy of the GCC Runtime Library Exception along with this program; ++ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see ++ . */ ++ ++#ifndef _GCC_LOONGSON_ASXINTRIN_H ++#define _GCC_LOONGSON_ASXINTRIN_H 1 ++ ++#if defined(__loongarch_asx) ++ ++typedef signed char v32i8 __attribute__ ((vector_size(32), aligned(32))); ++typedef signed char v32i8_b __attribute__ ((vector_size(32), aligned(1))); ++typedef unsigned char v32u8 __attribute__ ((vector_size(32), aligned(32))); ++typedef unsigned char v32u8_b __attribute__ ((vector_size(32), aligned(1))); ++typedef short v16i16 __attribute__ ((vector_size(32), aligned(32))); ++typedef short v16i16_h __attribute__ ((vector_size(32), aligned(2))); ++typedef unsigned short v16u16 __attribute__ ((vector_size(32), aligned(32))); ++typedef unsigned short v16u16_h __attribute__ ((vector_size(32), aligned(2))); ++typedef int v8i32 __attribute__ ((vector_size(32), aligned(32))); ++typedef int v8i32_w __attribute__ ((vector_size(32), aligned(4))); ++typedef unsigned int v8u32 __attribute__ ((vector_size(32), aligned(32))); ++typedef unsigned int v8u32_w __attribute__ ((vector_size(32), aligned(4))); ++typedef long long v4i64 __attribute__ 
((vector_size(32), aligned(32))); ++typedef long long v4i64_d __attribute__ ((vector_size(32), aligned(8))); ++typedef unsigned long long v4u64 __attribute__ ((vector_size(32), aligned(32))); ++typedef unsigned long long v4u64_d __attribute__ ((vector_size(32), aligned(8))); ++typedef float v8f32 __attribute__ ((vector_size(32), aligned(32))); ++typedef float v8f32_w __attribute__ ((vector_size(32), aligned(4))); ++typedef double v4f64 __attribute__ ((vector_size(32), aligned(32))); ++typedef double v4f64_d __attribute__ ((vector_size(32), aligned(8))); ++typedef float __m256 __attribute__ ((__vector_size__ (32), ++ __may_alias__)); ++typedef long long __m256i __attribute__ ((__vector_size__ (32), ++ __may_alias__)); ++typedef double __m256d __attribute__ ((__vector_size__ (32), ++ __may_alias__)); ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsll_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsll_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsll_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsll_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsll_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsll_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsll_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsll_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: V32QI, V32QI, UQI. */ ++#define __lasx_xvslli_b(/*__m256i*/ _1, /*ui3*/ _2) \ ++ ((__m256i)__builtin_lasx_xvslli_b ((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V16HI, V16HI, UQI. */ ++#define __lasx_xvslli_h(/*__m256i*/ _1, /*ui4*/ _2) \ ++ ((__m256i)__builtin_lasx_xvslli_h ((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V8SI, V8SI, UQI. */ ++#define __lasx_xvslli_w(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvslli_w ((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V4DI, V4DI, UQI. */ ++#define __lasx_xvslli_d(/*__m256i*/ _1, /*ui6*/ _2) \ ++ ((__m256i)__builtin_lasx_xvslli_d ((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsra_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsra_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsra_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsra_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsra_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsra_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsra_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsra_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: V32QI, V32QI, UQI. */ ++#define __lasx_xvsrai_b(/*__m256i*/ _1, /*ui3*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsrai_b ((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V16HI, V16HI, UQI. */ ++#define __lasx_xvsrai_h(/*__m256i*/ _1, /*ui4*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsrai_h ((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V8SI, V8SI, UQI. */ ++#define __lasx_xvsrai_w(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsrai_w ((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V4DI, V4DI, UQI. */ ++#define __lasx_xvsrai_d(/*__m256i*/ _1, /*ui6*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsrai_d ((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrar_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrar_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrar_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrar_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrar_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrar_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrar_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrar_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: V32QI, V32QI, UQI. */ ++#define __lasx_xvsrari_b(/*__m256i*/ _1, /*ui3*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsrari_b ((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V16HI, V16HI, UQI. */ ++#define __lasx_xvsrari_h(/*__m256i*/ _1, /*ui4*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsrari_h ((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V8SI, V8SI, UQI. */ ++#define __lasx_xvsrari_w(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsrari_w ((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V4DI, V4DI, UQI. */ ++#define __lasx_xvsrari_d(/*__m256i*/ _1, /*ui6*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsrari_d ((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrl_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrl_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrl_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrl_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrl_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrl_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrl_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrl_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: V32QI, V32QI, UQI. */ ++#define __lasx_xvsrli_b(/*__m256i*/ _1, /*ui3*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsrli_b ((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V16HI, V16HI, UQI. */ ++#define __lasx_xvsrli_h(/*__m256i*/ _1, /*ui4*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsrli_h ((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V8SI, V8SI, UQI. */ ++#define __lasx_xvsrli_w(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsrli_w ((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V4DI, V4DI, UQI. 
*/ ++#define __lasx_xvsrli_d(/*__m256i*/ _1, /*ui6*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsrli_d ((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrlr_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrlr_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrlr_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrlr_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrlr_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrlr_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrlr_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrlr_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: V32QI, V32QI, UQI. */ ++#define __lasx_xvsrlri_b(/*__m256i*/ _1, /*ui3*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsrlri_b ((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V16HI, V16HI, UQI. */ ++#define __lasx_xvsrlri_h(/*__m256i*/ _1, /*ui4*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsrlri_h ((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. 
*/ ++/* Data types in instruction templates: V8SI, V8SI, UQI. */ ++#define __lasx_xvsrlri_w(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsrlri_w ((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V4DI, V4DI, UQI. */ ++#define __lasx_xvsrlri_d(/*__m256i*/ _1, /*ui6*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsrlri_d ((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvbitclr_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvbitclr_b ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvbitclr_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvbitclr_h ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvbitclr_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvbitclr_w ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvbitclr_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvbitclr_d ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UQI. 
*/ ++#define __lasx_xvbitclri_b(/*__m256i*/ _1, /*ui3*/ _2) \ ++ ((__m256i)__builtin_lasx_xvbitclri_b ((v32u8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UQI. */ ++#define __lasx_xvbitclri_h(/*__m256i*/ _1, /*ui4*/ _2) \ ++ ((__m256i)__builtin_lasx_xvbitclri_h ((v16u16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UQI. */ ++#define __lasx_xvbitclri_w(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvbitclri_w ((v8u32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UQI. */ ++#define __lasx_xvbitclri_d(/*__m256i*/ _1, /*ui6*/ _2) \ ++ ((__m256i)__builtin_lasx_xvbitclri_d ((v4u64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvbitset_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvbitset_b ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvbitset_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvbitset_h ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvbitset_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvbitset_w ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvbitset_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvbitset_d ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ ++#define __lasx_xvbitseti_b(/*__m256i*/ _1, /*ui3*/ _2) \ ++ ((__m256i)__builtin_lasx_xvbitseti_b ((v32u8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UQI. */ ++#define __lasx_xvbitseti_h(/*__m256i*/ _1, /*ui4*/ _2) \ ++ ((__m256i)__builtin_lasx_xvbitseti_h ((v16u16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UQI. */ ++#define __lasx_xvbitseti_w(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvbitseti_w ((v8u32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UQI. */ ++#define __lasx_xvbitseti_d(/*__m256i*/ _1, /*ui6*/ _2) \ ++ ((__m256i)__builtin_lasx_xvbitseti_d ((v4u64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvbitrev_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvbitrev_b ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvbitrev_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvbitrev_h ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvbitrev_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvbitrev_w ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvbitrev_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvbitrev_d ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ ++#define __lasx_xvbitrevi_b(/*__m256i*/ _1, /*ui3*/ _2) \ ++ ((__m256i)__builtin_lasx_xvbitrevi_b ((v32u8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UQI. */ ++#define __lasx_xvbitrevi_h(/*__m256i*/ _1, /*ui4*/ _2) \ ++ ((__m256i)__builtin_lasx_xvbitrevi_h ((v16u16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UQI. */ ++#define __lasx_xvbitrevi_w(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvbitrevi_w ((v8u32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UQI. */ ++#define __lasx_xvbitrevi_d(/*__m256i*/ _1, /*ui6*/ _2) \ ++ ((__m256i)__builtin_lasx_xvbitrevi_d ((v4u64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvadd_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvadd_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvadd_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvadd_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvadd_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvadd_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvadd_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvadd_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V32QI, V32QI, UQI. */ ++#define __lasx_xvaddi_bu(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvaddi_bu ((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V16HI, V16HI, UQI. */ ++#define __lasx_xvaddi_hu(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvaddi_hu ((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V8SI, V8SI, UQI. */ ++#define __lasx_xvaddi_wu(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvaddi_wu ((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V4DI, V4DI, UQI. */ ++#define __lasx_xvaddi_du(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvaddi_du ((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsub_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsub_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsub_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsub_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsub_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsub_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsub_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsub_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V32QI, V32QI, UQI. */ ++#define __lasx_xvsubi_bu(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsubi_bu ((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V16HI, V16HI, UQI. */ ++#define __lasx_xvsubi_hu(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsubi_hu ((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V8SI, V8SI, UQI. */ ++#define __lasx_xvsubi_wu(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsubi_wu ((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V4DI, V4DI, UQI. 
*/ ++#define __lasx_xvsubi_du(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsubi_du ((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmax_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmax_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmax_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmax_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmax_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmax_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmax_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmax_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V32QI, V32QI, QI. */ ++#define __lasx_xvmaxi_b(/*__m256i*/ _1, /*si5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvmaxi_b ((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V16HI, V16HI, QI. */ ++#define __lasx_xvmaxi_h(/*__m256i*/ _1, /*si5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvmaxi_h ((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V8SI, V8SI, QI. 
*/ ++#define __lasx_xvmaxi_w(/*__m256i*/ _1, /*si5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvmaxi_w ((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V4DI, V4DI, QI. */ ++#define __lasx_xvmaxi_d(/*__m256i*/ _1, /*si5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvmaxi_d ((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmax_bu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmax_bu ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmax_hu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmax_hu ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmax_wu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmax_wu ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmax_du (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmax_du ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ ++#define __lasx_xvmaxi_bu(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvmaxi_bu ((v32u8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. 
*/ ++/* Data types in instruction templates: UV16HI, UV16HI, UQI. */ ++#define __lasx_xvmaxi_hu(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvmaxi_hu ((v16u16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UQI. */ ++#define __lasx_xvmaxi_wu(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvmaxi_wu ((v8u32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UQI. */ ++#define __lasx_xvmaxi_du(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvmaxi_du ((v4u64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmin_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmin_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmin_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmin_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmin_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmin_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmin_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmin_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V32QI, V32QI, QI. */ ++#define __lasx_xvmini_b(/*__m256i*/ _1, /*si5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvmini_b ((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V16HI, V16HI, QI. */ ++#define __lasx_xvmini_h(/*__m256i*/ _1, /*si5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvmini_h ((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V8SI, V8SI, QI. */ ++#define __lasx_xvmini_w(/*__m256i*/ _1, /*si5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvmini_w ((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V4DI, V4DI, QI. */ ++#define __lasx_xvmini_d(/*__m256i*/ _1, /*si5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvmini_d ((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmin_bu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmin_bu ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmin_hu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmin_hu ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmin_wu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmin_wu ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmin_du (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmin_du ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ ++#define __lasx_xvmini_bu(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvmini_bu ((v32u8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UQI. */ ++#define __lasx_xvmini_hu(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvmini_hu ((v16u16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UQI. */ ++#define __lasx_xvmini_wu(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvmini_wu ((v8u32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UQI. */ ++#define __lasx_xvmini_du(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvmini_du ((v4u64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvseq_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvseq_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvseq_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvseq_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvseq_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvseq_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvseq_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvseq_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V32QI, V32QI, QI. */ ++#define __lasx_xvseqi_b(/*__m256i*/ _1, /*si5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvseqi_b ((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V16HI, V16HI, QI. */ ++#define __lasx_xvseqi_h(/*__m256i*/ _1, /*si5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvseqi_h ((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V8SI, V8SI, QI. */ ++#define __lasx_xvseqi_w(/*__m256i*/ _1, /*si5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvseqi_w ((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V4DI, V4DI, QI. */ ++#define __lasx_xvseqi_d(/*__m256i*/ _1, /*si5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvseqi_d ((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvslt_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvslt_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvslt_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvslt_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvslt_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvslt_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvslt_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvslt_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V32QI, V32QI, QI. */ ++#define __lasx_xvslti_b(/*__m256i*/ _1, /*si5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvslti_b ((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V16HI, V16HI, QI. */ ++#define __lasx_xvslti_h(/*__m256i*/ _1, /*si5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvslti_h ((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V8SI, V8SI, QI. */ ++#define __lasx_xvslti_w(/*__m256i*/ _1, /*si5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvslti_w ((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V4DI, V4DI, QI. 
*/ ++#define __lasx_xvslti_d(/*__m256i*/ _1, /*si5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvslti_d ((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvslt_bu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvslt_bu ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvslt_hu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvslt_hu ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvslt_wu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvslt_wu ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvslt_du (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvslt_du ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V32QI, UV32QI, UQI. */ ++#define __lasx_xvslti_bu(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvslti_bu ((v32u8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V16HI, UV16HI, UQI. */ ++#define __lasx_xvslti_hu(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvslti_hu ((v16u16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. 
*/ ++/* Data types in instruction templates: V8SI, UV8SI, UQI. */ ++#define __lasx_xvslti_wu(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvslti_wu ((v8u32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V4DI, UV4DI, UQI. */ ++#define __lasx_xvslti_du(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvslti_du ((v4u64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsle_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsle_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsle_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsle_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsle_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsle_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsle_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsle_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V32QI, V32QI, QI. */ ++#define __lasx_xvslei_b(/*__m256i*/ _1, /*si5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvslei_b ((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. 
*/ ++/* Data types in instruction templates: V16HI, V16HI, QI. */ ++#define __lasx_xvslei_h(/*__m256i*/ _1, /*si5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvslei_h ((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V8SI, V8SI, QI. */ ++#define __lasx_xvslei_w(/*__m256i*/ _1, /*si5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvslei_w ((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V4DI, V4DI, QI. */ ++#define __lasx_xvslei_d(/*__m256i*/ _1, /*si5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvslei_d ((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsle_bu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsle_bu ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsle_hu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsle_hu ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsle_wu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsle_wu ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV4DI, UV4DI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsle_du (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsle_du ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V32QI, UV32QI, UQI. */ ++#define __lasx_xvslei_bu(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvslei_bu ((v32u8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V16HI, UV16HI, UQI. */ ++#define __lasx_xvslei_hu(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvslei_hu ((v16u16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V8SI, UV8SI, UQI. */ ++#define __lasx_xvslei_wu(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvslei_wu ((v8u32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V4DI, UV4DI, UQI. */ ++#define __lasx_xvslei_du(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvslei_du ((v4u64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: V32QI, V32QI, UQI. */ ++#define __lasx_xvsat_b(/*__m256i*/ _1, /*ui3*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsat_b ((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V16HI, V16HI, UQI. */ ++#define __lasx_xvsat_h(/*__m256i*/ _1, /*ui4*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsat_h ((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V8SI, V8SI, UQI. */ ++#define __lasx_xvsat_w(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsat_w ((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V4DI, V4DI, UQI. 
*/ ++#define __lasx_xvsat_d(/*__m256i*/ _1, /*ui6*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsat_d ((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ ++#define __lasx_xvsat_bu(/*__m256i*/ _1, /*ui3*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsat_bu ((v32u8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UQI. */ ++#define __lasx_xvsat_hu(/*__m256i*/ _1, /*ui4*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsat_hu ((v16u16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UQI. */ ++#define __lasx_xvsat_wu(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsat_wu ((v8u32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UQI. */ ++#define __lasx_xvsat_du(/*__m256i*/ _1, /*ui6*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsat_du ((v4u64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvadda_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvadda_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvadda_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvadda_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvadda_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvadda_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvadda_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvadda_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsadd_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsadd_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsadd_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsadd_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsadd_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsadd_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsadd_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsadd_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsadd_bu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsadd_bu ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsadd_hu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsadd_hu ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsadd_wu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsadd_wu ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsadd_du (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsadd_du ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavg_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavg_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavg_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavg_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavg_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavg_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavg_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavg_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavg_bu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavg_bu ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavg_hu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavg_hu ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavg_wu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavg_wu ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavg_du (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavg_du ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavgr_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavgr_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavgr_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavgr_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavgr_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavgr_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavgr_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavgr_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavgr_bu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavgr_bu ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavgr_hu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavgr_hu ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavgr_wu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavgr_wu ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavgr_du (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavgr_du ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssub_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssub_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssub_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssub_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssub_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssub_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssub_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssub_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssub_bu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssub_bu ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssub_hu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssub_hu ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssub_wu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssub_wu ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssub_du (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssub_du ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvabsd_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvabsd_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvabsd_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvabsd_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvabsd_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvabsd_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvabsd_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvabsd_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvabsd_bu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvabsd_bu ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvabsd_hu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvabsd_hu ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvabsd_wu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvabsd_wu ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvabsd_du (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvabsd_du ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmul_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmul_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmul_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmul_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmul_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmul_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmul_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmul_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmadd_b (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmadd_b ((v32i8)_1, (v32i8)_2, (v32i8)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmadd_h (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmadd_h ((v16i16)_1, (v16i16)_2, (v16i16)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. 
*/ ++/* Data types in instruction templates: V8SI, V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmadd_w (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmadd_w ((v8i32)_1, (v8i32)_2, (v8i32)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmadd_d (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmadd_d ((v4i64)_1, (v4i64)_2, (v4i64)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmsub_b (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmsub_b ((v32i8)_1, (v32i8)_2, (v32i8)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmsub_h (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmsub_h ((v16i16)_1, (v16i16)_2, (v16i16)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmsub_w (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmsub_w ((v8i32)_1, (v8i32)_2, (v8i32)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, V4DI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmsub_d (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmsub_d ((v4i64)_1, (v4i64)_2, (v4i64)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvdiv_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvdiv_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvdiv_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvdiv_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvdiv_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvdiv_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvdiv_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvdiv_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvdiv_bu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvdiv_bu ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvdiv_hu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvdiv_hu ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvdiv_wu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvdiv_wu ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvdiv_du (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvdiv_du ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhaddw_h_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhaddw_h_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhaddw_w_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhaddw_w_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhaddw_d_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhaddw_d_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV32QI, UV32QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhaddw_hu_bu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhaddw_hu_bu ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhaddw_wu_hu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhaddw_wu_hu ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhaddw_du_wu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhaddw_du_wu ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhsubw_h_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhsubw_h_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhsubw_w_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhsubw_w_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhsubw_d_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhsubw_d_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. 
*/ ++/* Data types in instruction templates: V16HI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhsubw_hu_bu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhsubw_hu_bu ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhsubw_wu_hu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhsubw_wu_hu ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhsubw_du_wu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhsubw_du_wu ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmod_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmod_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmod_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmod_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmod_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmod_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmod_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmod_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmod_bu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmod_bu ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmod_hu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmod_hu ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmod_wu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmod_wu ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmod_du (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmod_du ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V32QI, V32QI, UQI. 
*/ ++#define __lasx_xvrepl128vei_b(/*__m256i*/ _1, /*ui4*/ _2) \ ++ ((__m256i)__builtin_lasx_xvrepl128vei_b ((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: V16HI, V16HI, UQI. */ ++#define __lasx_xvrepl128vei_h(/*__m256i*/ _1, /*ui3*/ _2) \ ++ ((__m256i)__builtin_lasx_xvrepl128vei_h ((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui2. */ ++/* Data types in instruction templates: V8SI, V8SI, UQI. */ ++#define __lasx_xvrepl128vei_w(/*__m256i*/ _1, /*ui2*/ _2) \ ++ ((__m256i)__builtin_lasx_xvrepl128vei_w ((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui1. */ ++/* Data types in instruction templates: V4DI, V4DI, UQI. */ ++#define __lasx_xvrepl128vei_d(/*__m256i*/ _1, /*ui1*/ _2) \ ++ ((__m256i)__builtin_lasx_xvrepl128vei_d ((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpickev_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpickev_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpickev_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpickev_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpickev_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpickev_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpickev_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpickev_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpickod_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpickod_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpickod_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpickod_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpickod_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpickod_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpickod_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpickod_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvilvh_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvilvh_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvilvh_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvilvh_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvilvh_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvilvh_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvilvh_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvilvh_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvilvl_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvilvl_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvilvl_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvilvl_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvilvl_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvilvl_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvilvl_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvilvl_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpackev_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpackev_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpackev_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpackev_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpackev_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpackev_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpackev_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpackev_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpackod_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpackod_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpackod_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpackod_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpackod_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpackod_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpackod_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpackod_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk, xa. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvshuf_b (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvshuf_b ((v32i8)_1, (v32i8)_2, (v32i8)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvshuf_h (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvshuf_h ((v16i16)_1, (v16i16)_2, (v16i16)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI, V8SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvshuf_w (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvshuf_w ((v8i32)_1, (v8i32)_2, (v8i32)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvshuf_d (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvshuf_d ((v4i64)_1, (v4i64)_2, (v4i64)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvand_v (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvand_v ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ ++#define __lasx_xvandi_b(/*__m256i*/ _1, /*ui8*/ _2) \ ++ ((__m256i)__builtin_lasx_xvandi_b ((v32u8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvor_v (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvor_v ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ ++#define __lasx_xvori_b(/*__m256i*/ _1, /*ui8*/ _2) \ ++ ((__m256i)__builtin_lasx_xvori_b ((v32u8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvnor_v (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvnor_v ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ ++#define __lasx_xvnori_b(/*__m256i*/ _1, /*ui8*/ _2) \ ++ ((__m256i)__builtin_lasx_xvnori_b ((v32u8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvxor_v (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvxor_v ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ ++#define __lasx_xvxori_b(/*__m256i*/ _1, /*ui8*/ _2) \ ++ ((__m256i)__builtin_lasx_xvxori_b ((v32u8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk, xa. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvbitsel_v (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvbitsel_v ((v32u8)_1, (v32u8)_2, (v32u8)_3); ++} ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI, USI. */ ++#define __lasx_xvbitseli_b(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) \ ++ ((__m256i)__builtin_lasx_xvbitseli_b ((v32u8)(_1), (v32u8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: V32QI, V32QI, USI. */ ++#define __lasx_xvshuf4i_b(/*__m256i*/ _1, /*ui8*/ _2) \ ++ ((__m256i)__builtin_lasx_xvshuf4i_b ((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui8. 
*/ ++/* Data types in instruction templates: V16HI, V16HI, USI. */ ++#define __lasx_xvshuf4i_h(/*__m256i*/ _1, /*ui8*/ _2) \ ++ ((__m256i)__builtin_lasx_xvshuf4i_h ((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: V8SI, V8SI, USI. */ ++#define __lasx_xvshuf4i_w(/*__m256i*/ _1, /*ui8*/ _2) \ ++ ((__m256i)__builtin_lasx_xvshuf4i_w ((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, rj. */ ++/* Data types in instruction templates: V32QI, SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvreplgr2vr_b (int _1) ++{ ++ return (__m256i)__builtin_lasx_xvreplgr2vr_b ((int)_1); ++} ++ ++/* Assembly instruction format: xd, rj. */ ++/* Data types in instruction templates: V16HI, SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvreplgr2vr_h (int _1) ++{ ++ return (__m256i)__builtin_lasx_xvreplgr2vr_h ((int)_1); ++} ++ ++/* Assembly instruction format: xd, rj. */ ++/* Data types in instruction templates: V8SI, SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvreplgr2vr_w (int _1) ++{ ++ return (__m256i)__builtin_lasx_xvreplgr2vr_w ((int)_1); ++} ++ ++/* Assembly instruction format: xd, rj. */ ++/* Data types in instruction templates: V4DI, DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvreplgr2vr_d (long int _1) ++{ ++ return (__m256i)__builtin_lasx_xvreplgr2vr_d ((long int)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpcnt_b (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvpcnt_b ((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. 
*/ ++/* Data types in instruction templates: V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpcnt_h (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvpcnt_h ((v16i16)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpcnt_w (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvpcnt_w ((v8i32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpcnt_d (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvpcnt_d ((v4i64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvclo_b (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvclo_b ((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvclo_h (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvclo_h ((v16i16)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvclo_w (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvclo_w ((v8i32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvclo_d (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvclo_d ((v4i64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvclz_b (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvclz_b ((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvclz_h (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvclz_h ((v16i16)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvclz_w (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvclz_w ((v8i32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvclz_d (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvclz_d ((v4i64)_1); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SF, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfadd_s (__m256 _1, __m256 _2) ++{ ++ return (__m256)__builtin_lasx_xvfadd_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DF, V4DF, V4DF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfadd_d (__m256d _1, __m256d _2) ++{ ++ return (__m256d)__builtin_lasx_xvfadd_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SF, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfsub_s (__m256 _1, __m256 _2) ++{ ++ return (__m256)__builtin_lasx_xvfsub_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DF, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfsub_d (__m256d _1, __m256d _2) ++{ ++ return (__m256d)__builtin_lasx_xvfsub_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SF, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfmul_s (__m256 _1, __m256 _2) ++{ ++ return (__m256)__builtin_lasx_xvfmul_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DF, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfmul_d (__m256d _1, __m256d _2) ++{ ++ return (__m256d)__builtin_lasx_xvfmul_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SF, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfdiv_s (__m256 _1, __m256 _2) ++{ ++ return (__m256)__builtin_lasx_xvfdiv_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DF, V4DF, V4DF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfdiv_d (__m256d _1, __m256d _2) ++{ ++ return (__m256d)__builtin_lasx_xvfdiv_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcvt_h_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcvt_h_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SF, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfcvt_s_d (__m256d _1, __m256d _2) ++{ ++ return (__m256)__builtin_lasx_xvfcvt_s_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SF, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfmin_s (__m256 _1, __m256 _2) ++{ ++ return (__m256)__builtin_lasx_xvfmin_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DF, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfmin_d (__m256d _1, __m256d _2) ++{ ++ return (__m256d)__builtin_lasx_xvfmin_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SF, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfmina_s (__m256 _1, __m256 _2) ++{ ++ return (__m256)__builtin_lasx_xvfmina_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DF, V4DF, V4DF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfmina_d (__m256d _1, __m256d _2) ++{ ++ return (__m256d)__builtin_lasx_xvfmina_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SF, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfmax_s (__m256 _1, __m256 _2) ++{ ++ return (__m256)__builtin_lasx_xvfmax_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DF, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfmax_d (__m256d _1, __m256d _2) ++{ ++ return (__m256d)__builtin_lasx_xvfmax_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SF, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfmaxa_s (__m256 _1, __m256 _2) ++{ ++ return (__m256)__builtin_lasx_xvfmaxa_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DF, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfmaxa_d (__m256d _1, __m256d _2) ++{ ++ return (__m256d)__builtin_lasx_xvfmaxa_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfclass_s (__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvfclass_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfclass_d (__m256d _1) ++{ ++ return (__m256i)__builtin_lasx_xvfclass_d ((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfsqrt_s (__m256 _1) ++{ ++ return (__m256)__builtin_lasx_xvfsqrt_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfsqrt_d (__m256d _1) ++{ ++ return (__m256d)__builtin_lasx_xvfsqrt_d ((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfrecip_s (__m256 _1) ++{ ++ return (__m256)__builtin_lasx_xvfrecip_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfrecip_d (__m256d _1) ++{ ++ return (__m256d)__builtin_lasx_xvfrecip_d ((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfrint_s (__m256 _1) ++{ ++ return (__m256)__builtin_lasx_xvfrint_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DF, V4DF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfrint_d (__m256d _1) ++{ ++ return (__m256d)__builtin_lasx_xvfrint_d ((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfrsqrt_s (__m256 _1) ++{ ++ return (__m256)__builtin_lasx_xvfrsqrt_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfrsqrt_d (__m256d _1) ++{ ++ return (__m256d)__builtin_lasx_xvfrsqrt_d ((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvflogb_s (__m256 _1) ++{ ++ return (__m256)__builtin_lasx_xvflogb_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvflogb_d (__m256d _1) ++{ ++ return (__m256d)__builtin_lasx_xvflogb_d ((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SF, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfcvth_s_h (__m256i _1) ++{ ++ return (__m256)__builtin_lasx_xvfcvth_s_h ((v16i16)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DF, V8SF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfcvth_d_s (__m256 _1) ++{ ++ return (__m256d)__builtin_lasx_xvfcvth_d_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SF, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfcvtl_s_h (__m256i _1) ++{ ++ return (__m256)__builtin_lasx_xvfcvtl_s_h ((v16i16)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfcvtl_d_s (__m256 _1) ++{ ++ return (__m256d)__builtin_lasx_xvfcvtl_d_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftint_w_s (__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftint_w_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftint_l_d (__m256d _1) ++{ ++ return (__m256i)__builtin_lasx_xvftint_l_d ((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: UV8SI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftint_wu_s (__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftint_wu_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: UV4DI, V4DF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftint_lu_d (__m256d _1) ++{ ++ return (__m256i)__builtin_lasx_xvftint_lu_d ((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrz_w_s (__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrz_w_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrz_l_d (__m256d _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrz_l_d ((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: UV8SI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrz_wu_s (__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrz_wu_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: UV4DI, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrz_lu_d (__m256d _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrz_lu_d ((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SF, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvffint_s_w (__m256i _1) ++{ ++ return (__m256)__builtin_lasx_xvffint_s_w ((v8i32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DF, V4DI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvffint_d_l (__m256i _1) ++{ ++ return (__m256d)__builtin_lasx_xvffint_d_l ((v4i64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SF, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvffint_s_wu (__m256i _1) ++{ ++ return (__m256)__builtin_lasx_xvffint_s_wu ((v8u32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DF, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvffint_d_lu (__m256i _1) ++{ ++ return (__m256d)__builtin_lasx_xvffint_d_lu ((v4u64)_1); ++} ++ ++/* Assembly instruction format: xd, xj, rk. */ ++/* Data types in instruction templates: V32QI, V32QI, SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvreplve_b (__m256i _1, int _2) ++{ ++ return (__m256i)__builtin_lasx_xvreplve_b ((v32i8)_1, (int)_2); ++} ++ ++/* Assembly instruction format: xd, xj, rk. */ ++/* Data types in instruction templates: V16HI, V16HI, SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvreplve_h (__m256i _1, int _2) ++{ ++ return (__m256i)__builtin_lasx_xvreplve_h ((v16i16)_1, (int)_2); ++} ++ ++/* Assembly instruction format: xd, xj, rk. */ ++/* Data types in instruction templates: V8SI, V8SI, SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvreplve_w (__m256i _1, int _2) ++{ ++ return (__m256i)__builtin_lasx_xvreplve_w ((v8i32)_1, (int)_2); ++} ++ ++/* Assembly instruction format: xd, xj, rk. */ ++/* Data types in instruction templates: V4DI, V4DI, SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvreplve_d (__m256i _1, int _2) ++{ ++ return (__m256i)__builtin_lasx_xvreplve_d ((v4i64)_1, (int)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ ++#define __lasx_xvpermi_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) \ ++ ((__m256i)__builtin_lasx_xvpermi_w ((v8i32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvandn_v (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvandn_v ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvneg_b (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvneg_b ((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvneg_h (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvneg_h ((v16i16)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvneg_w (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvneg_w ((v8i32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvneg_d (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvneg_d ((v4i64)_1); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmuh_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmuh_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmuh_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmuh_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmuh_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmuh_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmuh_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmuh_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmuh_bu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmuh_bu ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmuh_hu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmuh_hu ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmuh_wu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmuh_wu ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmuh_du (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmuh_du ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: V16HI, V32QI, UQI. */ ++#define __lasx_xvsllwil_h_b(/*__m256i*/ _1, /*ui3*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsllwil_h_b ((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V8SI, V16HI, UQI. */ ++#define __lasx_xvsllwil_w_h(/*__m256i*/ _1, /*ui4*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsllwil_w_h ((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V4DI, V8SI, UQI. */ ++#define __lasx_xvsllwil_d_w(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsllwil_d_w ((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: UV16HI, UV32QI, UQI. */ ++#define __lasx_xvsllwil_hu_bu(/*__m256i*/ _1, /*ui3*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsllwil_hu_bu ((v32u8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: UV8SI, UV16HI, UQI. 
*/ ++#define __lasx_xvsllwil_wu_hu(/*__m256i*/ _1, /*ui4*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsllwil_wu_hu ((v16u16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV4DI, UV8SI, UQI. */ ++#define __lasx_xvsllwil_du_wu(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvsllwil_du_wu ((v8u32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsran_b_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsran_b_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsran_h_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsran_h_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsran_w_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsran_w_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssran_b_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssran_b_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V8SI, V8SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssran_h_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssran_h_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssran_w_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssran_w_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssran_bu_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssran_bu_h ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssran_hu_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssran_hu_w ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssran_wu_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssran_wu_d ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrarn_b_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrarn_b_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. 
*/ ++/* Data types in instruction templates: V16HI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrarn_h_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrarn_h_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrarn_w_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrarn_w_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrarn_b_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrarn_b_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrarn_h_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrarn_h_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrarn_w_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrarn_w_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV16HI, UV16HI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrarn_bu_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrarn_bu_h ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrarn_hu_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrarn_hu_w ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrarn_wu_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrarn_wu_d ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrln_b_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrln_b_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrln_h_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrln_h_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrln_w_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrln_w_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. 
*/ ++/* Data types in instruction templates: UV32QI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrln_bu_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrln_bu_h ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrln_hu_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrln_hu_w ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrln_wu_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrln_wu_d ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrlrn_b_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrlrn_b_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrlrn_h_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrlrn_h_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V4DI, V4DI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrlrn_w_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrlrn_w_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrlrn_bu_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrlrn_bu_h ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrlrn_hu_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrlrn_hu_w ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrlrn_wu_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrlrn_wu_d ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, UQI. */ ++#define __lasx_xvfrstpi_b(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \ ++ ((__m256i)__builtin_lasx_xvfrstpi_b ((v32i8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI, UQI. */ ++#define __lasx_xvfrstpi_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \ ++ ((__m256i)__builtin_lasx_xvfrstpi_h ((v16i16)(_1), (v16i16)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, V32QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfrstp_b (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvfrstp_b ((v32i8)_1, (v32i8)_2, (v32i8)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfrstp_h (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvfrstp_h ((v16i16)_1, (v16i16)_2, (v16i16)_3); ++} ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. */ ++#define __lasx_xvshuf4i_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) \ ++ ((__m256i)__builtin_lasx_xvshuf4i_d ((v4i64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V32QI, V32QI, UQI. */ ++#define __lasx_xvbsrl_v(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvbsrl_v ((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V32QI, V32QI, UQI. */ ++#define __lasx_xvbsll_v(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvbsll_v ((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ ++#define __lasx_xvextrins_b(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) \ ++ ((__m256i)__builtin_lasx_xvextrins_b ((v32i8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. */ ++#define __lasx_xvextrins_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) \ ++ ((__m256i)__builtin_lasx_xvextrins_h ((v16i16)(_1), (v16i16)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. 
*/ ++#define __lasx_xvextrins_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) \ ++ ((__m256i)__builtin_lasx_xvextrins_w ((v8i32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. */ ++#define __lasx_xvextrins_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) \ ++ ((__m256i)__builtin_lasx_xvextrins_d ((v4i64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmskltz_b (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvmskltz_b ((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmskltz_h (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvmskltz_h ((v16i16)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmskltz_w (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvmskltz_w ((v8i32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmskltz_d (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvmskltz_d ((v4i64)_1); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsigncov_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsigncov_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. 
*/ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsigncov_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsigncov_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsigncov_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsigncov_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsigncov_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsigncov_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk, xa. */ ++/* Data types in instruction templates: V8SF, V8SF, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfmadd_s (__m256 _1, __m256 _2, __m256 _3) ++{ ++ return (__m256)__builtin_lasx_xvfmadd_s ((v8f32)_1, (v8f32)_2, (v8f32)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk, xa. */ ++/* Data types in instruction templates: V4DF, V4DF, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfmadd_d (__m256d _1, __m256d _2, __m256d _3) ++{ ++ return (__m256d)__builtin_lasx_xvfmadd_d ((v4f64)_1, (v4f64)_2, (v4f64)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk, xa. */ ++/* Data types in instruction templates: V8SF, V8SF, V8SF, V8SF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfmsub_s (__m256 _1, __m256 _2, __m256 _3) ++{ ++ return (__m256)__builtin_lasx_xvfmsub_s ((v8f32)_1, (v8f32)_2, (v8f32)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk, xa. */ ++/* Data types in instruction templates: V4DF, V4DF, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfmsub_d (__m256d _1, __m256d _2, __m256d _3) ++{ ++ return (__m256d)__builtin_lasx_xvfmsub_d ((v4f64)_1, (v4f64)_2, (v4f64)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk, xa. */ ++/* Data types in instruction templates: V8SF, V8SF, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfnmadd_s (__m256 _1, __m256 _2, __m256 _3) ++{ ++ return (__m256)__builtin_lasx_xvfnmadd_s ((v8f32)_1, (v8f32)_2, (v8f32)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk, xa. */ ++/* Data types in instruction templates: V4DF, V4DF, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfnmadd_d (__m256d _1, __m256d _2, __m256d _3) ++{ ++ return (__m256d)__builtin_lasx_xvfnmadd_d ((v4f64)_1, (v4f64)_2, (v4f64)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk, xa. */ ++/* Data types in instruction templates: V8SF, V8SF, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfnmsub_s (__m256 _1, __m256 _2, __m256 _3) ++{ ++ return (__m256)__builtin_lasx_xvfnmsub_s ((v8f32)_1, (v8f32)_2, (v8f32)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk, xa. */ ++/* Data types in instruction templates: V4DF, V4DF, V4DF, V4DF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfnmsub_d (__m256d _1, __m256d _2, __m256d _3) ++{ ++ return (__m256d)__builtin_lasx_xvfnmsub_d ((v4f64)_1, (v4f64)_2, (v4f64)_3); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrne_w_s (__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrne_w_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrne_l_d (__m256d _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrne_l_d ((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrp_w_s (__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrp_w_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrp_l_d (__m256d _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrp_l_d ((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrm_w_s (__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrm_w_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrm_l_d (__m256d _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrm_l_d ((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftint_w_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvftint_w_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SF, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvffint_s_l (__m256i _1, __m256i _2) ++{ ++ return (__m256)__builtin_lasx_xvffint_s_l ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrz_w_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvftintrz_w_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrp_w_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvftintrp_w_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrm_w_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvftintrm_w_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V4DF, V4DF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrne_w_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvftintrne_w_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftinth_l_s (__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftinth_l_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintl_l_s (__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintl_l_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DF, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvffinth_d_w (__m256i _1) ++{ ++ return (__m256d)__builtin_lasx_xvffinth_d_w ((v8i32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DF, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvffintl_d_w (__m256i _1) ++{ ++ return (__m256d)__builtin_lasx_xvffintl_d_w ((v8i32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrzh_l_s (__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrzh_l_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V8SF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrzl_l_s (__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrzl_l_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrph_l_s (__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrph_l_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrpl_l_s (__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrpl_l_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrmh_l_s (__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrmh_l_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrml_l_s (__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrml_l_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrneh_l_s (__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrneh_l_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V8SF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrnel_l_s (__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrnel_l_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfrintrne_s (__m256 _1) ++{ ++ return (__m256)__builtin_lasx_xvfrintrne_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfrintrne_d (__m256d _1) ++{ ++ return (__m256d)__builtin_lasx_xvfrintrne_d ((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfrintrz_s (__m256 _1) ++{ ++ return (__m256)__builtin_lasx_xvfrintrz_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfrintrz_d (__m256d _1) ++{ ++ return (__m256d)__builtin_lasx_xvfrintrz_d ((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfrintrp_s (__m256 _1) ++{ ++ return (__m256)__builtin_lasx_xvfrintrp_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfrintrp_d (__m256d _1) ++{ ++ return (__m256d)__builtin_lasx_xvfrintrp_d ((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfrintrm_s (__m256 _1) ++{ ++ return (__m256)__builtin_lasx_xvfrintrm_s ((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfrintrm_d (__m256d _1) ++{ ++ return (__m256d)__builtin_lasx_xvfrintrm_d ((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, rj, si12. */ ++/* Data types in instruction templates: V32QI, CVPOINTER, SI. */ ++#define __lasx_xvld(/*void **/ _1, /*si12*/ _2) \ ++ ((__m256i)__builtin_lasx_xvld ((void *)(_1), (_2))) ++ ++/* Assembly instruction format: xd, rj, si12. */ ++/* Data types in instruction templates: VOID, V32QI, CVPOINTER, SI. */ ++#define __lasx_xvst(/*__m256i*/ _1, /*void **/ _2, /*si12*/ _3) \ ++ ((void)__builtin_lasx_xvst ((v32i8)(_1), (void *)(_2), (_3))) ++ ++/* Assembly instruction format: xd, rj, si8, idx. */ ++/* Data types in instruction templates: VOID, V32QI, CVPOINTER, SI, UQI. */ ++#define __lasx_xvstelm_b(/*__m256i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) \ ++ ((void)__builtin_lasx_xvstelm_b ((v32i8)(_1), (void *)(_2), (_3), (_4))) ++ ++/* Assembly instruction format: xd, rj, si8, idx. */ ++/* Data types in instruction templates: VOID, V16HI, CVPOINTER, SI, UQI. */ ++#define __lasx_xvstelm_h(/*__m256i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) \ ++ ((void)__builtin_lasx_xvstelm_h ((v16i16)(_1), (void *)(_2), (_3), (_4))) ++ ++/* Assembly instruction format: xd, rj, si8, idx. 
*/ ++/* Data types in instruction templates: VOID, V8SI, CVPOINTER, SI, UQI. */ ++#define __lasx_xvstelm_w(/*__m256i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) \ ++ ((void)__builtin_lasx_xvstelm_w ((v8i32)(_1), (void *)(_2), (_3), (_4))) ++ ++/* Assembly instruction format: xd, rj, si8, idx. */ ++/* Data types in instruction templates: VOID, V4DI, CVPOINTER, SI, UQI. */ ++#define __lasx_xvstelm_d(/*__m256i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) \ ++ ((void)__builtin_lasx_xvstelm_d ((v4i64)(_1), (void *)(_2), (_3), (_4))) ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI, UQI. */ ++#define __lasx_xvinsve0_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui3*/ _3) \ ++ ((__m256i)__builtin_lasx_xvinsve0_w ((v8i32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui2. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, UQI. */ ++#define __lasx_xvinsve0_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui2*/ _3) \ ++ ((__m256i)__builtin_lasx_xvinsve0_d ((v4i64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: V8SI, V8SI, UQI. */ ++#define __lasx_xvpickve_w(/*__m256i*/ _1, /*ui3*/ _2) \ ++ ((__m256i)__builtin_lasx_xvpickve_w ((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui2. */ ++/* Data types in instruction templates: V4DI, V4DI, UQI. */ ++#define __lasx_xvpickve_d(/*__m256i*/ _1, /*ui2*/ _2) \ ++ ((__m256i)__builtin_lasx_xvpickve_d ((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrlrn_b_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrlrn_b_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V8SI, V8SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrlrn_h_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrlrn_h_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrlrn_w_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrlrn_w_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrln_b_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrln_b_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrln_h_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrln_h_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrln_w_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrln_w_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvorn_v (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvorn_v ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, i13. */ ++/* Data types in instruction templates: V4DI, HI. 
*/ ++#define __lasx_xvldi(/*i13*/ _1) \ ++ ((__m256i)__builtin_lasx_xvldi ((_1))) ++ ++/* Assembly instruction format: xd, rj, rk. */ ++/* Data types in instruction templates: V32QI, CVPOINTER, DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvldx (void * _1, long int _2) ++{ ++ return (__m256i)__builtin_lasx_xvldx ((void *)_1, (long int)_2); ++} ++ ++/* Assembly instruction format: xd, rj, rk. */ ++/* Data types in instruction templates: VOID, V32QI, CVPOINTER, DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++void __lasx_xvstx (__m256i _1, void * _2, long int _3) ++{ ++ return (void)__builtin_lasx_xvstx ((v32i8)_1, (void *)_2, (long int)_3); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvextl_qu_du (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvextl_qu_du ((v4u64)_1); ++} ++ ++/* Assembly instruction format: xd, rj, ui3. */ ++/* Data types in instruction templates: V8SI, V8SI, SI, UQI. */ ++#define __lasx_xvinsgr2vr_w(/*__m256i*/ _1, /*int*/ _2, /*ui3*/ _3) \ ++ ((__m256i)__builtin_lasx_xvinsgr2vr_w ((v8i32)(_1), (int)(_2), (_3))) ++ ++/* Assembly instruction format: xd, rj, ui2. */ ++/* Data types in instruction templates: V4DI, V4DI, DI, UQI. */ ++#define __lasx_xvinsgr2vr_d(/*__m256i*/ _1, /*long int*/ _2, /*ui2*/ _3) \ ++ ((__m256i)__builtin_lasx_xvinsgr2vr_d ((v4i64)(_1), (long int)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvreplve0_b (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvreplve0_b ((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. 
*/ ++/* Data types in instruction templates: V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvreplve0_h (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvreplve0_h ((v16i16)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvreplve0_w (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvreplve0_w ((v8i32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvreplve0_d (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvreplve0_d ((v4i64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvreplve0_q (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvreplve0_q ((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V16HI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_vext2xv_h_b (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_vext2xv_h_b ((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_vext2xv_w_h (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_vext2xv_w_h ((v16i16)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V8SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_vext2xv_d_w (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_vext2xv_d_w ((v8i32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_vext2xv_w_b (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_vext2xv_w_b ((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_vext2xv_d_h (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_vext2xv_d_h ((v16i16)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_vext2xv_d_b (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_vext2xv_d_b ((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V16HI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_vext2xv_hu_bu (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_vext2xv_hu_bu ((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_vext2xv_wu_hu (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_vext2xv_wu_hu ((v16i16)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V8SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_vext2xv_du_wu (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_vext2xv_du_wu ((v8i32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_vext2xv_wu_bu (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_vext2xv_wu_bu ((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_vext2xv_du_hu (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_vext2xv_du_hu ((v16i16)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_vext2xv_du_bu (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_vext2xv_du_bu ((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ ++#define __lasx_xvpermi_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) \ ++ ((__m256i)__builtin_lasx_xvpermi_q ((v32i8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: V4DI, V4DI, USI. */ ++#define __lasx_xvpermi_d(/*__m256i*/ _1, /*ui8*/ _2) \ ++ ((__m256i)__builtin_lasx_xvpermi_d ((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvperm_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvperm_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, rj, si12. 
*/ ++/* Data types in instruction templates: V32QI, CVPOINTER, SI. */ ++#define __lasx_xvldrepl_b(/*void **/ _1, /*si12*/ _2) \ ++ ((__m256i)__builtin_lasx_xvldrepl_b ((void *)(_1), (_2))) ++ ++/* Assembly instruction format: xd, rj, si11. */ ++/* Data types in instruction templates: V16HI, CVPOINTER, SI. */ ++#define __lasx_xvldrepl_h(/*void **/ _1, /*si11*/ _2) \ ++ ((__m256i)__builtin_lasx_xvldrepl_h ((void *)(_1), (_2))) ++ ++/* Assembly instruction format: xd, rj, si10. */ ++/* Data types in instruction templates: V8SI, CVPOINTER, SI. */ ++#define __lasx_xvldrepl_w(/*void **/ _1, /*si10*/ _2) \ ++ ((__m256i)__builtin_lasx_xvldrepl_w ((void *)(_1), (_2))) ++ ++/* Assembly instruction format: xd, rj, si9. */ ++/* Data types in instruction templates: V4DI, CVPOINTER, SI. */ ++#define __lasx_xvldrepl_d(/*void **/ _1, /*si9*/ _2) \ ++ ((__m256i)__builtin_lasx_xvldrepl_d ((void *)(_1), (_2))) ++ ++/* Assembly instruction format: rd, xj, ui3. */ ++/* Data types in instruction templates: SI, V8SI, UQI. */ ++#define __lasx_xvpickve2gr_w(/*__m256i*/ _1, /*ui3*/ _2) \ ++ ((int)__builtin_lasx_xvpickve2gr_w ((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: rd, xj, ui3. */ ++/* Data types in instruction templates: USI, V8SI, UQI. */ ++#define __lasx_xvpickve2gr_wu(/*__m256i*/ _1, /*ui3*/ _2) \ ++ ((unsigned int)__builtin_lasx_xvpickve2gr_wu ((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: rd, xj, ui2. */ ++/* Data types in instruction templates: DI, V4DI, UQI. */ ++#define __lasx_xvpickve2gr_d(/*__m256i*/ _1, /*ui2*/ _2) \ ++ ((long int)__builtin_lasx_xvpickve2gr_d ((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: rd, xj, ui2. */ ++/* Data types in instruction templates: UDI, V4DI, UQI. */ ++#define __lasx_xvpickve2gr_du(/*__m256i*/ _1, /*ui2*/ _2) \ ++ ((unsigned long int)__builtin_lasx_xvpickve2gr_du ((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwev_q_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwev_q_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwev_d_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwev_d_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwev_w_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwev_w_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwev_h_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwev_h_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwev_q_du (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwev_q_du ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwev_d_wu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwev_d_wu ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. 
*/ ++/* Data types in instruction templates: V8SI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwev_w_hu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwev_w_hu ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwev_h_bu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwev_h_bu ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwev_q_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwev_q_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwev_d_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwev_d_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwev_w_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwev_w_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V32QI, V32QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwev_h_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwev_h_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwev_q_du (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwev_q_du ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwev_d_wu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwev_d_wu ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwev_w_hu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwev_w_hu ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwev_h_bu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwev_h_bu ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwev_q_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwev_q_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. 
*/ ++/* Data types in instruction templates: V4DI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwev_d_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwev_d_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwev_w_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwev_w_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwev_h_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwev_h_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwev_q_du (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwev_q_du ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwev_d_wu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwev_d_wu ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, UV16HI, UV16HI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwev_w_hu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwev_w_hu ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwev_h_bu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwev_h_bu ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwod_q_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwod_q_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwod_d_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwod_d_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwod_w_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwod_w_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwod_h_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwod_h_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. 
*/ ++/* Data types in instruction templates: V4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwod_q_du (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwod_q_du ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwod_d_wu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwod_d_wu ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwod_w_hu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwod_w_hu ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwod_h_bu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwod_h_bu ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwod_q_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwod_q_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V8SI, V8SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwod_d_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwod_d_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwod_w_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwod_w_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwod_h_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwod_h_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwod_q_du (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwod_q_du ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwod_d_wu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwod_d_wu ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwod_w_hu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwod_w_hu ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. 
*/ ++/* Data types in instruction templates: V16HI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwod_h_bu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwod_h_bu ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwod_q_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwod_q_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwod_d_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwod_d_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwod_w_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwod_w_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwod_h_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwod_h_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV4DI, UV4DI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwod_q_du (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwod_q_du ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwod_d_wu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwod_d_wu ((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwod_w_hu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwod_w_hu ((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwod_h_bu (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwod_h_bu ((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwev_d_wu_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwev_d_wu_w ((v8u32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, UV16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwev_w_hu_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwev_w_hu_h ((v16u16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. 
*/ ++/* Data types in instruction templates: V16HI, UV32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwev_h_bu_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwev_h_bu_b ((v32u8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwev_d_wu_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwev_d_wu_w ((v8u32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, UV16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwev_w_hu_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwev_w_hu_h ((v16u16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, UV32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwev_h_bu_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwev_h_bu_b ((v32u8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwod_d_wu_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwod_d_wu_w ((v8u32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, UV16HI, V16HI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwod_w_hu_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwod_w_hu_h ((v16u16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, UV32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwod_h_bu_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwod_h_bu_b ((v32u8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwod_d_wu_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwod_d_wu_w ((v8u32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, UV16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwod_w_hu_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwod_w_hu_h ((v16u16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, UV32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwod_h_bu_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwod_h_bu_b ((v32u8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhaddw_q_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhaddw_q_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. 
*/ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhaddw_qu_du (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhaddw_qu_du ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhsubw_q_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhsubw_q_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhsubw_qu_du (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhsubw_qu_du ((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwev_q_d (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwev_q_d ((v4i64)_1, (v4i64)_2, (v4i64)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwev_d_w (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwev_d_w ((v4i64)_1, (v8i32)_2, (v8i32)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V16HI, V16HI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwev_w_h (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwev_w_h ((v8i32)_1, (v16i16)_2, (v16i16)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwev_h_b (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwev_h_b ((v16i16)_1, (v32i8)_2, (v32i8)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwev_q_du (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwev_q_du ((v4u64)_1, (v4u64)_2, (v4u64)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwev_d_wu (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwev_d_wu ((v4u64)_1, (v8u32)_2, (v8u32)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwev_w_hu (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwev_w_hu ((v8u32)_1, (v16u16)_2, (v16u16)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV32QI, UV32QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwev_h_bu (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwev_h_bu ((v16u16)_1, (v32u8)_2, (v32u8)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwod_q_d (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwod_q_d ((v4i64)_1, (v4i64)_2, (v4i64)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwod_d_w (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwod_d_w ((v4i64)_1, (v8i32)_2, (v8i32)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwod_w_h (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwod_w_h ((v8i32)_1, (v16i16)_2, (v16i16)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwod_h_b (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwod_h_b ((v16i16)_1, (v32i8)_2, (v32i8)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI, UV4DI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwod_q_du (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwod_q_du ((v4u64)_1, (v4u64)_2, (v4u64)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwod_d_wu (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwod_d_wu ((v4u64)_1, (v8u32)_2, (v8u32)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwod_w_hu (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwod_w_hu ((v8u32)_1, (v16u16)_2, (v16u16)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwod_h_bu (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwod_h_bu ((v16u16)_1, (v32u8)_2, (v32u8)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, UV4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwev_q_du_d (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwev_q_du_d ((v4i64)_1, (v4u64)_2, (v4i64)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, UV8SI, V8SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwev_d_wu_w (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwev_d_wu_w ((v4i64)_1, (v8u32)_2, (v8i32)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, UV16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwev_w_hu_h (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwev_w_hu_h ((v8i32)_1, (v16u16)_2, (v16i16)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, UV32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwev_h_bu_b (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwev_h_bu_b ((v16i16)_1, (v32u8)_2, (v32i8)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, UV4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwod_q_du_d (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwod_q_du_d ((v4i64)_1, (v4u64)_2, (v4i64)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, UV8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwod_d_wu_w (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwod_d_wu_w ((v4i64)_1, (v8u32)_2, (v8i32)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, UV16HI, V16HI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwod_w_hu_h (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwod_w_hu_h ((v8i32)_1, (v16u16)_2, (v16i16)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, UV32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwod_h_bu_b (__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwod_h_bu_b ((v16i16)_1, (v32u8)_2, (v32i8)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvrotr_b (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvrotr_b ((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvrotr_h (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvrotr_h ((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvrotr_w (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvrotr_w ((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvrotr_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvrotr_d ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. 
*/ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvadd_q (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvadd_q ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsub_q (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsub_q ((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwev_q_du_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwev_q_du_d ((v4u64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwod_q_du_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwod_q_du_d ((v4u64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwev_q_du_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwev_q_du_d ((v4u64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV4DI, V4DI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwod_q_du_d (__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwod_q_du_d ((v4u64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmskgez_b (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvmskgez_b ((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmsknz_b (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvmsknz_b ((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V16HI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvexth_h_b (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvexth_h_b ((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvexth_w_h (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvexth_w_h ((v16i16)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvexth_d_w (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvexth_d_w ((v8i32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvexth_q_d (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvexth_q_d ((v4i64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: UV16HI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvexth_hu_bu (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvexth_hu_bu ((v32u8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: UV8SI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvexth_wu_hu (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvexth_wu_hu ((v16u16)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: UV4DI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvexth_du_wu (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvexth_du_wu ((v8u32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvexth_qu_du (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvexth_qu_du ((v4u64)_1); ++} ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: V32QI, V32QI, UQI. */ ++#define __lasx_xvrotri_b(/*__m256i*/ _1, /*ui3*/ _2) \ ++ ((__m256i)__builtin_lasx_xvrotri_b ((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V16HI, V16HI, UQI. */ ++#define __lasx_xvrotri_h(/*__m256i*/ _1, /*ui4*/ _2) \ ++ ((__m256i)__builtin_lasx_xvrotri_h ((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V8SI, V8SI, UQI. 
*/ ++#define __lasx_xvrotri_w(/*__m256i*/ _1, /*ui5*/ _2) \ ++ ((__m256i)__builtin_lasx_xvrotri_w ((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V4DI, V4DI, UQI. */ ++#define __lasx_xvrotri_d(/*__m256i*/ _1, /*ui6*/ _2) \ ++ ((__m256i)__builtin_lasx_xvrotri_d ((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvextl_q_d (__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvextl_q_d ((v4i64)_1); ++} ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ ++#define __lasx_xvsrlni_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \ ++ ((__m256i)__builtin_lasx_xvsrlni_b_h ((v32i8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. */ ++#define __lasx_xvsrlni_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \ ++ ((__m256i)__builtin_lasx_xvsrlni_h_w ((v16i16)(_1), (v16i16)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ ++#define __lasx_xvsrlni_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \ ++ ((__m256i)__builtin_lasx_xvsrlni_w_d ((v8i32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui7. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. */ ++#define __lasx_xvsrlni_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \ ++ ((__m256i)__builtin_lasx_xvsrlni_d_q ((v4i64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. 
*/ ++#define __lasx_xvsrlrni_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \ ++ ((__m256i)__builtin_lasx_xvsrlrni_b_h ((v32i8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. */ ++#define __lasx_xvsrlrni_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \ ++ ((__m256i)__builtin_lasx_xvsrlrni_h_w ((v16i16)(_1), (v16i16)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ ++#define __lasx_xvsrlrni_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \ ++ ((__m256i)__builtin_lasx_xvsrlrni_w_d ((v8i32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui7. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. */ ++#define __lasx_xvsrlrni_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \ ++ ((__m256i)__builtin_lasx_xvsrlrni_d_q ((v4i64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ ++#define __lasx_xvssrlni_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrlni_b_h ((v32i8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. */ ++#define __lasx_xvssrlni_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrlni_h_w ((v16i16)(_1), (v16i16)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ ++#define __lasx_xvssrlni_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrlni_w_d ((v8i32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui7. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. 
*/ ++#define __lasx_xvssrlni_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrlni_d_q ((v4i64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: UV32QI, UV32QI, V32QI, USI. */ ++#define __lasx_xvssrlni_bu_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrlni_bu_h ((v32u8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV16HI, UV16HI, V16HI, USI. */ ++#define __lasx_xvssrlni_hu_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrlni_hu_w ((v16u16)(_1), (v16i16)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: UV8SI, UV8SI, V8SI, USI. */ ++#define __lasx_xvssrlni_wu_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrlni_wu_d ((v8u32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui7. */ ++/* Data types in instruction templates: UV4DI, UV4DI, V4DI, USI. */ ++#define __lasx_xvssrlni_du_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrlni_du_q ((v4u64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ ++#define __lasx_xvssrlrni_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrlrni_b_h ((v32i8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. */ ++#define __lasx_xvssrlrni_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrlrni_h_w ((v16i16)(_1), (v16i16)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. 
*/ ++#define __lasx_xvssrlrni_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrlrni_w_d ((v8i32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui7. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. */ ++#define __lasx_xvssrlrni_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrlrni_d_q ((v4i64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: UV32QI, UV32QI, V32QI, USI. */ ++#define __lasx_xvssrlrni_bu_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrlrni_bu_h ((v32u8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV16HI, UV16HI, V16HI, USI. */ ++#define __lasx_xvssrlrni_hu_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrlrni_hu_w ((v16u16)(_1), (v16i16)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: UV8SI, UV8SI, V8SI, USI. */ ++#define __lasx_xvssrlrni_wu_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrlrni_wu_d ((v8u32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui7. */ ++/* Data types in instruction templates: UV4DI, UV4DI, V4DI, USI. */ ++#define __lasx_xvssrlrni_du_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrlrni_du_q ((v4u64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ ++#define __lasx_xvsrani_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \ ++ ((__m256i)__builtin_lasx_xvsrani_b_h ((v32i8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. 
*/ ++#define __lasx_xvsrani_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \ ++ ((__m256i)__builtin_lasx_xvsrani_h_w ((v16i16)(_1), (v16i16)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ ++#define __lasx_xvsrani_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \ ++ ((__m256i)__builtin_lasx_xvsrani_w_d ((v8i32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui7. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. */ ++#define __lasx_xvsrani_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \ ++ ((__m256i)__builtin_lasx_xvsrani_d_q ((v4i64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ ++#define __lasx_xvsrarni_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \ ++ ((__m256i)__builtin_lasx_xvsrarni_b_h ((v32i8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. */ ++#define __lasx_xvsrarni_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \ ++ ((__m256i)__builtin_lasx_xvsrarni_h_w ((v16i16)(_1), (v16i16)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ ++#define __lasx_xvsrarni_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \ ++ ((__m256i)__builtin_lasx_xvsrarni_w_d ((v8i32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui7. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. */ ++#define __lasx_xvsrarni_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \ ++ ((__m256i)__builtin_lasx_xvsrarni_d_q ((v4i64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. 
*/ ++#define __lasx_xvssrani_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrani_b_h ((v32i8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. */ ++#define __lasx_xvssrani_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrani_h_w ((v16i16)(_1), (v16i16)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ ++#define __lasx_xvssrani_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrani_w_d ((v8i32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui7. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. */ ++#define __lasx_xvssrani_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrani_d_q ((v4i64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: UV32QI, UV32QI, V32QI, USI. */ ++#define __lasx_xvssrani_bu_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrani_bu_h ((v32u8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV16HI, UV16HI, V16HI, USI. */ ++#define __lasx_xvssrani_hu_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrani_hu_w ((v16u16)(_1), (v16i16)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: UV8SI, UV8SI, V8SI, USI. */ ++#define __lasx_xvssrani_wu_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrani_wu_d ((v8u32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui7. */ ++/* Data types in instruction templates: UV4DI, UV4DI, V4DI, USI. 
*/ ++#define __lasx_xvssrani_du_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrani_du_q ((v4u64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ ++#define __lasx_xvssrarni_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrarni_b_h ((v32i8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. */ ++#define __lasx_xvssrarni_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrarni_h_w ((v16i16)(_1), (v16i16)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ ++#define __lasx_xvssrarni_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrarni_w_d ((v8i32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui7. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. */ ++#define __lasx_xvssrarni_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrarni_d_q ((v4i64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: UV32QI, UV32QI, V32QI, USI. */ ++#define __lasx_xvssrarni_bu_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrarni_bu_h ((v32u8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV16HI, UV16HI, V16HI, USI. */ ++#define __lasx_xvssrarni_hu_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrarni_hu_w ((v16u16)(_1), (v16i16)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: UV8SI, UV8SI, V8SI, USI. 
*/ ++#define __lasx_xvssrarni_wu_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrarni_wu_d ((v8u32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui7. */ ++/* Data types in instruction templates: UV4DI, UV4DI, V4DI, USI. */ ++#define __lasx_xvssrarni_du_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \ ++ ((__m256i)__builtin_lasx_xvssrarni_du_q ((v4u64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: cd, xj. */ ++/* Data types in instruction templates: SI, UV32QI. */ ++#define __lasx_xbnz_b(/*__m256i*/ _1) \ ++ ((int)__builtin_lasx_xbnz_b ((v32u8)(_1))) ++ ++/* Assembly instruction format: cd, xj. */ ++/* Data types in instruction templates: SI, UV4DI. */ ++#define __lasx_xbnz_d(/*__m256i*/ _1) \ ++ ((int)__builtin_lasx_xbnz_d ((v4u64)(_1))) ++ ++/* Assembly instruction format: cd, xj. */ ++/* Data types in instruction templates: SI, UV16HI. */ ++#define __lasx_xbnz_h(/*__m256i*/ _1) \ ++ ((int)__builtin_lasx_xbnz_h ((v16u16)(_1))) ++ ++/* Assembly instruction format: cd, xj. */ ++/* Data types in instruction templates: SI, UV32QI. */ ++#define __lasx_xbnz_v(/*__m256i*/ _1) \ ++ ((int)__builtin_lasx_xbnz_v ((v32u8)(_1))) ++ ++/* Assembly instruction format: cd, xj. */ ++/* Data types in instruction templates: SI, UV8SI. */ ++#define __lasx_xbnz_w(/*__m256i*/ _1) \ ++ ((int)__builtin_lasx_xbnz_w ((v8u32)(_1))) ++ ++/* Assembly instruction format: cd, xj. */ ++/* Data types in instruction templates: SI, UV32QI. */ ++#define __lasx_xbz_b(/*__m256i*/ _1) \ ++ ((int)__builtin_lasx_xbz_b ((v32u8)(_1))) ++ ++/* Assembly instruction format: cd, xj. */ ++/* Data types in instruction templates: SI, UV4DI. */ ++#define __lasx_xbz_d(/*__m256i*/ _1) \ ++ ((int)__builtin_lasx_xbz_d ((v4u64)(_1))) ++ ++/* Assembly instruction format: cd, xj. */ ++/* Data types in instruction templates: SI, UV16HI. 
*/ ++#define __lasx_xbz_h(/*__m256i*/ _1) \ ++ ((int)__builtin_lasx_xbz_h ((v16u16)(_1))) ++ ++/* Assembly instruction format: cd, xj. */ ++/* Data types in instruction templates: SI, UV32QI. */ ++#define __lasx_xbz_v(/*__m256i*/ _1) \ ++ ((int)__builtin_lasx_xbz_v ((v32u8)(_1))) ++ ++/* Assembly instruction format: cd, xj. */ ++/* Data types in instruction templates: SI, UV8SI. */ ++#define __lasx_xbz_w(/*__m256i*/ _1) \ ++ ((int)__builtin_lasx_xbz_w ((v8u32)(_1))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_caf_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_caf_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_caf_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_caf_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_ceq_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_ceq_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_ceq_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_ceq_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cle_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cle_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cle_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cle_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_clt_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_clt_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_clt_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_clt_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cne_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cne_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cne_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cne_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cor_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cor_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cor_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cor_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cueq_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cueq_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cueq_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cueq_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cule_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cule_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cule_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cule_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. 
*/ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cult_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cult_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cult_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cult_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cun_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cun_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cune_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cune_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cune_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cune_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cun_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cun_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_saf_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_saf_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_saf_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_saf_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_seq_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_seq_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_seq_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_seq_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sle_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sle_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sle_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sle_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_slt_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_slt_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_slt_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_slt_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sne_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sne_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sne_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sne_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sor_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sor_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sor_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sor_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sueq_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sueq_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sueq_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sueq_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sule_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sule_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sule_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sule_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sult_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sult_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. 
*/ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sult_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sult_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sun_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sun_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sune_d (__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sune_d ((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sune_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sune_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sun_s (__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sun_s ((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui2. */ ++/* Data types in instruction templates: V4DF, V4DF, UQI. */ ++#define __lasx_xvpickve_d_f(/*__m256d*/ _1, /*ui2*/ _2) \ ++ ((__m256d)__builtin_lasx_xvpickve_d_f ((v4f64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: V8SF, V8SF, UQI. 
*/ ++#define __lasx_xvpickve_w_f(/*__m256*/ _1, /*ui3*/ _2) \ ++ ((__m256)__builtin_lasx_xvpickve_w_f ((v8f32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, si10. */ ++/* Data types in instruction templates: V32QI, HI. */ ++#define __lasx_xvrepli_b(/*si10*/ _1) \ ++ ((__m256i)__builtin_lasx_xvrepli_b ((_1))) ++ ++/* Assembly instruction format: xd, si10. */ ++/* Data types in instruction templates: V4DI, HI. */ ++#define __lasx_xvrepli_d(/*si10*/ _1) \ ++ ((__m256i)__builtin_lasx_xvrepli_d ((_1))) ++ ++/* Assembly instruction format: xd, si10. */ ++/* Data types in instruction templates: V16HI, HI. */ ++#define __lasx_xvrepli_h(/*si10*/ _1) \ ++ ((__m256i)__builtin_lasx_xvrepli_h ((_1))) ++ ++/* Assembly instruction format: xd, si10. */ ++/* Data types in instruction templates: V8SI, HI. */ ++#define __lasx_xvrepli_w(/*si10*/ _1) \ ++ ((__m256i)__builtin_lasx_xvrepli_w ((_1))) ++ ++#endif /* defined(__loongarch_asx). */ ++#endif /* _GCC_LOONGSON_ASXINTRIN_H. */ +diff --git a/gcc/config/loongarch/loongarch-builtins.cc b/gcc/config/loongarch/loongarch-builtins.cc +index de6428ac6..a4a7dbec9 100644 +--- a/gcc/config/loongarch/loongarch-builtins.cc ++++ b/gcc/config/loongarch/loongarch-builtins.cc +@@ -74,6 +74,13 @@ enum loongarch_builtin_type + /* The function corresponds to an LSX conditional branch instruction + combined with a compare instruction. */ + LARCH_BUILTIN_LSX_TEST_BRANCH, ++ ++ /* For generating LoongArch LASX. */ ++ LARCH_BUILTIN_LASX, ++ ++ /* The function corresponds to an LASX conditional branch instruction ++ combined with a compare instruction. */ ++ LARCH_BUILTIN_LASX_TEST_BRANCH, + }; + + /* Declare an availability predicate for built-in functions that require +@@ -112,6 +119,7 @@ struct loongarch_builtin_description + + AVAIL_ALL (hard_float, TARGET_HARD_FLOAT_ABI) + AVAIL_ALL (lsx, ISA_HAS_LSX) ++AVAIL_ALL (lasx, ISA_HAS_LASX) + + /* Construct a loongarch_builtin_description from the given arguments. 
+ +@@ -173,6 +181,30 @@ AVAIL_ALL (lsx, ISA_HAS_LSX) + "__builtin_lsx_" #INSN, LARCH_BUILTIN_DIRECT_NO_TARGET, \ + FUNCTION_TYPE, loongarch_builtin_avail_lsx } + ++/* Define an LASX LARCH_BUILTIN_DIRECT function __builtin_lasx_ ++ for instruction CODE_FOR_lasx_. FUNCTION_TYPE is a builtin_description ++ field. */ ++#define LASX_BUILTIN(INSN, FUNCTION_TYPE) \ ++ { CODE_FOR_lasx_ ## INSN, \ ++ "__builtin_lasx_" #INSN, LARCH_BUILTIN_LASX, \ ++ FUNCTION_TYPE, loongarch_builtin_avail_lasx } ++ ++/* Define an LASX LARCH_BUILTIN_DIRECT_NO_TARGET function __builtin_lasx_ ++ for instruction CODE_FOR_lasx_. FUNCTION_TYPE is a builtin_description ++ field. */ ++#define LASX_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE) \ ++ { CODE_FOR_lasx_ ## INSN, \ ++ "__builtin_lasx_" #INSN, LARCH_BUILTIN_DIRECT_NO_TARGET, \ ++ FUNCTION_TYPE, loongarch_builtin_avail_lasx } ++ ++/* Define an LASX LARCH_BUILTIN_LASX_TEST_BRANCH function __builtin_lasx_ ++ for instruction CODE_FOR_lasx_. FUNCTION_TYPE is a builtin_description ++ field. 
*/ ++#define LASX_BUILTIN_TEST_BRANCH(INSN, FUNCTION_TYPE) \ ++ { CODE_FOR_lasx_ ## INSN, \ ++ "__builtin_lasx_" #INSN, LARCH_BUILTIN_LASX_TEST_BRANCH, \ ++ FUNCTION_TYPE, loongarch_builtin_avail_lasx } ++ + /* LoongArch SX define CODE_FOR_lsx_xxx */ + #define CODE_FOR_lsx_vsadd_b CODE_FOR_ssaddv16qi3 + #define CODE_FOR_lsx_vsadd_h CODE_FOR_ssaddv8hi3 +@@ -442,6 +474,276 @@ AVAIL_ALL (lsx, ISA_HAS_LSX) + #define CODE_FOR_lsx_vssrlrn_hu_w CODE_FOR_lsx_vssrlrn_u_hu_w + #define CODE_FOR_lsx_vssrlrn_wu_d CODE_FOR_lsx_vssrlrn_u_wu_d + ++/* LoongArch ASX define CODE_FOR_lasx_mxxx */ ++#define CODE_FOR_lasx_xvsadd_b CODE_FOR_ssaddv32qi3 ++#define CODE_FOR_lasx_xvsadd_h CODE_FOR_ssaddv16hi3 ++#define CODE_FOR_lasx_xvsadd_w CODE_FOR_ssaddv8si3 ++#define CODE_FOR_lasx_xvsadd_d CODE_FOR_ssaddv4di3 ++#define CODE_FOR_lasx_xvsadd_bu CODE_FOR_usaddv32qi3 ++#define CODE_FOR_lasx_xvsadd_hu CODE_FOR_usaddv16hi3 ++#define CODE_FOR_lasx_xvsadd_wu CODE_FOR_usaddv8si3 ++#define CODE_FOR_lasx_xvsadd_du CODE_FOR_usaddv4di3 ++#define CODE_FOR_lasx_xvadd_b CODE_FOR_addv32qi3 ++#define CODE_FOR_lasx_xvadd_h CODE_FOR_addv16hi3 ++#define CODE_FOR_lasx_xvadd_w CODE_FOR_addv8si3 ++#define CODE_FOR_lasx_xvadd_d CODE_FOR_addv4di3 ++#define CODE_FOR_lasx_xvaddi_bu CODE_FOR_addv32qi3 ++#define CODE_FOR_lasx_xvaddi_hu CODE_FOR_addv16hi3 ++#define CODE_FOR_lasx_xvaddi_wu CODE_FOR_addv8si3 ++#define CODE_FOR_lasx_xvaddi_du CODE_FOR_addv4di3 ++#define CODE_FOR_lasx_xvand_v CODE_FOR_andv32qi3 ++#define CODE_FOR_lasx_xvandi_b CODE_FOR_andv32qi3 ++#define CODE_FOR_lasx_xvbitsel_v CODE_FOR_lasx_xvbitsel_b ++#define CODE_FOR_lasx_xvseqi_b CODE_FOR_lasx_xvseq_b ++#define CODE_FOR_lasx_xvseqi_h CODE_FOR_lasx_xvseq_h ++#define CODE_FOR_lasx_xvseqi_w CODE_FOR_lasx_xvseq_w ++#define CODE_FOR_lasx_xvseqi_d CODE_FOR_lasx_xvseq_d ++#define CODE_FOR_lasx_xvslti_b CODE_FOR_lasx_xvslt_b ++#define CODE_FOR_lasx_xvslti_h CODE_FOR_lasx_xvslt_h ++#define CODE_FOR_lasx_xvslti_w CODE_FOR_lasx_xvslt_w ++#define 
CODE_FOR_lasx_xvslti_d CODE_FOR_lasx_xvslt_d ++#define CODE_FOR_lasx_xvslti_bu CODE_FOR_lasx_xvslt_bu ++#define CODE_FOR_lasx_xvslti_hu CODE_FOR_lasx_xvslt_hu ++#define CODE_FOR_lasx_xvslti_wu CODE_FOR_lasx_xvslt_wu ++#define CODE_FOR_lasx_xvslti_du CODE_FOR_lasx_xvslt_du ++#define CODE_FOR_lasx_xvslei_b CODE_FOR_lasx_xvsle_b ++#define CODE_FOR_lasx_xvslei_h CODE_FOR_lasx_xvsle_h ++#define CODE_FOR_lasx_xvslei_w CODE_FOR_lasx_xvsle_w ++#define CODE_FOR_lasx_xvslei_d CODE_FOR_lasx_xvsle_d ++#define CODE_FOR_lasx_xvslei_bu CODE_FOR_lasx_xvsle_bu ++#define CODE_FOR_lasx_xvslei_hu CODE_FOR_lasx_xvsle_hu ++#define CODE_FOR_lasx_xvslei_wu CODE_FOR_lasx_xvsle_wu ++#define CODE_FOR_lasx_xvslei_du CODE_FOR_lasx_xvsle_du ++#define CODE_FOR_lasx_xvdiv_b CODE_FOR_divv32qi3 ++#define CODE_FOR_lasx_xvdiv_h CODE_FOR_divv16hi3 ++#define CODE_FOR_lasx_xvdiv_w CODE_FOR_divv8si3 ++#define CODE_FOR_lasx_xvdiv_d CODE_FOR_divv4di3 ++#define CODE_FOR_lasx_xvdiv_bu CODE_FOR_udivv32qi3 ++#define CODE_FOR_lasx_xvdiv_hu CODE_FOR_udivv16hi3 ++#define CODE_FOR_lasx_xvdiv_wu CODE_FOR_udivv8si3 ++#define CODE_FOR_lasx_xvdiv_du CODE_FOR_udivv4di3 ++#define CODE_FOR_lasx_xvfadd_s CODE_FOR_addv8sf3 ++#define CODE_FOR_lasx_xvfadd_d CODE_FOR_addv4df3 ++#define CODE_FOR_lasx_xvftintrz_w_s CODE_FOR_fix_truncv8sfv8si2 ++#define CODE_FOR_lasx_xvftintrz_l_d CODE_FOR_fix_truncv4dfv4di2 ++#define CODE_FOR_lasx_xvftintrz_wu_s CODE_FOR_fixuns_truncv8sfv8si2 ++#define CODE_FOR_lasx_xvftintrz_lu_d CODE_FOR_fixuns_truncv4dfv4di2 ++#define CODE_FOR_lasx_xvffint_s_w CODE_FOR_floatv8siv8sf2 ++#define CODE_FOR_lasx_xvffint_d_l CODE_FOR_floatv4div4df2 ++#define CODE_FOR_lasx_xvffint_s_wu CODE_FOR_floatunsv8siv8sf2 ++#define CODE_FOR_lasx_xvffint_d_lu CODE_FOR_floatunsv4div4df2 ++#define CODE_FOR_lasx_xvfsub_s CODE_FOR_subv8sf3 ++#define CODE_FOR_lasx_xvfsub_d CODE_FOR_subv4df3 ++#define CODE_FOR_lasx_xvfmul_s CODE_FOR_mulv8sf3 ++#define CODE_FOR_lasx_xvfmul_d CODE_FOR_mulv4df3 ++#define CODE_FOR_lasx_xvfdiv_s 
CODE_FOR_divv8sf3 ++#define CODE_FOR_lasx_xvfdiv_d CODE_FOR_divv4df3 ++#define CODE_FOR_lasx_xvfmax_s CODE_FOR_smaxv8sf3 ++#define CODE_FOR_lasx_xvfmax_d CODE_FOR_smaxv4df3 ++#define CODE_FOR_lasx_xvfmin_s CODE_FOR_sminv8sf3 ++#define CODE_FOR_lasx_xvfmin_d CODE_FOR_sminv4df3 ++#define CODE_FOR_lasx_xvfsqrt_s CODE_FOR_sqrtv8sf2 ++#define CODE_FOR_lasx_xvfsqrt_d CODE_FOR_sqrtv4df2 ++#define CODE_FOR_lasx_xvflogb_s CODE_FOR_logbv8sf2 ++#define CODE_FOR_lasx_xvflogb_d CODE_FOR_logbv4df2 ++#define CODE_FOR_lasx_xvmax_b CODE_FOR_smaxv32qi3 ++#define CODE_FOR_lasx_xvmax_h CODE_FOR_smaxv16hi3 ++#define CODE_FOR_lasx_xvmax_w CODE_FOR_smaxv8si3 ++#define CODE_FOR_lasx_xvmax_d CODE_FOR_smaxv4di3 ++#define CODE_FOR_lasx_xvmaxi_b CODE_FOR_smaxv32qi3 ++#define CODE_FOR_lasx_xvmaxi_h CODE_FOR_smaxv16hi3 ++#define CODE_FOR_lasx_xvmaxi_w CODE_FOR_smaxv8si3 ++#define CODE_FOR_lasx_xvmaxi_d CODE_FOR_smaxv4di3 ++#define CODE_FOR_lasx_xvmax_bu CODE_FOR_umaxv32qi3 ++#define CODE_FOR_lasx_xvmax_hu CODE_FOR_umaxv16hi3 ++#define CODE_FOR_lasx_xvmax_wu CODE_FOR_umaxv8si3 ++#define CODE_FOR_lasx_xvmax_du CODE_FOR_umaxv4di3 ++#define CODE_FOR_lasx_xvmaxi_bu CODE_FOR_umaxv32qi3 ++#define CODE_FOR_lasx_xvmaxi_hu CODE_FOR_umaxv16hi3 ++#define CODE_FOR_lasx_xvmaxi_wu CODE_FOR_umaxv8si3 ++#define CODE_FOR_lasx_xvmaxi_du CODE_FOR_umaxv4di3 ++#define CODE_FOR_lasx_xvmin_b CODE_FOR_sminv32qi3 ++#define CODE_FOR_lasx_xvmin_h CODE_FOR_sminv16hi3 ++#define CODE_FOR_lasx_xvmin_w CODE_FOR_sminv8si3 ++#define CODE_FOR_lasx_xvmin_d CODE_FOR_sminv4di3 ++#define CODE_FOR_lasx_xvmini_b CODE_FOR_sminv32qi3 ++#define CODE_FOR_lasx_xvmini_h CODE_FOR_sminv16hi3 ++#define CODE_FOR_lasx_xvmini_w CODE_FOR_sminv8si3 ++#define CODE_FOR_lasx_xvmini_d CODE_FOR_sminv4di3 ++#define CODE_FOR_lasx_xvmin_bu CODE_FOR_uminv32qi3 ++#define CODE_FOR_lasx_xvmin_hu CODE_FOR_uminv16hi3 ++#define CODE_FOR_lasx_xvmin_wu CODE_FOR_uminv8si3 ++#define CODE_FOR_lasx_xvmin_du CODE_FOR_uminv4di3 ++#define CODE_FOR_lasx_xvmini_bu 
CODE_FOR_uminv32qi3 ++#define CODE_FOR_lasx_xvmini_hu CODE_FOR_uminv16hi3 ++#define CODE_FOR_lasx_xvmini_wu CODE_FOR_uminv8si3 ++#define CODE_FOR_lasx_xvmini_du CODE_FOR_uminv4di3 ++#define CODE_FOR_lasx_xvmod_b CODE_FOR_modv32qi3 ++#define CODE_FOR_lasx_xvmod_h CODE_FOR_modv16hi3 ++#define CODE_FOR_lasx_xvmod_w CODE_FOR_modv8si3 ++#define CODE_FOR_lasx_xvmod_d CODE_FOR_modv4di3 ++#define CODE_FOR_lasx_xvmod_bu CODE_FOR_umodv32qi3 ++#define CODE_FOR_lasx_xvmod_hu CODE_FOR_umodv16hi3 ++#define CODE_FOR_lasx_xvmod_wu CODE_FOR_umodv8si3 ++#define CODE_FOR_lasx_xvmod_du CODE_FOR_umodv4di3 ++#define CODE_FOR_lasx_xvmul_b CODE_FOR_mulv32qi3 ++#define CODE_FOR_lasx_xvmul_h CODE_FOR_mulv16hi3 ++#define CODE_FOR_lasx_xvmul_w CODE_FOR_mulv8si3 ++#define CODE_FOR_lasx_xvmul_d CODE_FOR_mulv4di3 ++#define CODE_FOR_lasx_xvclz_b CODE_FOR_clzv32qi2 ++#define CODE_FOR_lasx_xvclz_h CODE_FOR_clzv16hi2 ++#define CODE_FOR_lasx_xvclz_w CODE_FOR_clzv8si2 ++#define CODE_FOR_lasx_xvclz_d CODE_FOR_clzv4di2 ++#define CODE_FOR_lasx_xvnor_v CODE_FOR_lasx_xvnor_b ++#define CODE_FOR_lasx_xvor_v CODE_FOR_iorv32qi3 ++#define CODE_FOR_lasx_xvori_b CODE_FOR_iorv32qi3 ++#define CODE_FOR_lasx_xvnori_b CODE_FOR_lasx_xvnor_b ++#define CODE_FOR_lasx_xvpcnt_b CODE_FOR_popcountv32qi2 ++#define CODE_FOR_lasx_xvpcnt_h CODE_FOR_popcountv16hi2 ++#define CODE_FOR_lasx_xvpcnt_w CODE_FOR_popcountv8si2 ++#define CODE_FOR_lasx_xvpcnt_d CODE_FOR_popcountv4di2 ++#define CODE_FOR_lasx_xvxor_v CODE_FOR_xorv32qi3 ++#define CODE_FOR_lasx_xvxori_b CODE_FOR_xorv32qi3 ++#define CODE_FOR_lasx_xvsll_b CODE_FOR_vashlv32qi3 ++#define CODE_FOR_lasx_xvsll_h CODE_FOR_vashlv16hi3 ++#define CODE_FOR_lasx_xvsll_w CODE_FOR_vashlv8si3 ++#define CODE_FOR_lasx_xvsll_d CODE_FOR_vashlv4di3 ++#define CODE_FOR_lasx_xvslli_b CODE_FOR_vashlv32qi3 ++#define CODE_FOR_lasx_xvslli_h CODE_FOR_vashlv16hi3 ++#define CODE_FOR_lasx_xvslli_w CODE_FOR_vashlv8si3 ++#define CODE_FOR_lasx_xvslli_d CODE_FOR_vashlv4di3 ++#define CODE_FOR_lasx_xvsra_b 
CODE_FOR_vashrv32qi3 ++#define CODE_FOR_lasx_xvsra_h CODE_FOR_vashrv16hi3 ++#define CODE_FOR_lasx_xvsra_w CODE_FOR_vashrv8si3 ++#define CODE_FOR_lasx_xvsra_d CODE_FOR_vashrv4di3 ++#define CODE_FOR_lasx_xvsrai_b CODE_FOR_vashrv32qi3 ++#define CODE_FOR_lasx_xvsrai_h CODE_FOR_vashrv16hi3 ++#define CODE_FOR_lasx_xvsrai_w CODE_FOR_vashrv8si3 ++#define CODE_FOR_lasx_xvsrai_d CODE_FOR_vashrv4di3 ++#define CODE_FOR_lasx_xvsrl_b CODE_FOR_vlshrv32qi3 ++#define CODE_FOR_lasx_xvsrl_h CODE_FOR_vlshrv16hi3 ++#define CODE_FOR_lasx_xvsrl_w CODE_FOR_vlshrv8si3 ++#define CODE_FOR_lasx_xvsrl_d CODE_FOR_vlshrv4di3 ++#define CODE_FOR_lasx_xvsrli_b CODE_FOR_vlshrv32qi3 ++#define CODE_FOR_lasx_xvsrli_h CODE_FOR_vlshrv16hi3 ++#define CODE_FOR_lasx_xvsrli_w CODE_FOR_vlshrv8si3 ++#define CODE_FOR_lasx_xvsrli_d CODE_FOR_vlshrv4di3 ++#define CODE_FOR_lasx_xvsub_b CODE_FOR_subv32qi3 ++#define CODE_FOR_lasx_xvsub_h CODE_FOR_subv16hi3 ++#define CODE_FOR_lasx_xvsub_w CODE_FOR_subv8si3 ++#define CODE_FOR_lasx_xvsub_d CODE_FOR_subv4di3 ++#define CODE_FOR_lasx_xvsubi_bu CODE_FOR_subv32qi3 ++#define CODE_FOR_lasx_xvsubi_hu CODE_FOR_subv16hi3 ++#define CODE_FOR_lasx_xvsubi_wu CODE_FOR_subv8si3 ++#define CODE_FOR_lasx_xvsubi_du CODE_FOR_subv4di3 ++#define CODE_FOR_lasx_xvpackod_d CODE_FOR_lasx_xvilvh_d ++#define CODE_FOR_lasx_xvpackev_d CODE_FOR_lasx_xvilvl_d ++#define CODE_FOR_lasx_xvpickod_d CODE_FOR_lasx_xvilvh_d ++#define CODE_FOR_lasx_xvpickev_d CODE_FOR_lasx_xvilvl_d ++#define CODE_FOR_lasx_xvrepli_b CODE_FOR_lasx_xvrepliv32qi ++#define CODE_FOR_lasx_xvrepli_h CODE_FOR_lasx_xvrepliv16hi ++#define CODE_FOR_lasx_xvrepli_w CODE_FOR_lasx_xvrepliv8si ++#define CODE_FOR_lasx_xvrepli_d CODE_FOR_lasx_xvrepliv4di ++ ++#define CODE_FOR_lasx_xvandn_v CODE_FOR_xvandnv32qi3 ++#define CODE_FOR_lasx_xvorn_v CODE_FOR_xvornv32qi3 ++#define CODE_FOR_lasx_xvneg_b CODE_FOR_negv32qi2 ++#define CODE_FOR_lasx_xvneg_h CODE_FOR_negv16hi2 ++#define CODE_FOR_lasx_xvneg_w CODE_FOR_negv8si2 ++#define CODE_FOR_lasx_xvneg_d 
CODE_FOR_negv4di2 ++#define CODE_FOR_lasx_xvbsrl_v CODE_FOR_lasx_xvbsrl_b ++#define CODE_FOR_lasx_xvbsll_v CODE_FOR_lasx_xvbsll_b ++#define CODE_FOR_lasx_xvfmadd_s CODE_FOR_fmav8sf4 ++#define CODE_FOR_lasx_xvfmadd_d CODE_FOR_fmav4df4 ++#define CODE_FOR_lasx_xvfmsub_s CODE_FOR_fmsv8sf4 ++#define CODE_FOR_lasx_xvfmsub_d CODE_FOR_fmsv4df4 ++#define CODE_FOR_lasx_xvfnmadd_s CODE_FOR_xvfnmaddv8sf4_nmadd4 ++#define CODE_FOR_lasx_xvfnmadd_d CODE_FOR_xvfnmaddv4df4_nmadd4 ++#define CODE_FOR_lasx_xvfnmsub_s CODE_FOR_xvfnmsubv8sf4_nmsub4 ++#define CODE_FOR_lasx_xvfnmsub_d CODE_FOR_xvfnmsubv4df4_nmsub4 ++ ++#define CODE_FOR_lasx_xvpermi_q CODE_FOR_lasx_xvpermi_q_v32qi ++#define CODE_FOR_lasx_xvpermi_d CODE_FOR_lasx_xvpermi_d_v4di ++#define CODE_FOR_lasx_xbnz_v CODE_FOR_lasx_xbnz_v_b ++#define CODE_FOR_lasx_xbz_v CODE_FOR_lasx_xbz_v_b ++ ++#define CODE_FOR_lasx_xvssub_b CODE_FOR_lasx_xvssub_s_b ++#define CODE_FOR_lasx_xvssub_h CODE_FOR_lasx_xvssub_s_h ++#define CODE_FOR_lasx_xvssub_w CODE_FOR_lasx_xvssub_s_w ++#define CODE_FOR_lasx_xvssub_d CODE_FOR_lasx_xvssub_s_d ++#define CODE_FOR_lasx_xvssub_bu CODE_FOR_lasx_xvssub_u_bu ++#define CODE_FOR_lasx_xvssub_hu CODE_FOR_lasx_xvssub_u_hu ++#define CODE_FOR_lasx_xvssub_wu CODE_FOR_lasx_xvssub_u_wu ++#define CODE_FOR_lasx_xvssub_du CODE_FOR_lasx_xvssub_u_du ++#define CODE_FOR_lasx_xvabsd_b CODE_FOR_lasx_xvabsd_s_b ++#define CODE_FOR_lasx_xvabsd_h CODE_FOR_lasx_xvabsd_s_h ++#define CODE_FOR_lasx_xvabsd_w CODE_FOR_lasx_xvabsd_s_w ++#define CODE_FOR_lasx_xvabsd_d CODE_FOR_lasx_xvabsd_s_d ++#define CODE_FOR_lasx_xvabsd_bu CODE_FOR_lasx_xvabsd_u_bu ++#define CODE_FOR_lasx_xvabsd_hu CODE_FOR_lasx_xvabsd_u_hu ++#define CODE_FOR_lasx_xvabsd_wu CODE_FOR_lasx_xvabsd_u_wu ++#define CODE_FOR_lasx_xvabsd_du CODE_FOR_lasx_xvabsd_u_du ++#define CODE_FOR_lasx_xvavg_b CODE_FOR_lasx_xvavg_s_b ++#define CODE_FOR_lasx_xvavg_h CODE_FOR_lasx_xvavg_s_h ++#define CODE_FOR_lasx_xvavg_w CODE_FOR_lasx_xvavg_s_w ++#define CODE_FOR_lasx_xvavg_d 
CODE_FOR_lasx_xvavg_s_d ++#define CODE_FOR_lasx_xvavg_bu CODE_FOR_lasx_xvavg_u_bu ++#define CODE_FOR_lasx_xvavg_hu CODE_FOR_lasx_xvavg_u_hu ++#define CODE_FOR_lasx_xvavg_wu CODE_FOR_lasx_xvavg_u_wu ++#define CODE_FOR_lasx_xvavg_du CODE_FOR_lasx_xvavg_u_du ++#define CODE_FOR_lasx_xvavgr_b CODE_FOR_lasx_xvavgr_s_b ++#define CODE_FOR_lasx_xvavgr_h CODE_FOR_lasx_xvavgr_s_h ++#define CODE_FOR_lasx_xvavgr_w CODE_FOR_lasx_xvavgr_s_w ++#define CODE_FOR_lasx_xvavgr_d CODE_FOR_lasx_xvavgr_s_d ++#define CODE_FOR_lasx_xvavgr_bu CODE_FOR_lasx_xvavgr_u_bu ++#define CODE_FOR_lasx_xvavgr_hu CODE_FOR_lasx_xvavgr_u_hu ++#define CODE_FOR_lasx_xvavgr_wu CODE_FOR_lasx_xvavgr_u_wu ++#define CODE_FOR_lasx_xvavgr_du CODE_FOR_lasx_xvavgr_u_du ++#define CODE_FOR_lasx_xvmuh_b CODE_FOR_lasx_xvmuh_s_b ++#define CODE_FOR_lasx_xvmuh_h CODE_FOR_lasx_xvmuh_s_h ++#define CODE_FOR_lasx_xvmuh_w CODE_FOR_lasx_xvmuh_s_w ++#define CODE_FOR_lasx_xvmuh_d CODE_FOR_lasx_xvmuh_s_d ++#define CODE_FOR_lasx_xvmuh_bu CODE_FOR_lasx_xvmuh_u_bu ++#define CODE_FOR_lasx_xvmuh_hu CODE_FOR_lasx_xvmuh_u_hu ++#define CODE_FOR_lasx_xvmuh_wu CODE_FOR_lasx_xvmuh_u_wu ++#define CODE_FOR_lasx_xvmuh_du CODE_FOR_lasx_xvmuh_u_du ++#define CODE_FOR_lasx_xvssran_b_h CODE_FOR_lasx_xvssran_s_b_h ++#define CODE_FOR_lasx_xvssran_h_w CODE_FOR_lasx_xvssran_s_h_w ++#define CODE_FOR_lasx_xvssran_w_d CODE_FOR_lasx_xvssran_s_w_d ++#define CODE_FOR_lasx_xvssran_bu_h CODE_FOR_lasx_xvssran_u_bu_h ++#define CODE_FOR_lasx_xvssran_hu_w CODE_FOR_lasx_xvssran_u_hu_w ++#define CODE_FOR_lasx_xvssran_wu_d CODE_FOR_lasx_xvssran_u_wu_d ++#define CODE_FOR_lasx_xvssrarn_b_h CODE_FOR_lasx_xvssrarn_s_b_h ++#define CODE_FOR_lasx_xvssrarn_h_w CODE_FOR_lasx_xvssrarn_s_h_w ++#define CODE_FOR_lasx_xvssrarn_w_d CODE_FOR_lasx_xvssrarn_s_w_d ++#define CODE_FOR_lasx_xvssrarn_bu_h CODE_FOR_lasx_xvssrarn_u_bu_h ++#define CODE_FOR_lasx_xvssrarn_hu_w CODE_FOR_lasx_xvssrarn_u_hu_w ++#define CODE_FOR_lasx_xvssrarn_wu_d CODE_FOR_lasx_xvssrarn_u_wu_d ++#define 
CODE_FOR_lasx_xvssrln_bu_h CODE_FOR_lasx_xvssrln_u_bu_h ++#define CODE_FOR_lasx_xvssrln_hu_w CODE_FOR_lasx_xvssrln_u_hu_w ++#define CODE_FOR_lasx_xvssrln_wu_d CODE_FOR_lasx_xvssrln_u_wu_d ++#define CODE_FOR_lasx_xvssrlrn_bu_h CODE_FOR_lasx_xvssrlrn_u_bu_h ++#define CODE_FOR_lasx_xvssrlrn_hu_w CODE_FOR_lasx_xvssrlrn_u_hu_w ++#define CODE_FOR_lasx_xvssrlrn_wu_d CODE_FOR_lasx_xvssrlrn_u_wu_d ++#define CODE_FOR_lasx_xvftint_w_s CODE_FOR_lasx_xvftint_s_w_s ++#define CODE_FOR_lasx_xvftint_l_d CODE_FOR_lasx_xvftint_s_l_d ++#define CODE_FOR_lasx_xvftint_wu_s CODE_FOR_lasx_xvftint_u_wu_s ++#define CODE_FOR_lasx_xvftint_lu_d CODE_FOR_lasx_xvftint_u_lu_d ++#define CODE_FOR_lasx_xvsllwil_h_b CODE_FOR_lasx_xvsllwil_s_h_b ++#define CODE_FOR_lasx_xvsllwil_w_h CODE_FOR_lasx_xvsllwil_s_w_h ++#define CODE_FOR_lasx_xvsllwil_d_w CODE_FOR_lasx_xvsllwil_s_d_w ++#define CODE_FOR_lasx_xvsllwil_hu_bu CODE_FOR_lasx_xvsllwil_u_hu_bu ++#define CODE_FOR_lasx_xvsllwil_wu_hu CODE_FOR_lasx_xvsllwil_u_wu_hu ++#define CODE_FOR_lasx_xvsllwil_du_wu CODE_FOR_lasx_xvsllwil_u_du_wu ++#define CODE_FOR_lasx_xvsat_b CODE_FOR_lasx_xvsat_s_b ++#define CODE_FOR_lasx_xvsat_h CODE_FOR_lasx_xvsat_s_h ++#define CODE_FOR_lasx_xvsat_w CODE_FOR_lasx_xvsat_s_w ++#define CODE_FOR_lasx_xvsat_d CODE_FOR_lasx_xvsat_s_d ++#define CODE_FOR_lasx_xvsat_bu CODE_FOR_lasx_xvsat_u_bu ++#define CODE_FOR_lasx_xvsat_hu CODE_FOR_lasx_xvsat_u_hu ++#define CODE_FOR_lasx_xvsat_wu CODE_FOR_lasx_xvsat_u_wu ++#define CODE_FOR_lasx_xvsat_du CODE_FOR_lasx_xvsat_u_du ++ + static const struct loongarch_builtin_description loongarch_builtins[] = { + #define LARCH_MOVFCSR2GR 0 + DIRECT_BUILTIN (movfcsr2gr, LARCH_USI_FTYPE_UQI, hard_float), +@@ -1209,7 +1511,761 @@ static const struct loongarch_builtin_description loongarch_builtins[] = { + LSX_BUILTIN (vshuf_b, LARCH_V16QI_FTYPE_V16QI_V16QI_V16QI), + LSX_BUILTIN (vldx, LARCH_V16QI_FTYPE_CVPOINTER_DI), + LSX_NO_TARGET_BUILTIN (vstx, LARCH_VOID_FTYPE_V16QI_CVPOINTER_DI), +- LSX_BUILTIN 
(vextl_qu_du, LARCH_UV2DI_FTYPE_UV2DI) ++ LSX_BUILTIN (vextl_qu_du, LARCH_UV2DI_FTYPE_UV2DI), ++ ++ /* Built-in functions for LASX */ ++ LASX_BUILTIN (xvsll_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvsll_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsll_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsll_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvslli_b, LARCH_V32QI_FTYPE_V32QI_UQI), ++ LASX_BUILTIN (xvslli_h, LARCH_V16HI_FTYPE_V16HI_UQI), ++ LASX_BUILTIN (xvslli_w, LARCH_V8SI_FTYPE_V8SI_UQI), ++ LASX_BUILTIN (xvslli_d, LARCH_V4DI_FTYPE_V4DI_UQI), ++ LASX_BUILTIN (xvsra_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvsra_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsra_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsra_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvsrai_b, LARCH_V32QI_FTYPE_V32QI_UQI), ++ LASX_BUILTIN (xvsrai_h, LARCH_V16HI_FTYPE_V16HI_UQI), ++ LASX_BUILTIN (xvsrai_w, LARCH_V8SI_FTYPE_V8SI_UQI), ++ LASX_BUILTIN (xvsrai_d, LARCH_V4DI_FTYPE_V4DI_UQI), ++ LASX_BUILTIN (xvsrar_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvsrar_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsrar_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsrar_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvsrari_b, LARCH_V32QI_FTYPE_V32QI_UQI), ++ LASX_BUILTIN (xvsrari_h, LARCH_V16HI_FTYPE_V16HI_UQI), ++ LASX_BUILTIN (xvsrari_w, LARCH_V8SI_FTYPE_V8SI_UQI), ++ LASX_BUILTIN (xvsrari_d, LARCH_V4DI_FTYPE_V4DI_UQI), ++ LASX_BUILTIN (xvsrl_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvsrl_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsrl_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsrl_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvsrli_b, LARCH_V32QI_FTYPE_V32QI_UQI), ++ LASX_BUILTIN (xvsrli_h, LARCH_V16HI_FTYPE_V16HI_UQI), ++ LASX_BUILTIN (xvsrli_w, LARCH_V8SI_FTYPE_V8SI_UQI), ++ LASX_BUILTIN (xvsrli_d, LARCH_V4DI_FTYPE_V4DI_UQI), ++ LASX_BUILTIN (xvsrlr_b, 
LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvsrlr_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsrlr_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsrlr_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvsrlri_b, LARCH_V32QI_FTYPE_V32QI_UQI), ++ LASX_BUILTIN (xvsrlri_h, LARCH_V16HI_FTYPE_V16HI_UQI), ++ LASX_BUILTIN (xvsrlri_w, LARCH_V8SI_FTYPE_V8SI_UQI), ++ LASX_BUILTIN (xvsrlri_d, LARCH_V4DI_FTYPE_V4DI_UQI), ++ LASX_BUILTIN (xvbitclr_b, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvbitclr_h, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvbitclr_w, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvbitclr_d, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvbitclri_b, LARCH_UV32QI_FTYPE_UV32QI_UQI), ++ LASX_BUILTIN (xvbitclri_h, LARCH_UV16HI_FTYPE_UV16HI_UQI), ++ LASX_BUILTIN (xvbitclri_w, LARCH_UV8SI_FTYPE_UV8SI_UQI), ++ LASX_BUILTIN (xvbitclri_d, LARCH_UV4DI_FTYPE_UV4DI_UQI), ++ LASX_BUILTIN (xvbitset_b, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvbitset_h, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvbitset_w, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvbitset_d, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvbitseti_b, LARCH_UV32QI_FTYPE_UV32QI_UQI), ++ LASX_BUILTIN (xvbitseti_h, LARCH_UV16HI_FTYPE_UV16HI_UQI), ++ LASX_BUILTIN (xvbitseti_w, LARCH_UV8SI_FTYPE_UV8SI_UQI), ++ LASX_BUILTIN (xvbitseti_d, LARCH_UV4DI_FTYPE_UV4DI_UQI), ++ LASX_BUILTIN (xvbitrev_b, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvbitrev_h, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvbitrev_w, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvbitrev_d, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvbitrevi_b, LARCH_UV32QI_FTYPE_UV32QI_UQI), ++ LASX_BUILTIN (xvbitrevi_h, LARCH_UV16HI_FTYPE_UV16HI_UQI), ++ LASX_BUILTIN (xvbitrevi_w, LARCH_UV8SI_FTYPE_UV8SI_UQI), ++ LASX_BUILTIN (xvbitrevi_d, LARCH_UV4DI_FTYPE_UV4DI_UQI), ++ LASX_BUILTIN (xvadd_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvadd_h, 
LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvadd_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvadd_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvaddi_bu, LARCH_V32QI_FTYPE_V32QI_UQI), ++ LASX_BUILTIN (xvaddi_hu, LARCH_V16HI_FTYPE_V16HI_UQI), ++ LASX_BUILTIN (xvaddi_wu, LARCH_V8SI_FTYPE_V8SI_UQI), ++ LASX_BUILTIN (xvaddi_du, LARCH_V4DI_FTYPE_V4DI_UQI), ++ LASX_BUILTIN (xvsub_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvsub_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsub_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsub_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvsubi_bu, LARCH_V32QI_FTYPE_V32QI_UQI), ++ LASX_BUILTIN (xvsubi_hu, LARCH_V16HI_FTYPE_V16HI_UQI), ++ LASX_BUILTIN (xvsubi_wu, LARCH_V8SI_FTYPE_V8SI_UQI), ++ LASX_BUILTIN (xvsubi_du, LARCH_V4DI_FTYPE_V4DI_UQI), ++ LASX_BUILTIN (xvmax_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvmax_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvmax_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvmax_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvmaxi_b, LARCH_V32QI_FTYPE_V32QI_QI), ++ LASX_BUILTIN (xvmaxi_h, LARCH_V16HI_FTYPE_V16HI_QI), ++ LASX_BUILTIN (xvmaxi_w, LARCH_V8SI_FTYPE_V8SI_QI), ++ LASX_BUILTIN (xvmaxi_d, LARCH_V4DI_FTYPE_V4DI_QI), ++ LASX_BUILTIN (xvmax_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvmax_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvmax_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvmax_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvmaxi_bu, LARCH_UV32QI_FTYPE_UV32QI_UQI), ++ LASX_BUILTIN (xvmaxi_hu, LARCH_UV16HI_FTYPE_UV16HI_UQI), ++ LASX_BUILTIN (xvmaxi_wu, LARCH_UV8SI_FTYPE_UV8SI_UQI), ++ LASX_BUILTIN (xvmaxi_du, LARCH_UV4DI_FTYPE_UV4DI_UQI), ++ LASX_BUILTIN (xvmin_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvmin_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvmin_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvmin_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN 
(xvmini_b, LARCH_V32QI_FTYPE_V32QI_QI), ++ LASX_BUILTIN (xvmini_h, LARCH_V16HI_FTYPE_V16HI_QI), ++ LASX_BUILTIN (xvmini_w, LARCH_V8SI_FTYPE_V8SI_QI), ++ LASX_BUILTIN (xvmini_d, LARCH_V4DI_FTYPE_V4DI_QI), ++ LASX_BUILTIN (xvmin_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvmin_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvmin_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvmin_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvmini_bu, LARCH_UV32QI_FTYPE_UV32QI_UQI), ++ LASX_BUILTIN (xvmini_hu, LARCH_UV16HI_FTYPE_UV16HI_UQI), ++ LASX_BUILTIN (xvmini_wu, LARCH_UV8SI_FTYPE_UV8SI_UQI), ++ LASX_BUILTIN (xvmini_du, LARCH_UV4DI_FTYPE_UV4DI_UQI), ++ LASX_BUILTIN (xvseq_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvseq_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvseq_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvseq_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvseqi_b, LARCH_V32QI_FTYPE_V32QI_QI), ++ LASX_BUILTIN (xvseqi_h, LARCH_V16HI_FTYPE_V16HI_QI), ++ LASX_BUILTIN (xvseqi_w, LARCH_V8SI_FTYPE_V8SI_QI), ++ LASX_BUILTIN (xvseqi_d, LARCH_V4DI_FTYPE_V4DI_QI), ++ LASX_BUILTIN (xvslt_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvslt_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvslt_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvslt_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvslti_b, LARCH_V32QI_FTYPE_V32QI_QI), ++ LASX_BUILTIN (xvslti_h, LARCH_V16HI_FTYPE_V16HI_QI), ++ LASX_BUILTIN (xvslti_w, LARCH_V8SI_FTYPE_V8SI_QI), ++ LASX_BUILTIN (xvslti_d, LARCH_V4DI_FTYPE_V4DI_QI), ++ LASX_BUILTIN (xvslt_bu, LARCH_V32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvslt_hu, LARCH_V16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvslt_wu, LARCH_V8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvslt_du, LARCH_V4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvslti_bu, LARCH_V32QI_FTYPE_UV32QI_UQI), ++ LASX_BUILTIN (xvslti_hu, LARCH_V16HI_FTYPE_UV16HI_UQI), ++ LASX_BUILTIN (xvslti_wu, LARCH_V8SI_FTYPE_UV8SI_UQI), ++ 
LASX_BUILTIN (xvslti_du, LARCH_V4DI_FTYPE_UV4DI_UQI), ++ LASX_BUILTIN (xvsle_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvsle_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsle_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsle_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvslei_b, LARCH_V32QI_FTYPE_V32QI_QI), ++ LASX_BUILTIN (xvslei_h, LARCH_V16HI_FTYPE_V16HI_QI), ++ LASX_BUILTIN (xvslei_w, LARCH_V8SI_FTYPE_V8SI_QI), ++ LASX_BUILTIN (xvslei_d, LARCH_V4DI_FTYPE_V4DI_QI), ++ LASX_BUILTIN (xvsle_bu, LARCH_V32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvsle_hu, LARCH_V16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvsle_wu, LARCH_V8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvsle_du, LARCH_V4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvslei_bu, LARCH_V32QI_FTYPE_UV32QI_UQI), ++ LASX_BUILTIN (xvslei_hu, LARCH_V16HI_FTYPE_UV16HI_UQI), ++ LASX_BUILTIN (xvslei_wu, LARCH_V8SI_FTYPE_UV8SI_UQI), ++ LASX_BUILTIN (xvslei_du, LARCH_V4DI_FTYPE_UV4DI_UQI), ++ ++ LASX_BUILTIN (xvsat_b, LARCH_V32QI_FTYPE_V32QI_UQI), ++ LASX_BUILTIN (xvsat_h, LARCH_V16HI_FTYPE_V16HI_UQI), ++ LASX_BUILTIN (xvsat_w, LARCH_V8SI_FTYPE_V8SI_UQI), ++ LASX_BUILTIN (xvsat_d, LARCH_V4DI_FTYPE_V4DI_UQI), ++ LASX_BUILTIN (xvsat_bu, LARCH_UV32QI_FTYPE_UV32QI_UQI), ++ LASX_BUILTIN (xvsat_hu, LARCH_UV16HI_FTYPE_UV16HI_UQI), ++ LASX_BUILTIN (xvsat_wu, LARCH_UV8SI_FTYPE_UV8SI_UQI), ++ LASX_BUILTIN (xvsat_du, LARCH_UV4DI_FTYPE_UV4DI_UQI), ++ ++ LASX_BUILTIN (xvadda_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvadda_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvadda_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvadda_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvsadd_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvsadd_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsadd_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsadd_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvsadd_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvsadd_hu, 
LARCH_UV16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvsadd_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvsadd_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ ++ LASX_BUILTIN (xvavg_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvavg_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvavg_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvavg_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvavg_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvavg_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvavg_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvavg_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ ++ LASX_BUILTIN (xvavgr_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvavgr_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvavgr_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvavgr_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvavgr_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvavgr_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvavgr_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvavgr_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ ++ LASX_BUILTIN (xvssub_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvssub_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvssub_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvssub_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvssub_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvssub_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvssub_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvssub_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvabsd_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvabsd_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvabsd_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvabsd_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvabsd_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvabsd_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvabsd_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), 
++ LASX_BUILTIN (xvabsd_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ ++ LASX_BUILTIN (xvmul_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvmul_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvmul_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvmul_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvmadd_b, LARCH_V32QI_FTYPE_V32QI_V32QI_V32QI), ++ LASX_BUILTIN (xvmadd_h, LARCH_V16HI_FTYPE_V16HI_V16HI_V16HI), ++ LASX_BUILTIN (xvmadd_w, LARCH_V8SI_FTYPE_V8SI_V8SI_V8SI), ++ LASX_BUILTIN (xvmadd_d, LARCH_V4DI_FTYPE_V4DI_V4DI_V4DI), ++ LASX_BUILTIN (xvmsub_b, LARCH_V32QI_FTYPE_V32QI_V32QI_V32QI), ++ LASX_BUILTIN (xvmsub_h, LARCH_V16HI_FTYPE_V16HI_V16HI_V16HI), ++ LASX_BUILTIN (xvmsub_w, LARCH_V8SI_FTYPE_V8SI_V8SI_V8SI), ++ LASX_BUILTIN (xvmsub_d, LARCH_V4DI_FTYPE_V4DI_V4DI_V4DI), ++ LASX_BUILTIN (xvdiv_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvdiv_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvdiv_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvdiv_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvdiv_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvdiv_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvdiv_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvdiv_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvhaddw_h_b, LARCH_V16HI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvhaddw_w_h, LARCH_V8SI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvhaddw_d_w, LARCH_V4DI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvhaddw_hu_bu, LARCH_UV16HI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvhaddw_wu_hu, LARCH_UV8SI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvhaddw_du_wu, LARCH_UV4DI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvhsubw_h_b, LARCH_V16HI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvhsubw_w_h, LARCH_V8SI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvhsubw_d_w, LARCH_V4DI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvhsubw_hu_bu, LARCH_V16HI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvhsubw_wu_hu, LARCH_V8SI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvhsubw_du_wu, 
LARCH_V4DI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvmod_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvmod_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvmod_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvmod_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvmod_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvmod_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvmod_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvmod_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ ++ LASX_BUILTIN (xvrepl128vei_b, LARCH_V32QI_FTYPE_V32QI_UQI), ++ LASX_BUILTIN (xvrepl128vei_h, LARCH_V16HI_FTYPE_V16HI_UQI), ++ LASX_BUILTIN (xvrepl128vei_w, LARCH_V8SI_FTYPE_V8SI_UQI), ++ LASX_BUILTIN (xvrepl128vei_d, LARCH_V4DI_FTYPE_V4DI_UQI), ++ LASX_BUILTIN (xvpickev_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvpickev_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvpickev_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvpickev_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvpickod_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvpickod_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvpickod_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvpickod_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvilvh_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvilvh_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvilvh_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvilvh_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvilvl_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvilvl_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvilvl_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvilvl_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvpackev_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvpackev_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvpackev_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvpackev_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvpackod_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ 
LASX_BUILTIN (xvpackod_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvpackod_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvpackod_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvshuf_b, LARCH_V32QI_FTYPE_V32QI_V32QI_V32QI), ++ LASX_BUILTIN (xvshuf_h, LARCH_V16HI_FTYPE_V16HI_V16HI_V16HI), ++ LASX_BUILTIN (xvshuf_w, LARCH_V8SI_FTYPE_V8SI_V8SI_V8SI), ++ LASX_BUILTIN (xvshuf_d, LARCH_V4DI_FTYPE_V4DI_V4DI_V4DI), ++ LASX_BUILTIN (xvand_v, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvandi_b, LARCH_UV32QI_FTYPE_UV32QI_UQI), ++ LASX_BUILTIN (xvor_v, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvori_b, LARCH_UV32QI_FTYPE_UV32QI_UQI), ++ LASX_BUILTIN (xvnor_v, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvnori_b, LARCH_UV32QI_FTYPE_UV32QI_UQI), ++ LASX_BUILTIN (xvxor_v, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvxori_b, LARCH_UV32QI_FTYPE_UV32QI_UQI), ++ LASX_BUILTIN (xvbitsel_v, LARCH_UV32QI_FTYPE_UV32QI_UV32QI_UV32QI), ++ LASX_BUILTIN (xvbitseli_b, LARCH_UV32QI_FTYPE_UV32QI_UV32QI_USI), ++ ++ LASX_BUILTIN (xvshuf4i_b, LARCH_V32QI_FTYPE_V32QI_USI), ++ LASX_BUILTIN (xvshuf4i_h, LARCH_V16HI_FTYPE_V16HI_USI), ++ LASX_BUILTIN (xvshuf4i_w, LARCH_V8SI_FTYPE_V8SI_USI), ++ ++ LASX_BUILTIN (xvreplgr2vr_b, LARCH_V32QI_FTYPE_SI), ++ LASX_BUILTIN (xvreplgr2vr_h, LARCH_V16HI_FTYPE_SI), ++ LASX_BUILTIN (xvreplgr2vr_w, LARCH_V8SI_FTYPE_SI), ++ LASX_BUILTIN (xvreplgr2vr_d, LARCH_V4DI_FTYPE_DI), ++ LASX_BUILTIN (xvpcnt_b, LARCH_V32QI_FTYPE_V32QI), ++ LASX_BUILTIN (xvpcnt_h, LARCH_V16HI_FTYPE_V16HI), ++ LASX_BUILTIN (xvpcnt_w, LARCH_V8SI_FTYPE_V8SI), ++ LASX_BUILTIN (xvpcnt_d, LARCH_V4DI_FTYPE_V4DI), ++ LASX_BUILTIN (xvclo_b, LARCH_V32QI_FTYPE_V32QI), ++ LASX_BUILTIN (xvclo_h, LARCH_V16HI_FTYPE_V16HI), ++ LASX_BUILTIN (xvclo_w, LARCH_V8SI_FTYPE_V8SI), ++ LASX_BUILTIN (xvclo_d, LARCH_V4DI_FTYPE_V4DI), ++ LASX_BUILTIN (xvclz_b, LARCH_V32QI_FTYPE_V32QI), ++ LASX_BUILTIN (xvclz_h, LARCH_V16HI_FTYPE_V16HI), ++ LASX_BUILTIN (xvclz_w, 
LARCH_V8SI_FTYPE_V8SI), ++ LASX_BUILTIN (xvclz_d, LARCH_V4DI_FTYPE_V4DI), ++ ++ LASX_BUILTIN (xvrepli_b, LARCH_V32QI_FTYPE_HI), ++ LASX_BUILTIN (xvrepli_h, LARCH_V16HI_FTYPE_HI), ++ LASX_BUILTIN (xvrepli_w, LARCH_V8SI_FTYPE_HI), ++ LASX_BUILTIN (xvrepli_d, LARCH_V4DI_FTYPE_HI), ++ LASX_BUILTIN (xvfcmp_caf_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_caf_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_cor_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_cor_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_cun_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_cun_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_cune_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_cune_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_cueq_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_cueq_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_ceq_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_ceq_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_cne_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_cne_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_clt_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_clt_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_cult_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_cult_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_cle_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_cle_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_cule_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_cule_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_saf_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_saf_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_sor_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_sor_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_sun_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_sun_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_sune_s, 
LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_sune_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_sueq_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_sueq_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_seq_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_seq_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_sne_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_sne_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_slt_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_slt_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_sult_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_sult_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_sle_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_sle_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_sule_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_sule_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfadd_s, LARCH_V8SF_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfadd_d, LARCH_V4DF_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfsub_s, LARCH_V8SF_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfsub_d, LARCH_V4DF_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfmul_s, LARCH_V8SF_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfmul_d, LARCH_V4DF_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfdiv_s, LARCH_V8SF_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfdiv_d, LARCH_V4DF_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcvt_h_s, LARCH_V16HI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcvt_s_d, LARCH_V8SF_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfmin_s, LARCH_V8SF_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfmin_d, LARCH_V4DF_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfmina_s, LARCH_V8SF_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfmina_d, LARCH_V4DF_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfmax_s, LARCH_V8SF_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfmax_d, LARCH_V4DF_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfmaxa_s, LARCH_V8SF_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfmaxa_d, LARCH_V4DF_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfclass_s, 
LARCH_V8SI_FTYPE_V8SF), ++ LASX_BUILTIN (xvfclass_d, LARCH_V4DI_FTYPE_V4DF), ++ LASX_BUILTIN (xvfsqrt_s, LARCH_V8SF_FTYPE_V8SF), ++ LASX_BUILTIN (xvfsqrt_d, LARCH_V4DF_FTYPE_V4DF), ++ LASX_BUILTIN (xvfrecip_s, LARCH_V8SF_FTYPE_V8SF), ++ LASX_BUILTIN (xvfrecip_d, LARCH_V4DF_FTYPE_V4DF), ++ LASX_BUILTIN (xvfrint_s, LARCH_V8SF_FTYPE_V8SF), ++ LASX_BUILTIN (xvfrint_d, LARCH_V4DF_FTYPE_V4DF), ++ LASX_BUILTIN (xvfrsqrt_s, LARCH_V8SF_FTYPE_V8SF), ++ LASX_BUILTIN (xvfrsqrt_d, LARCH_V4DF_FTYPE_V4DF), ++ LASX_BUILTIN (xvflogb_s, LARCH_V8SF_FTYPE_V8SF), ++ LASX_BUILTIN (xvflogb_d, LARCH_V4DF_FTYPE_V4DF), ++ LASX_BUILTIN (xvfcvth_s_h, LARCH_V8SF_FTYPE_V16HI), ++ LASX_BUILTIN (xvfcvth_d_s, LARCH_V4DF_FTYPE_V8SF), ++ LASX_BUILTIN (xvfcvtl_s_h, LARCH_V8SF_FTYPE_V16HI), ++ LASX_BUILTIN (xvfcvtl_d_s, LARCH_V4DF_FTYPE_V8SF), ++ LASX_BUILTIN (xvftint_w_s, LARCH_V8SI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftint_l_d, LARCH_V4DI_FTYPE_V4DF), ++ LASX_BUILTIN (xvftint_wu_s, LARCH_UV8SI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftint_lu_d, LARCH_UV4DI_FTYPE_V4DF), ++ LASX_BUILTIN (xvftintrz_w_s, LARCH_V8SI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftintrz_l_d, LARCH_V4DI_FTYPE_V4DF), ++ LASX_BUILTIN (xvftintrz_wu_s, LARCH_UV8SI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftintrz_lu_d, LARCH_UV4DI_FTYPE_V4DF), ++ LASX_BUILTIN (xvffint_s_w, LARCH_V8SF_FTYPE_V8SI), ++ LASX_BUILTIN (xvffint_d_l, LARCH_V4DF_FTYPE_V4DI), ++ LASX_BUILTIN (xvffint_s_wu, LARCH_V8SF_FTYPE_UV8SI), ++ LASX_BUILTIN (xvffint_d_lu, LARCH_V4DF_FTYPE_UV4DI), ++ ++ LASX_BUILTIN (xvreplve_b, LARCH_V32QI_FTYPE_V32QI_SI), ++ LASX_BUILTIN (xvreplve_h, LARCH_V16HI_FTYPE_V16HI_SI), ++ LASX_BUILTIN (xvreplve_w, LARCH_V8SI_FTYPE_V8SI_SI), ++ LASX_BUILTIN (xvreplve_d, LARCH_V4DI_FTYPE_V4DI_SI), ++ LASX_BUILTIN (xvpermi_w, LARCH_V8SI_FTYPE_V8SI_V8SI_USI), ++ ++ LASX_BUILTIN (xvandn_v, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvneg_b, LARCH_V32QI_FTYPE_V32QI), ++ LASX_BUILTIN (xvneg_h, LARCH_V16HI_FTYPE_V16HI), ++ LASX_BUILTIN (xvneg_w, LARCH_V8SI_FTYPE_V8SI), 
++ LASX_BUILTIN (xvneg_d, LARCH_V4DI_FTYPE_V4DI), ++ LASX_BUILTIN (xvmuh_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvmuh_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvmuh_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvmuh_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvmuh_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvmuh_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvmuh_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvmuh_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvsllwil_h_b, LARCH_V16HI_FTYPE_V32QI_UQI), ++ LASX_BUILTIN (xvsllwil_w_h, LARCH_V8SI_FTYPE_V16HI_UQI), ++ LASX_BUILTIN (xvsllwil_d_w, LARCH_V4DI_FTYPE_V8SI_UQI), ++ LASX_BUILTIN (xvsllwil_hu_bu, LARCH_UV16HI_FTYPE_UV32QI_UQI), /* FIXME: U? */ ++ LASX_BUILTIN (xvsllwil_wu_hu, LARCH_UV8SI_FTYPE_UV16HI_UQI), ++ LASX_BUILTIN (xvsllwil_du_wu, LARCH_UV4DI_FTYPE_UV8SI_UQI), ++ LASX_BUILTIN (xvsran_b_h, LARCH_V32QI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsran_h_w, LARCH_V16HI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsran_w_d, LARCH_V8SI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvssran_b_h, LARCH_V32QI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvssran_h_w, LARCH_V16HI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvssran_w_d, LARCH_V8SI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvssran_bu_h, LARCH_UV32QI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvssran_hu_w, LARCH_UV16HI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvssran_wu_d, LARCH_UV8SI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvsrarn_b_h, LARCH_V32QI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsrarn_h_w, LARCH_V16HI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsrarn_w_d, LARCH_V8SI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvssrarn_b_h, LARCH_V32QI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvssrarn_h_w, LARCH_V16HI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvssrarn_w_d, LARCH_V8SI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvssrarn_bu_h, LARCH_UV32QI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvssrarn_hu_w, LARCH_UV16HI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvssrarn_wu_d, 
LARCH_UV8SI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvsrln_b_h, LARCH_V32QI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsrln_h_w, LARCH_V16HI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsrln_w_d, LARCH_V8SI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvssrln_bu_h, LARCH_UV32QI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvssrln_hu_w, LARCH_UV16HI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvssrln_wu_d, LARCH_UV8SI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvsrlrn_b_h, LARCH_V32QI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsrlrn_h_w, LARCH_V16HI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsrlrn_w_d, LARCH_V8SI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvssrlrn_bu_h, LARCH_UV32QI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvssrlrn_hu_w, LARCH_UV16HI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvssrlrn_wu_d, LARCH_UV8SI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvfrstpi_b, LARCH_V32QI_FTYPE_V32QI_V32QI_UQI), ++ LASX_BUILTIN (xvfrstpi_h, LARCH_V16HI_FTYPE_V16HI_V16HI_UQI), ++ LASX_BUILTIN (xvfrstp_b, LARCH_V32QI_FTYPE_V32QI_V32QI_V32QI), ++ LASX_BUILTIN (xvfrstp_h, LARCH_V16HI_FTYPE_V16HI_V16HI_V16HI), ++ LASX_BUILTIN (xvshuf4i_d, LARCH_V4DI_FTYPE_V4DI_V4DI_USI), ++ LASX_BUILTIN (xvbsrl_v, LARCH_V32QI_FTYPE_V32QI_UQI), ++ LASX_BUILTIN (xvbsll_v, LARCH_V32QI_FTYPE_V32QI_UQI), ++ LASX_BUILTIN (xvextrins_b, LARCH_V32QI_FTYPE_V32QI_V32QI_USI), ++ LASX_BUILTIN (xvextrins_h, LARCH_V16HI_FTYPE_V16HI_V16HI_USI), ++ LASX_BUILTIN (xvextrins_w, LARCH_V8SI_FTYPE_V8SI_V8SI_USI), ++ LASX_BUILTIN (xvextrins_d, LARCH_V4DI_FTYPE_V4DI_V4DI_USI), ++ LASX_BUILTIN (xvmskltz_b, LARCH_V32QI_FTYPE_V32QI), ++ LASX_BUILTIN (xvmskltz_h, LARCH_V16HI_FTYPE_V16HI), ++ LASX_BUILTIN (xvmskltz_w, LARCH_V8SI_FTYPE_V8SI), ++ LASX_BUILTIN (xvmskltz_d, LARCH_V4DI_FTYPE_V4DI), ++ LASX_BUILTIN (xvsigncov_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvsigncov_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsigncov_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsigncov_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvfmadd_s, LARCH_V8SF_FTYPE_V8SF_V8SF_V8SF), ++ 
LASX_BUILTIN (xvfmadd_d, LARCH_V4DF_FTYPE_V4DF_V4DF_V4DF), ++ LASX_BUILTIN (xvfmsub_s, LARCH_V8SF_FTYPE_V8SF_V8SF_V8SF), ++ LASX_BUILTIN (xvfmsub_d, LARCH_V4DF_FTYPE_V4DF_V4DF_V4DF), ++ LASX_BUILTIN (xvfnmadd_s, LARCH_V8SF_FTYPE_V8SF_V8SF_V8SF), ++ LASX_BUILTIN (xvfnmadd_d, LARCH_V4DF_FTYPE_V4DF_V4DF_V4DF), ++ LASX_BUILTIN (xvfnmsub_s, LARCH_V8SF_FTYPE_V8SF_V8SF_V8SF), ++ LASX_BUILTIN (xvfnmsub_d, LARCH_V4DF_FTYPE_V4DF_V4DF_V4DF), ++ LASX_BUILTIN (xvftintrne_w_s, LARCH_V8SI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftintrne_l_d, LARCH_V4DI_FTYPE_V4DF), ++ LASX_BUILTIN (xvftintrp_w_s, LARCH_V8SI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftintrp_l_d, LARCH_V4DI_FTYPE_V4DF), ++ LASX_BUILTIN (xvftintrm_w_s, LARCH_V8SI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftintrm_l_d, LARCH_V4DI_FTYPE_V4DF), ++ LASX_BUILTIN (xvftint_w_d, LARCH_V8SI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvffint_s_l, LARCH_V8SF_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvftintrz_w_d, LARCH_V8SI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvftintrp_w_d, LARCH_V8SI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvftintrm_w_d, LARCH_V8SI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvftintrne_w_d, LARCH_V8SI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvftinth_l_s, LARCH_V4DI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftintl_l_s, LARCH_V4DI_FTYPE_V8SF), ++ LASX_BUILTIN (xvffinth_d_w, LARCH_V4DF_FTYPE_V8SI), ++ LASX_BUILTIN (xvffintl_d_w, LARCH_V4DF_FTYPE_V8SI), ++ LASX_BUILTIN (xvftintrzh_l_s, LARCH_V4DI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftintrzl_l_s, LARCH_V4DI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftintrph_l_s, LARCH_V4DI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftintrpl_l_s, LARCH_V4DI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftintrmh_l_s, LARCH_V4DI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftintrml_l_s, LARCH_V4DI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftintrneh_l_s, LARCH_V4DI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftintrnel_l_s, LARCH_V4DI_FTYPE_V8SF), ++ LASX_BUILTIN (xvfrintrne_s, LARCH_V8SF_FTYPE_V8SF), ++ LASX_BUILTIN (xvfrintrne_d, LARCH_V4DF_FTYPE_V4DF), ++ LASX_BUILTIN (xvfrintrz_s, LARCH_V8SF_FTYPE_V8SF), ++ LASX_BUILTIN 
(xvfrintrz_d, LARCH_V4DF_FTYPE_V4DF), ++ LASX_BUILTIN (xvfrintrp_s, LARCH_V8SF_FTYPE_V8SF), ++ LASX_BUILTIN (xvfrintrp_d, LARCH_V4DF_FTYPE_V4DF), ++ LASX_BUILTIN (xvfrintrm_s, LARCH_V8SF_FTYPE_V8SF), ++ LASX_BUILTIN (xvfrintrm_d, LARCH_V4DF_FTYPE_V4DF), ++ LASX_BUILTIN (xvld, LARCH_V32QI_FTYPE_CVPOINTER_SI), ++ LASX_NO_TARGET_BUILTIN (xvst, LARCH_VOID_FTYPE_V32QI_CVPOINTER_SI), ++ LASX_NO_TARGET_BUILTIN (xvstelm_b, LARCH_VOID_FTYPE_V32QI_CVPOINTER_SI_UQI), ++ LASX_NO_TARGET_BUILTIN (xvstelm_h, LARCH_VOID_FTYPE_V16HI_CVPOINTER_SI_UQI), ++ LASX_NO_TARGET_BUILTIN (xvstelm_w, LARCH_VOID_FTYPE_V8SI_CVPOINTER_SI_UQI), ++ LASX_NO_TARGET_BUILTIN (xvstelm_d, LARCH_VOID_FTYPE_V4DI_CVPOINTER_SI_UQI), ++ LASX_BUILTIN (xvinsve0_w, LARCH_V8SI_FTYPE_V8SI_V8SI_UQI), ++ LASX_BUILTIN (xvinsve0_d, LARCH_V4DI_FTYPE_V4DI_V4DI_UQI), ++ LASX_BUILTIN (xvpickve_w, LARCH_V8SI_FTYPE_V8SI_UQI), ++ LASX_BUILTIN (xvpickve_d, LARCH_V4DI_FTYPE_V4DI_UQI), ++ LASX_BUILTIN (xvpickve_w_f, LARCH_V8SF_FTYPE_V8SF_UQI), ++ LASX_BUILTIN (xvpickve_d_f, LARCH_V4DF_FTYPE_V4DF_UQI), ++ LASX_BUILTIN (xvssrlrn_b_h, LARCH_V32QI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvssrlrn_h_w, LARCH_V16HI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvssrlrn_w_d, LARCH_V8SI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvssrln_b_h, LARCH_V32QI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvssrln_h_w, LARCH_V16HI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvssrln_w_d, LARCH_V8SI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvorn_v, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvldi, LARCH_V4DI_FTYPE_HI), ++ LASX_BUILTIN (xvldx, LARCH_V32QI_FTYPE_CVPOINTER_DI), ++ LASX_NO_TARGET_BUILTIN (xvstx, LARCH_VOID_FTYPE_V32QI_CVPOINTER_DI), ++ LASX_BUILTIN (xvextl_qu_du, LARCH_UV4DI_FTYPE_UV4DI), ++ ++ /* LASX */ ++ LASX_BUILTIN (xvinsgr2vr_w, LARCH_V8SI_FTYPE_V8SI_SI_UQI), ++ LASX_BUILTIN (xvinsgr2vr_d, LARCH_V4DI_FTYPE_V4DI_DI_UQI), ++ ++ LASX_BUILTIN (xvreplve0_b, LARCH_V32QI_FTYPE_V32QI), ++ LASX_BUILTIN (xvreplve0_h, LARCH_V16HI_FTYPE_V16HI), ++ LASX_BUILTIN (xvreplve0_w, 
LARCH_V8SI_FTYPE_V8SI), ++ LASX_BUILTIN (xvreplve0_d, LARCH_V4DI_FTYPE_V4DI), ++ LASX_BUILTIN (xvreplve0_q, LARCH_V32QI_FTYPE_V32QI), ++ LASX_BUILTIN (vext2xv_h_b, LARCH_V16HI_FTYPE_V32QI), ++ LASX_BUILTIN (vext2xv_w_h, LARCH_V8SI_FTYPE_V16HI), ++ LASX_BUILTIN (vext2xv_d_w, LARCH_V4DI_FTYPE_V8SI), ++ LASX_BUILTIN (vext2xv_w_b, LARCH_V8SI_FTYPE_V32QI), ++ LASX_BUILTIN (vext2xv_d_h, LARCH_V4DI_FTYPE_V16HI), ++ LASX_BUILTIN (vext2xv_d_b, LARCH_V4DI_FTYPE_V32QI), ++ LASX_BUILTIN (vext2xv_hu_bu, LARCH_V16HI_FTYPE_V32QI), ++ LASX_BUILTIN (vext2xv_wu_hu, LARCH_V8SI_FTYPE_V16HI), ++ LASX_BUILTIN (vext2xv_du_wu, LARCH_V4DI_FTYPE_V8SI), ++ LASX_BUILTIN (vext2xv_wu_bu, LARCH_V8SI_FTYPE_V32QI), ++ LASX_BUILTIN (vext2xv_du_hu, LARCH_V4DI_FTYPE_V16HI), ++ LASX_BUILTIN (vext2xv_du_bu, LARCH_V4DI_FTYPE_V32QI), ++ LASX_BUILTIN (xvpermi_q, LARCH_V32QI_FTYPE_V32QI_V32QI_USI), ++ LASX_BUILTIN (xvpermi_d, LARCH_V4DI_FTYPE_V4DI_USI), ++ LASX_BUILTIN (xvperm_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN_TEST_BRANCH (xbz_b, LARCH_SI_FTYPE_UV32QI), ++ LASX_BUILTIN_TEST_BRANCH (xbz_h, LARCH_SI_FTYPE_UV16HI), ++ LASX_BUILTIN_TEST_BRANCH (xbz_w, LARCH_SI_FTYPE_UV8SI), ++ LASX_BUILTIN_TEST_BRANCH (xbz_d, LARCH_SI_FTYPE_UV4DI), ++ LASX_BUILTIN_TEST_BRANCH (xbnz_b, LARCH_SI_FTYPE_UV32QI), ++ LASX_BUILTIN_TEST_BRANCH (xbnz_h, LARCH_SI_FTYPE_UV16HI), ++ LASX_BUILTIN_TEST_BRANCH (xbnz_w, LARCH_SI_FTYPE_UV8SI), ++ LASX_BUILTIN_TEST_BRANCH (xbnz_d, LARCH_SI_FTYPE_UV4DI), ++ LASX_BUILTIN_TEST_BRANCH (xbz_v, LARCH_SI_FTYPE_UV32QI), ++ LASX_BUILTIN_TEST_BRANCH (xbnz_v, LARCH_SI_FTYPE_UV32QI), ++ LASX_BUILTIN (xvldrepl_b, LARCH_V32QI_FTYPE_CVPOINTER_SI), ++ LASX_BUILTIN (xvldrepl_h, LARCH_V16HI_FTYPE_CVPOINTER_SI), ++ LASX_BUILTIN (xvldrepl_w, LARCH_V8SI_FTYPE_CVPOINTER_SI), ++ LASX_BUILTIN (xvldrepl_d, LARCH_V4DI_FTYPE_CVPOINTER_SI), ++ LASX_BUILTIN (xvpickve2gr_w, LARCH_SI_FTYPE_V8SI_UQI), ++ LASX_BUILTIN (xvpickve2gr_wu, LARCH_USI_FTYPE_V8SI_UQI), ++ LASX_BUILTIN (xvpickve2gr_d, 
LARCH_DI_FTYPE_V4DI_UQI), ++ LASX_BUILTIN (xvpickve2gr_du, LARCH_UDI_FTYPE_V4DI_UQI), ++ ++ LASX_BUILTIN (xvaddwev_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvaddwev_d_w, LARCH_V4DI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvaddwev_w_h, LARCH_V8SI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvaddwev_h_b, LARCH_V16HI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvaddwev_q_du, LARCH_V4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvaddwev_d_wu, LARCH_V4DI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvaddwev_w_hu, LARCH_V8SI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvaddwev_h_bu, LARCH_V16HI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvsubwev_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvsubwev_d_w, LARCH_V4DI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsubwev_w_h, LARCH_V8SI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsubwev_h_b, LARCH_V16HI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvsubwev_q_du, LARCH_V4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvsubwev_d_wu, LARCH_V4DI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvsubwev_w_hu, LARCH_V8SI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvsubwev_h_bu, LARCH_V16HI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvmulwev_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvmulwev_d_w, LARCH_V4DI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvmulwev_w_h, LARCH_V8SI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvmulwev_h_b, LARCH_V16HI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvmulwev_q_du, LARCH_V4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvmulwev_d_wu, LARCH_V4DI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvmulwev_w_hu, LARCH_V8SI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvmulwev_h_bu, LARCH_V16HI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvaddwod_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvaddwod_d_w, LARCH_V4DI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvaddwod_w_h, LARCH_V8SI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvaddwod_h_b, LARCH_V16HI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvaddwod_q_du, LARCH_V4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvaddwod_d_wu, LARCH_V4DI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvaddwod_w_hu, 
LARCH_V8SI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvaddwod_h_bu, LARCH_V16HI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvsubwod_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvsubwod_d_w, LARCH_V4DI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsubwod_w_h, LARCH_V8SI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsubwod_h_b, LARCH_V16HI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvsubwod_q_du, LARCH_V4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvsubwod_d_wu, LARCH_V4DI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvsubwod_w_hu, LARCH_V8SI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvsubwod_h_bu, LARCH_V16HI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvmulwod_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvmulwod_d_w, LARCH_V4DI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvmulwod_w_h, LARCH_V8SI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvmulwod_h_b, LARCH_V16HI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvmulwod_q_du, LARCH_V4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvmulwod_d_wu, LARCH_V4DI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvmulwod_w_hu, LARCH_V8SI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvmulwod_h_bu, LARCH_V16HI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvaddwev_d_wu_w, LARCH_V4DI_FTYPE_UV8SI_V8SI), ++ LASX_BUILTIN (xvaddwev_w_hu_h, LARCH_V8SI_FTYPE_UV16HI_V16HI), ++ LASX_BUILTIN (xvaddwev_h_bu_b, LARCH_V16HI_FTYPE_UV32QI_V32QI), ++ LASX_BUILTIN (xvmulwev_d_wu_w, LARCH_V4DI_FTYPE_UV8SI_V8SI), ++ LASX_BUILTIN (xvmulwev_w_hu_h, LARCH_V8SI_FTYPE_UV16HI_V16HI), ++ LASX_BUILTIN (xvmulwev_h_bu_b, LARCH_V16HI_FTYPE_UV32QI_V32QI), ++ LASX_BUILTIN (xvaddwod_d_wu_w, LARCH_V4DI_FTYPE_UV8SI_V8SI), ++ LASX_BUILTIN (xvaddwod_w_hu_h, LARCH_V8SI_FTYPE_UV16HI_V16HI), ++ LASX_BUILTIN (xvaddwod_h_bu_b, LARCH_V16HI_FTYPE_UV32QI_V32QI), ++ LASX_BUILTIN (xvmulwod_d_wu_w, LARCH_V4DI_FTYPE_UV8SI_V8SI), ++ LASX_BUILTIN (xvmulwod_w_hu_h, LARCH_V8SI_FTYPE_UV16HI_V16HI), ++ LASX_BUILTIN (xvmulwod_h_bu_b, LARCH_V16HI_FTYPE_UV32QI_V32QI), ++ LASX_BUILTIN (xvhaddw_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvhaddw_qu_du, 
LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvhsubw_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvhsubw_qu_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvmaddwev_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI_V4DI), ++ LASX_BUILTIN (xvmaddwev_d_w, LARCH_V4DI_FTYPE_V4DI_V8SI_V8SI), ++ LASX_BUILTIN (xvmaddwev_w_h, LARCH_V8SI_FTYPE_V8SI_V16HI_V16HI), ++ LASX_BUILTIN (xvmaddwev_h_b, LARCH_V16HI_FTYPE_V16HI_V32QI_V32QI), ++ LASX_BUILTIN (xvmaddwev_q_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI_UV4DI), ++ LASX_BUILTIN (xvmaddwev_d_wu, LARCH_UV4DI_FTYPE_UV4DI_UV8SI_UV8SI), ++ LASX_BUILTIN (xvmaddwev_w_hu, LARCH_UV8SI_FTYPE_UV8SI_UV16HI_UV16HI), ++ LASX_BUILTIN (xvmaddwev_h_bu, LARCH_UV16HI_FTYPE_UV16HI_UV32QI_UV32QI), ++ LASX_BUILTIN (xvmaddwod_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI_V4DI), ++ LASX_BUILTIN (xvmaddwod_d_w, LARCH_V4DI_FTYPE_V4DI_V8SI_V8SI), ++ LASX_BUILTIN (xvmaddwod_w_h, LARCH_V8SI_FTYPE_V8SI_V16HI_V16HI), ++ LASX_BUILTIN (xvmaddwod_h_b, LARCH_V16HI_FTYPE_V16HI_V32QI_V32QI), ++ LASX_BUILTIN (xvmaddwod_q_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI_UV4DI), ++ LASX_BUILTIN (xvmaddwod_d_wu, LARCH_UV4DI_FTYPE_UV4DI_UV8SI_UV8SI), ++ LASX_BUILTIN (xvmaddwod_w_hu, LARCH_UV8SI_FTYPE_UV8SI_UV16HI_UV16HI), ++ LASX_BUILTIN (xvmaddwod_h_bu, LARCH_UV16HI_FTYPE_UV16HI_UV32QI_UV32QI), ++ LASX_BUILTIN (xvmaddwev_q_du_d, LARCH_V4DI_FTYPE_V4DI_UV4DI_V4DI), ++ LASX_BUILTIN (xvmaddwev_d_wu_w, LARCH_V4DI_FTYPE_V4DI_UV8SI_V8SI), ++ LASX_BUILTIN (xvmaddwev_w_hu_h, LARCH_V8SI_FTYPE_V8SI_UV16HI_V16HI), ++ LASX_BUILTIN (xvmaddwev_h_bu_b, LARCH_V16HI_FTYPE_V16HI_UV32QI_V32QI), ++ LASX_BUILTIN (xvmaddwod_q_du_d, LARCH_V4DI_FTYPE_V4DI_UV4DI_V4DI), ++ LASX_BUILTIN (xvmaddwod_d_wu_w, LARCH_V4DI_FTYPE_V4DI_UV8SI_V8SI), ++ LASX_BUILTIN (xvmaddwod_w_hu_h, LARCH_V8SI_FTYPE_V8SI_UV16HI_V16HI), ++ LASX_BUILTIN (xvmaddwod_h_bu_b, LARCH_V16HI_FTYPE_V16HI_UV32QI_V32QI), ++ LASX_BUILTIN (xvrotr_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvrotr_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvrotr_w, 
LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvrotr_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvadd_q, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvsub_q, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvaddwev_q_du_d, LARCH_V4DI_FTYPE_UV4DI_V4DI), ++ LASX_BUILTIN (xvaddwod_q_du_d, LARCH_V4DI_FTYPE_UV4DI_V4DI), ++ LASX_BUILTIN (xvmulwev_q_du_d, LARCH_V4DI_FTYPE_UV4DI_V4DI), ++ LASX_BUILTIN (xvmulwod_q_du_d, LARCH_V4DI_FTYPE_UV4DI_V4DI), ++ LASX_BUILTIN (xvmskgez_b, LARCH_V32QI_FTYPE_V32QI), ++ LASX_BUILTIN (xvmsknz_b, LARCH_V32QI_FTYPE_V32QI), ++ LASX_BUILTIN (xvexth_h_b, LARCH_V16HI_FTYPE_V32QI), ++ LASX_BUILTIN (xvexth_w_h, LARCH_V8SI_FTYPE_V16HI), ++ LASX_BUILTIN (xvexth_d_w, LARCH_V4DI_FTYPE_V8SI), ++ LASX_BUILTIN (xvexth_q_d, LARCH_V4DI_FTYPE_V4DI), ++ LASX_BUILTIN (xvexth_hu_bu, LARCH_UV16HI_FTYPE_UV32QI), ++ LASX_BUILTIN (xvexth_wu_hu, LARCH_UV8SI_FTYPE_UV16HI), ++ LASX_BUILTIN (xvexth_du_wu, LARCH_UV4DI_FTYPE_UV8SI), ++ LASX_BUILTIN (xvexth_qu_du, LARCH_UV4DI_FTYPE_UV4DI), ++ LASX_BUILTIN (xvrotri_b, LARCH_V32QI_FTYPE_V32QI_UQI), ++ LASX_BUILTIN (xvrotri_h, LARCH_V16HI_FTYPE_V16HI_UQI), ++ LASX_BUILTIN (xvrotri_w, LARCH_V8SI_FTYPE_V8SI_UQI), ++ LASX_BUILTIN (xvrotri_d, LARCH_V4DI_FTYPE_V4DI_UQI), ++ LASX_BUILTIN (xvextl_q_d, LARCH_V4DI_FTYPE_V4DI), ++ LASX_BUILTIN (xvsrlni_b_h, LARCH_V32QI_FTYPE_V32QI_V32QI_USI), ++ LASX_BUILTIN (xvsrlni_h_w, LARCH_V16HI_FTYPE_V16HI_V16HI_USI), ++ LASX_BUILTIN (xvsrlni_w_d, LARCH_V8SI_FTYPE_V8SI_V8SI_USI), ++ LASX_BUILTIN (xvsrlni_d_q, LARCH_V4DI_FTYPE_V4DI_V4DI_USI), ++ LASX_BUILTIN (xvsrlrni_b_h, LARCH_V32QI_FTYPE_V32QI_V32QI_USI), ++ LASX_BUILTIN (xvsrlrni_h_w, LARCH_V16HI_FTYPE_V16HI_V16HI_USI), ++ LASX_BUILTIN (xvsrlrni_w_d, LARCH_V8SI_FTYPE_V8SI_V8SI_USI), ++ LASX_BUILTIN (xvsrlrni_d_q, LARCH_V4DI_FTYPE_V4DI_V4DI_USI), ++ LASX_BUILTIN (xvssrlni_b_h, LARCH_V32QI_FTYPE_V32QI_V32QI_USI), ++ LASX_BUILTIN (xvssrlni_h_w, LARCH_V16HI_FTYPE_V16HI_V16HI_USI), ++ LASX_BUILTIN (xvssrlni_w_d, 
LARCH_V8SI_FTYPE_V8SI_V8SI_USI), ++ LASX_BUILTIN (xvssrlni_d_q, LARCH_V4DI_FTYPE_V4DI_V4DI_USI), ++ LASX_BUILTIN (xvssrlni_bu_h, LARCH_UV32QI_FTYPE_UV32QI_V32QI_USI), ++ LASX_BUILTIN (xvssrlni_hu_w, LARCH_UV16HI_FTYPE_UV16HI_V16HI_USI), ++ LASX_BUILTIN (xvssrlni_wu_d, LARCH_UV8SI_FTYPE_UV8SI_V8SI_USI), ++ LASX_BUILTIN (xvssrlni_du_q, LARCH_UV4DI_FTYPE_UV4DI_V4DI_USI), ++ LASX_BUILTIN (xvssrlrni_b_h, LARCH_V32QI_FTYPE_V32QI_V32QI_USI), ++ LASX_BUILTIN (xvssrlrni_h_w, LARCH_V16HI_FTYPE_V16HI_V16HI_USI), ++ LASX_BUILTIN (xvssrlrni_w_d, LARCH_V8SI_FTYPE_V8SI_V8SI_USI), ++ LASX_BUILTIN (xvssrlrni_d_q, LARCH_V4DI_FTYPE_V4DI_V4DI_USI), ++ LASX_BUILTIN (xvssrlrni_bu_h, LARCH_UV32QI_FTYPE_UV32QI_V32QI_USI), ++ LASX_BUILTIN (xvssrlrni_hu_w, LARCH_UV16HI_FTYPE_UV16HI_V16HI_USI), ++ LASX_BUILTIN (xvssrlrni_wu_d, LARCH_UV8SI_FTYPE_UV8SI_V8SI_USI), ++ LASX_BUILTIN (xvssrlrni_du_q, LARCH_UV4DI_FTYPE_UV4DI_V4DI_USI), ++ LASX_BUILTIN (xvsrani_b_h, LARCH_V32QI_FTYPE_V32QI_V32QI_USI), ++ LASX_BUILTIN (xvsrani_h_w, LARCH_V16HI_FTYPE_V16HI_V16HI_USI), ++ LASX_BUILTIN (xvsrani_w_d, LARCH_V8SI_FTYPE_V8SI_V8SI_USI), ++ LASX_BUILTIN (xvsrani_d_q, LARCH_V4DI_FTYPE_V4DI_V4DI_USI), ++ LASX_BUILTIN (xvsrarni_b_h, LARCH_V32QI_FTYPE_V32QI_V32QI_USI), ++ LASX_BUILTIN (xvsrarni_h_w, LARCH_V16HI_FTYPE_V16HI_V16HI_USI), ++ LASX_BUILTIN (xvsrarni_w_d, LARCH_V8SI_FTYPE_V8SI_V8SI_USI), ++ LASX_BUILTIN (xvsrarni_d_q, LARCH_V4DI_FTYPE_V4DI_V4DI_USI), ++ LASX_BUILTIN (xvssrani_b_h, LARCH_V32QI_FTYPE_V32QI_V32QI_USI), ++ LASX_BUILTIN (xvssrani_h_w, LARCH_V16HI_FTYPE_V16HI_V16HI_USI), ++ LASX_BUILTIN (xvssrani_w_d, LARCH_V8SI_FTYPE_V8SI_V8SI_USI), ++ LASX_BUILTIN (xvssrani_d_q, LARCH_V4DI_FTYPE_V4DI_V4DI_USI), ++ LASX_BUILTIN (xvssrani_bu_h, LARCH_UV32QI_FTYPE_UV32QI_V32QI_USI), ++ LASX_BUILTIN (xvssrani_hu_w, LARCH_UV16HI_FTYPE_UV16HI_V16HI_USI), ++ LASX_BUILTIN (xvssrani_wu_d, LARCH_UV8SI_FTYPE_UV8SI_V8SI_USI), ++ LASX_BUILTIN (xvssrani_du_q, LARCH_UV4DI_FTYPE_UV4DI_V4DI_USI), ++ LASX_BUILTIN 
(xvssrarni_b_h, LARCH_V32QI_FTYPE_V32QI_V32QI_USI), ++ LASX_BUILTIN (xvssrarni_h_w, LARCH_V16HI_FTYPE_V16HI_V16HI_USI), ++ LASX_BUILTIN (xvssrarni_w_d, LARCH_V8SI_FTYPE_V8SI_V8SI_USI), ++ LASX_BUILTIN (xvssrarni_d_q, LARCH_V4DI_FTYPE_V4DI_V4DI_USI), ++ LASX_BUILTIN (xvssrarni_bu_h, LARCH_UV32QI_FTYPE_UV32QI_V32QI_USI), ++ LASX_BUILTIN (xvssrarni_hu_w, LARCH_UV16HI_FTYPE_UV16HI_V16HI_USI), ++ LASX_BUILTIN (xvssrarni_wu_d, LARCH_UV8SI_FTYPE_UV8SI_V8SI_USI), ++ LASX_BUILTIN (xvssrarni_du_q, LARCH_UV4DI_FTYPE_UV4DI_V4DI_USI) + }; + + /* Index I is the function declaration for loongarch_builtins[I], or null if +@@ -1446,11 +2502,15 @@ loongarch_builtin_vectorized_function (unsigned int fn, tree type_out, + { + if (out_n == 2 && in_n == 2) + return LARCH_GET_BUILTIN (lsx_vfrintrp_d); ++ if (out_n == 4 && in_n == 4) ++ return LARCH_GET_BUILTIN (lasx_xvfrintrp_d); + } + if (out_mode == SFmode && in_mode == SFmode) + { + if (out_n == 4 && in_n == 4) + return LARCH_GET_BUILTIN (lsx_vfrintrp_s); ++ if (out_n == 8 && in_n == 8) ++ return LARCH_GET_BUILTIN (lasx_xvfrintrp_s); + } + break; + +@@ -1459,11 +2519,15 @@ loongarch_builtin_vectorized_function (unsigned int fn, tree type_out, + { + if (out_n == 2 && in_n == 2) + return LARCH_GET_BUILTIN (lsx_vfrintrz_d); ++ if (out_n == 4 && in_n == 4) ++ return LARCH_GET_BUILTIN (lasx_xvfrintrz_d); + } + if (out_mode == SFmode && in_mode == SFmode) + { + if (out_n == 4 && in_n == 4) + return LARCH_GET_BUILTIN (lsx_vfrintrz_s); ++ if (out_n == 8 && in_n == 8) ++ return LARCH_GET_BUILTIN (lasx_xvfrintrz_s); + } + break; + +@@ -1473,11 +2537,15 @@ loongarch_builtin_vectorized_function (unsigned int fn, tree type_out, + { + if (out_n == 2 && in_n == 2) + return LARCH_GET_BUILTIN (lsx_vfrint_d); ++ if (out_n == 4 && in_n == 4) ++ return LARCH_GET_BUILTIN (lasx_xvfrint_d); + } + if (out_mode == SFmode && in_mode == SFmode) + { + if (out_n == 4 && in_n == 4) + return LARCH_GET_BUILTIN (lsx_vfrint_s); ++ if (out_n == 8 && in_n == 8) ++ return 
LARCH_GET_BUILTIN (lasx_xvfrint_s); + } + break; + +@@ -1486,11 +2554,15 @@ loongarch_builtin_vectorized_function (unsigned int fn, tree type_out, + { + if (out_n == 2 && in_n == 2) + return LARCH_GET_BUILTIN (lsx_vfrintrm_d); ++ if (out_n == 4 && in_n == 4) ++ return LARCH_GET_BUILTIN (lasx_xvfrintrm_d); + } + if (out_mode == SFmode && in_mode == SFmode) + { + if (out_n == 4 && in_n == 4) + return LARCH_GET_BUILTIN (lsx_vfrintrm_s); ++ if (out_n == 8 && in_n == 8) ++ return LARCH_GET_BUILTIN (lasx_xvfrintrm_s); + } + break; + +@@ -1565,6 +2637,30 @@ loongarch_expand_builtin_insn (enum insn_code icode, unsigned int nops, + case CODE_FOR_lsx_vsubi_hu: + case CODE_FOR_lsx_vsubi_wu: + case CODE_FOR_lsx_vsubi_du: ++ case CODE_FOR_lasx_xvaddi_bu: ++ case CODE_FOR_lasx_xvaddi_hu: ++ case CODE_FOR_lasx_xvaddi_wu: ++ case CODE_FOR_lasx_xvaddi_du: ++ case CODE_FOR_lasx_xvslti_bu: ++ case CODE_FOR_lasx_xvslti_hu: ++ case CODE_FOR_lasx_xvslti_wu: ++ case CODE_FOR_lasx_xvslti_du: ++ case CODE_FOR_lasx_xvslei_bu: ++ case CODE_FOR_lasx_xvslei_hu: ++ case CODE_FOR_lasx_xvslei_wu: ++ case CODE_FOR_lasx_xvslei_du: ++ case CODE_FOR_lasx_xvmaxi_bu: ++ case CODE_FOR_lasx_xvmaxi_hu: ++ case CODE_FOR_lasx_xvmaxi_wu: ++ case CODE_FOR_lasx_xvmaxi_du: ++ case CODE_FOR_lasx_xvmini_bu: ++ case CODE_FOR_lasx_xvmini_hu: ++ case CODE_FOR_lasx_xvmini_wu: ++ case CODE_FOR_lasx_xvmini_du: ++ case CODE_FOR_lasx_xvsubi_bu: ++ case CODE_FOR_lasx_xvsubi_hu: ++ case CODE_FOR_lasx_xvsubi_wu: ++ case CODE_FOR_lasx_xvsubi_du: + gcc_assert (has_target_p && nops == 3); + /* We only generate a vector of constants iff the second argument + is an immediate. We also validate the range of the immediate. 
*/ +@@ -1603,6 +2699,26 @@ loongarch_expand_builtin_insn (enum insn_code icode, unsigned int nops, + case CODE_FOR_lsx_vmini_h: + case CODE_FOR_lsx_vmini_w: + case CODE_FOR_lsx_vmini_d: ++ case CODE_FOR_lasx_xvseqi_b: ++ case CODE_FOR_lasx_xvseqi_h: ++ case CODE_FOR_lasx_xvseqi_w: ++ case CODE_FOR_lasx_xvseqi_d: ++ case CODE_FOR_lasx_xvslti_b: ++ case CODE_FOR_lasx_xvslti_h: ++ case CODE_FOR_lasx_xvslti_w: ++ case CODE_FOR_lasx_xvslti_d: ++ case CODE_FOR_lasx_xvslei_b: ++ case CODE_FOR_lasx_xvslei_h: ++ case CODE_FOR_lasx_xvslei_w: ++ case CODE_FOR_lasx_xvslei_d: ++ case CODE_FOR_lasx_xvmaxi_b: ++ case CODE_FOR_lasx_xvmaxi_h: ++ case CODE_FOR_lasx_xvmaxi_w: ++ case CODE_FOR_lasx_xvmaxi_d: ++ case CODE_FOR_lasx_xvmini_b: ++ case CODE_FOR_lasx_xvmini_h: ++ case CODE_FOR_lasx_xvmini_w: ++ case CODE_FOR_lasx_xvmini_d: + gcc_assert (has_target_p && nops == 3); + /* We only generate a vector of constants iff the second argument + is an immediate. We also validate the range of the immediate. */ +@@ -1625,6 +2741,10 @@ loongarch_expand_builtin_insn (enum insn_code icode, unsigned int nops, + case CODE_FOR_lsx_vori_b: + case CODE_FOR_lsx_vnori_b: + case CODE_FOR_lsx_vxori_b: ++ case CODE_FOR_lasx_xvandi_b: ++ case CODE_FOR_lasx_xvori_b: ++ case CODE_FOR_lasx_xvnori_b: ++ case CODE_FOR_lasx_xvxori_b: + gcc_assert (has_target_p && nops == 3); + if (!CONST_INT_P (ops[2].value)) + break; +@@ -1634,6 +2754,7 @@ loongarch_expand_builtin_insn (enum insn_code icode, unsigned int nops, + break; + + case CODE_FOR_lsx_vbitseli_b: ++ case CODE_FOR_lasx_xvbitseli_b: + gcc_assert (has_target_p && nops == 4); + if (!CONST_INT_P (ops[3].value)) + break; +@@ -1646,6 +2767,10 @@ loongarch_expand_builtin_insn (enum insn_code icode, unsigned int nops, + case CODE_FOR_lsx_vreplgr2vr_h: + case CODE_FOR_lsx_vreplgr2vr_w: + case CODE_FOR_lsx_vreplgr2vr_d: ++ case CODE_FOR_lasx_xvreplgr2vr_b: ++ case CODE_FOR_lasx_xvreplgr2vr_h: ++ case CODE_FOR_lasx_xvreplgr2vr_w: ++ case 
CODE_FOR_lasx_xvreplgr2vr_d: + /* Map the built-ins to vector fill operations. We need fix up the mode + for the element being inserted. */ + gcc_assert (has_target_p && nops == 2); +@@ -1674,6 +2799,26 @@ loongarch_expand_builtin_insn (enum insn_code icode, unsigned int nops, + case CODE_FOR_lsx_vpickod_b: + case CODE_FOR_lsx_vpickod_h: + case CODE_FOR_lsx_vpickod_w: ++ case CODE_FOR_lasx_xvilvh_b: ++ case CODE_FOR_lasx_xvilvh_h: ++ case CODE_FOR_lasx_xvilvh_w: ++ case CODE_FOR_lasx_xvilvh_d: ++ case CODE_FOR_lasx_xvilvl_b: ++ case CODE_FOR_lasx_xvilvl_h: ++ case CODE_FOR_lasx_xvilvl_w: ++ case CODE_FOR_lasx_xvilvl_d: ++ case CODE_FOR_lasx_xvpackev_b: ++ case CODE_FOR_lasx_xvpackev_h: ++ case CODE_FOR_lasx_xvpackev_w: ++ case CODE_FOR_lasx_xvpackod_b: ++ case CODE_FOR_lasx_xvpackod_h: ++ case CODE_FOR_lasx_xvpackod_w: ++ case CODE_FOR_lasx_xvpickev_b: ++ case CODE_FOR_lasx_xvpickev_h: ++ case CODE_FOR_lasx_xvpickev_w: ++ case CODE_FOR_lasx_xvpickod_b: ++ case CODE_FOR_lasx_xvpickod_h: ++ case CODE_FOR_lasx_xvpickod_w: + /* Swap the operands 1 and 2 for interleave operations. Built-ins follow + convention of ISA, which have op1 as higher component and op2 as lower + component. 
However, the VEC_PERM op in tree and vec_concat in RTL +@@ -1695,6 +2840,18 @@ loongarch_expand_builtin_insn (enum insn_code icode, unsigned int nops, + case CODE_FOR_lsx_vsrli_h: + case CODE_FOR_lsx_vsrli_w: + case CODE_FOR_lsx_vsrli_d: ++ case CODE_FOR_lasx_xvslli_b: ++ case CODE_FOR_lasx_xvslli_h: ++ case CODE_FOR_lasx_xvslli_w: ++ case CODE_FOR_lasx_xvslli_d: ++ case CODE_FOR_lasx_xvsrai_b: ++ case CODE_FOR_lasx_xvsrai_h: ++ case CODE_FOR_lasx_xvsrai_w: ++ case CODE_FOR_lasx_xvsrai_d: ++ case CODE_FOR_lasx_xvsrli_b: ++ case CODE_FOR_lasx_xvsrli_h: ++ case CODE_FOR_lasx_xvsrli_w: ++ case CODE_FOR_lasx_xvsrli_d: + gcc_assert (has_target_p && nops == 3); + if (CONST_INT_P (ops[2].value)) + { +@@ -1755,6 +2912,25 @@ loongarch_expand_builtin_insn (enum insn_code icode, unsigned int nops, + INTVAL (ops[2].value)); + break; + ++ case CODE_FOR_lasx_xvinsgr2vr_w: ++ case CODE_FOR_lasx_xvinsgr2vr_d: ++ /* Map the built-ins to insert operations. We need to swap operands, ++ fix up the mode for the element being inserted, and generate ++ a bit mask for vec_merge. 
*/ ++ gcc_assert (has_target_p && nops == 4); ++ std::swap (ops[1], ops[2]); ++ imode = GET_MODE_INNER (ops[0].mode); ++ ops[1].value = lowpart_subreg (imode, ops[1].value, ops[1].mode); ++ ops[1].mode = imode; ++ rangelo = 0; ++ rangehi = GET_MODE_NUNITS (ops[0].mode) - 1; ++ if (CONST_INT_P (ops[3].value) ++ && IN_RANGE (INTVAL (ops[3].value), rangelo, rangehi)) ++ ops[3].value = GEN_INT (1 << INTVAL (ops[3].value)); ++ else ++ error_opno = 2; ++ break; ++ + default: + break; + } +@@ -1864,12 +3040,14 @@ loongarch_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED, + { + case LARCH_BUILTIN_DIRECT: + case LARCH_BUILTIN_LSX: ++ case LARCH_BUILTIN_LASX: + return loongarch_expand_builtin_direct (d->icode, target, exp, true); + + case LARCH_BUILTIN_DIRECT_NO_TARGET: + return loongarch_expand_builtin_direct (d->icode, target, exp, false); + + case LARCH_BUILTIN_LSX_TEST_BRANCH: ++ case LARCH_BUILTIN_LASX_TEST_BRANCH: + return loongarch_expand_builtin_lsx_test_branch (d->icode, exp); + } + gcc_unreachable (); +diff --git a/gcc/config/loongarch/loongarch-ftypes.def b/gcc/config/loongarch/loongarch-ftypes.def +index 2b0d50892..c7f849e88 100644 +--- a/gcc/config/loongarch/loongarch-ftypes.def ++++ b/gcc/config/loongarch/loongarch-ftypes.def +@@ -67,6 +67,7 @@ DEF_LARCH_FTYPE (3, (UDI, UDI, UDI, USI)) + DEF_LARCH_FTYPE (1, (DF, DF)) + DEF_LARCH_FTYPE (2, (DF, DF, DF)) + DEF_LARCH_FTYPE (1, (DF, V2DF)) ++DEF_LARCH_FTYPE (1, (DF, V4DF)) + + DEF_LARCH_FTYPE (1, (DI, DI)) + DEF_LARCH_FTYPE (1, (DI, SI)) +@@ -83,6 +84,7 @@ DEF_LARCH_FTYPE (2, (DI, SI, SI)) + DEF_LARCH_FTYPE (2, (DI, USI, USI)) + + DEF_LARCH_FTYPE (2, (DI, V2DI, UQI)) ++DEF_LARCH_FTYPE (2, (DI, V4DI, UQI)) + + DEF_LARCH_FTYPE (2, (INT, DF, DF)) + DEF_LARCH_FTYPE (2, (INT, SF, SF)) +@@ -104,21 +106,31 @@ DEF_LARCH_FTYPE (3, (SI, SI, SI, SI)) + DEF_LARCH_FTYPE (3, (SI, SI, SI, QI)) + DEF_LARCH_FTYPE (1, (SI, UQI)) + DEF_LARCH_FTYPE (1, (SI, UV16QI)) ++DEF_LARCH_FTYPE (1, (SI, UV32QI)) + 
DEF_LARCH_FTYPE (1, (SI, UV2DI)) ++DEF_LARCH_FTYPE (1, (SI, UV4DI)) + DEF_LARCH_FTYPE (1, (SI, UV4SI)) ++DEF_LARCH_FTYPE (1, (SI, UV8SI)) + DEF_LARCH_FTYPE (1, (SI, UV8HI)) ++DEF_LARCH_FTYPE (1, (SI, UV16HI)) + DEF_LARCH_FTYPE (2, (SI, V16QI, UQI)) ++DEF_LARCH_FTYPE (2, (SI, V32QI, UQI)) + DEF_LARCH_FTYPE (1, (SI, V2HI)) + DEF_LARCH_FTYPE (2, (SI, V2HI, V2HI)) + DEF_LARCH_FTYPE (1, (SI, V4QI)) + DEF_LARCH_FTYPE (2, (SI, V4QI, V4QI)) + DEF_LARCH_FTYPE (2, (SI, V4SI, UQI)) ++DEF_LARCH_FTYPE (2, (SI, V8SI, UQI)) + DEF_LARCH_FTYPE (2, (SI, V8HI, UQI)) + DEF_LARCH_FTYPE (1, (SI, VOID)) + + DEF_LARCH_FTYPE (2, (UDI, UDI, UDI)) ++DEF_LARCH_FTYPE (2, (USI, V32QI, UQI)) + DEF_LARCH_FTYPE (2, (UDI, UV2SI, UV2SI)) ++DEF_LARCH_FTYPE (2, (USI, V8SI, UQI)) + DEF_LARCH_FTYPE (2, (UDI, V2DI, UQI)) ++DEF_LARCH_FTYPE (2, (USI, V16HI, UQI)) ++DEF_LARCH_FTYPE (2, (UDI, V4DI, UQI)) + + DEF_LARCH_FTYPE (2, (USI, V16QI, UQI)) + DEF_LARCH_FTYPE (2, (USI, V4SI, UQI)) +@@ -142,6 +154,23 @@ DEF_LARCH_FTYPE (2, (UV2DI, UV2DI, V2DI)) + DEF_LARCH_FTYPE (2, (UV2DI, UV4SI, UV4SI)) + DEF_LARCH_FTYPE (1, (UV2DI, V2DF)) + ++DEF_LARCH_FTYPE (2, (UV32QI, UV32QI, UQI)) ++DEF_LARCH_FTYPE (2, (UV32QI, UV32QI, USI)) ++DEF_LARCH_FTYPE (2, (UV32QI, UV32QI, UV32QI)) ++DEF_LARCH_FTYPE (3, (UV32QI, UV32QI, UV32QI, UQI)) ++DEF_LARCH_FTYPE (3, (UV32QI, UV32QI, UV32QI, USI)) ++DEF_LARCH_FTYPE (3, (UV32QI, UV32QI, UV32QI, UV32QI)) ++DEF_LARCH_FTYPE (2, (UV32QI, UV32QI, V32QI)) ++ ++DEF_LARCH_FTYPE (2, (UV4DI, UV4DI, UQI)) ++DEF_LARCH_FTYPE (2, (UV4DI, UV4DI, UV4DI)) ++DEF_LARCH_FTYPE (3, (UV4DI, UV4DI, UV4DI, UQI)) ++DEF_LARCH_FTYPE (3, (UV4DI, UV4DI, UV4DI, UV4DI)) ++DEF_LARCH_FTYPE (3, (UV4DI, UV4DI, UV8SI, UV8SI)) ++DEF_LARCH_FTYPE (2, (UV4DI, UV4DI, V4DI)) ++DEF_LARCH_FTYPE (2, (UV4DI, UV8SI, UV8SI)) ++DEF_LARCH_FTYPE (1, (UV4DI, V4DF)) ++ + DEF_LARCH_FTYPE (2, (UV2SI, UV2SI, UQI)) + DEF_LARCH_FTYPE (2, (UV2SI, UV2SI, UV2SI)) + +@@ -170,7 +199,22 @@ DEF_LARCH_FTYPE (3, (UV8HI, UV8HI, UV8HI, UQI)) + 
DEF_LARCH_FTYPE (3, (UV8HI, UV8HI, UV8HI, UV8HI)) + DEF_LARCH_FTYPE (2, (UV8HI, UV8HI, V8HI)) + +- ++DEF_LARCH_FTYPE (2, (UV8SI, UV8SI, UQI)) ++DEF_LARCH_FTYPE (2, (UV8SI, UV8SI, UV8SI)) ++DEF_LARCH_FTYPE (3, (UV8SI, UV8SI, UV8SI, UQI)) ++DEF_LARCH_FTYPE (3, (UV8SI, UV8SI, UV8SI, UV8SI)) ++DEF_LARCH_FTYPE (3, (UV8SI, UV8SI, UV16HI, UV16HI)) ++DEF_LARCH_FTYPE (2, (UV8SI, UV8SI, V8SI)) ++DEF_LARCH_FTYPE (2, (UV8SI, UV16HI, UV16HI)) ++DEF_LARCH_FTYPE (1, (UV8SI, V8SF)) ++ ++DEF_LARCH_FTYPE (2, (UV16HI, UV32QI, UV32QI)) ++DEF_LARCH_FTYPE (2, (UV16HI, UV16HI, UQI)) ++DEF_LARCH_FTYPE (3, (UV16HI, UV16HI, UV32QI, UV32QI)) ++DEF_LARCH_FTYPE (2, (UV16HI, UV16HI, UV16HI)) ++DEF_LARCH_FTYPE (3, (UV16HI, UV16HI, UV16HI, UQI)) ++DEF_LARCH_FTYPE (3, (UV16HI, UV16HI, UV16HI, UV16HI)) ++DEF_LARCH_FTYPE (2, (UV16HI, UV16HI, V16HI)) + + DEF_LARCH_FTYPE (2, (UV8QI, UV4HI, UV4HI)) + DEF_LARCH_FTYPE (1, (UV8QI, UV8QI)) +@@ -196,6 +240,25 @@ DEF_LARCH_FTYPE (4, (V16QI, V16QI, V16QI, UQI, UQI)) + DEF_LARCH_FTYPE (3, (V16QI, V16QI, V16QI, USI)) + DEF_LARCH_FTYPE (3, (V16QI, V16QI, V16QI, V16QI)) + ++DEF_LARCH_FTYPE (2, (V32QI, CVPOINTER, SI)) ++DEF_LARCH_FTYPE (2, (V32QI, CVPOINTER, DI)) ++DEF_LARCH_FTYPE (1, (V32QI, HI)) ++DEF_LARCH_FTYPE (1, (V32QI, SI)) ++DEF_LARCH_FTYPE (2, (V32QI, UV32QI, UQI)) ++DEF_LARCH_FTYPE (2, (V32QI, UV32QI, UV32QI)) ++DEF_LARCH_FTYPE (1, (V32QI, V32QI)) ++DEF_LARCH_FTYPE (2, (V32QI, V32QI, QI)) ++DEF_LARCH_FTYPE (2, (V32QI, V32QI, SI)) ++DEF_LARCH_FTYPE (2, (V32QI, V32QI, UQI)) ++DEF_LARCH_FTYPE (2, (V32QI, V32QI, USI)) ++DEF_LARCH_FTYPE (3, (V32QI, V32QI, SI, UQI)) ++DEF_LARCH_FTYPE (3, (V32QI, V32QI, UQI, V32QI)) ++DEF_LARCH_FTYPE (2, (V32QI, V32QI, V32QI)) ++DEF_LARCH_FTYPE (3, (V32QI, V32QI, V32QI, SI)) ++DEF_LARCH_FTYPE (3, (V32QI, V32QI, V32QI, UQI)) ++DEF_LARCH_FTYPE (4, (V32QI, V32QI, V32QI, UQI, UQI)) ++DEF_LARCH_FTYPE (3, (V32QI, V32QI, V32QI, USI)) ++DEF_LARCH_FTYPE (3, (V32QI, V32QI, V32QI, V32QI)) + + DEF_LARCH_FTYPE (1, (V2DF, DF)) + 
DEF_LARCH_FTYPE (1, (V2DF, UV2DI)) +@@ -207,6 +270,16 @@ DEF_LARCH_FTYPE (1, (V2DF, V2DI)) + DEF_LARCH_FTYPE (1, (V2DF, V4SF)) + DEF_LARCH_FTYPE (1, (V2DF, V4SI)) + ++DEF_LARCH_FTYPE (1, (V4DF, DF)) ++DEF_LARCH_FTYPE (1, (V4DF, UV4DI)) ++DEF_LARCH_FTYPE (1, (V4DF, V4DF)) ++DEF_LARCH_FTYPE (2, (V4DF, V4DF, V4DF)) ++DEF_LARCH_FTYPE (3, (V4DF, V4DF, V4DF, V4DF)) ++DEF_LARCH_FTYPE (2, (V4DF, V4DF, V4DI)) ++DEF_LARCH_FTYPE (1, (V4DF, V4DI)) ++DEF_LARCH_FTYPE (1, (V4DF, V8SF)) ++DEF_LARCH_FTYPE (1, (V4DF, V8SI)) ++ + DEF_LARCH_FTYPE (2, (V2DI, CVPOINTER, SI)) + DEF_LARCH_FTYPE (1, (V2DI, DI)) + DEF_LARCH_FTYPE (1, (V2DI, HI)) +@@ -233,6 +306,32 @@ DEF_LARCH_FTYPE (3, (V2DI, V2DI, V2DI, V2DI)) + DEF_LARCH_FTYPE (3, (V2DI, V2DI, V4SI, V4SI)) + DEF_LARCH_FTYPE (2, (V2DI, V4SI, V4SI)) + ++DEF_LARCH_FTYPE (2, (V4DI, CVPOINTER, SI)) ++DEF_LARCH_FTYPE (1, (V4DI, DI)) ++DEF_LARCH_FTYPE (1, (V4DI, HI)) ++DEF_LARCH_FTYPE (2, (V4DI, UV4DI, UQI)) ++DEF_LARCH_FTYPE (2, (V4DI, UV4DI, UV4DI)) ++DEF_LARCH_FTYPE (2, (V4DI, UV8SI, UV8SI)) ++DEF_LARCH_FTYPE (1, (V4DI, V4DF)) ++DEF_LARCH_FTYPE (2, (V4DI, V4DF, V4DF)) ++DEF_LARCH_FTYPE (1, (V4DI, V4DI)) ++DEF_LARCH_FTYPE (1, (UV4DI, UV4DI)) ++DEF_LARCH_FTYPE (2, (V4DI, V4DI, QI)) ++DEF_LARCH_FTYPE (2, (V4DI, V4DI, SI)) ++DEF_LARCH_FTYPE (2, (V4DI, V4DI, UQI)) ++DEF_LARCH_FTYPE (2, (V4DI, V4DI, USI)) ++DEF_LARCH_FTYPE (3, (V4DI, V4DI, DI, UQI)) ++DEF_LARCH_FTYPE (3, (V4DI, V4DI, UQI, V4DI)) ++DEF_LARCH_FTYPE (3, (V4DI, V4DI, UV8SI, UV8SI)) ++DEF_LARCH_FTYPE (2, (V4DI, V4DI, V4DI)) ++DEF_LARCH_FTYPE (3, (V4DI, V4DI, V4DI, SI)) ++DEF_LARCH_FTYPE (3, (V4DI, V4DI, V4DI, USI)) ++DEF_LARCH_FTYPE (3, (V4DI, V4DI, V4DI, UQI)) ++DEF_LARCH_FTYPE (4, (V4DI, V4DI, V4DI, UQI, UQI)) ++DEF_LARCH_FTYPE (3, (V4DI, V4DI, V4DI, V4DI)) ++DEF_LARCH_FTYPE (3, (V4DI, V4DI, V8SI, V8SI)) ++DEF_LARCH_FTYPE (2, (V4DI, V8SI, V8SI)) ++ + DEF_LARCH_FTYPE (1, (V2HI, SI)) + DEF_LARCH_FTYPE (2, (V2HI, SI, SI)) + DEF_LARCH_FTYPE (3, (V2HI, SI, SI, SI)) +@@ -274,6 +373,17 @@ 
DEF_LARCH_FTYPE (3, (V4SF, V4SF, V4SF, V4SF)) + DEF_LARCH_FTYPE (2, (V4SF, V4SF, V4SI)) + DEF_LARCH_FTYPE (1, (V4SF, V4SI)) + DEF_LARCH_FTYPE (1, (V4SF, V8HI)) ++DEF_LARCH_FTYPE (1, (V8SF, V16HI)) ++ ++DEF_LARCH_FTYPE (1, (V8SF, SF)) ++DEF_LARCH_FTYPE (1, (V8SF, UV8SI)) ++DEF_LARCH_FTYPE (2, (V8SF, V4DF, V4DF)) ++DEF_LARCH_FTYPE (1, (V8SF, V8SF)) ++DEF_LARCH_FTYPE (2, (V8SF, V8SF, V8SF)) ++DEF_LARCH_FTYPE (3, (V8SF, V8SF, V8SF, V8SF)) ++DEF_LARCH_FTYPE (2, (V8SF, V8SF, V8SI)) ++DEF_LARCH_FTYPE (1, (V8SF, V8SI)) ++DEF_LARCH_FTYPE (1, (V8SF, V8HI)) + + DEF_LARCH_FTYPE (2, (V4SI, CVPOINTER, SI)) + DEF_LARCH_FTYPE (1, (V4SI, HI)) +@@ -282,6 +392,7 @@ DEF_LARCH_FTYPE (2, (V4SI, UV4SI, UQI)) + DEF_LARCH_FTYPE (2, (V4SI, UV4SI, UV4SI)) + DEF_LARCH_FTYPE (2, (V4SI, UV8HI, UV8HI)) + DEF_LARCH_FTYPE (2, (V4SI, V2DF, V2DF)) ++DEF_LARCH_FTYPE (2, (V8SI, V4DF, V4DF)) + DEF_LARCH_FTYPE (1, (V4SI, V4SF)) + DEF_LARCH_FTYPE (2, (V4SI, V4SF, V4SF)) + DEF_LARCH_FTYPE (1, (V4SI, V4SI)) +@@ -301,6 +412,32 @@ DEF_LARCH_FTYPE (3, (V4SI, V4SI, V4SI, V4SI)) + DEF_LARCH_FTYPE (3, (V4SI, V4SI, V8HI, V8HI)) + DEF_LARCH_FTYPE (2, (V4SI, V8HI, V8HI)) + ++DEF_LARCH_FTYPE (2, (V8SI, CVPOINTER, SI)) ++DEF_LARCH_FTYPE (1, (V8SI, HI)) ++DEF_LARCH_FTYPE (1, (V8SI, SI)) ++DEF_LARCH_FTYPE (2, (V8SI, UV8SI, UQI)) ++DEF_LARCH_FTYPE (2, (V8SI, UV8SI, UV8SI)) ++DEF_LARCH_FTYPE (2, (V8SI, UV16HI, UV16HI)) ++DEF_LARCH_FTYPE (2, (V8SI, V2DF, V2DF)) ++DEF_LARCH_FTYPE (1, (V8SI, V8SF)) ++DEF_LARCH_FTYPE (2, (V8SI, V8SF, V8SF)) ++DEF_LARCH_FTYPE (1, (V8SI, V8SI)) ++DEF_LARCH_FTYPE (2, (V8SI, V8SI, QI)) ++DEF_LARCH_FTYPE (2, (V8SI, V8SI, SI)) ++DEF_LARCH_FTYPE (2, (V8SI, V8SI, UQI)) ++DEF_LARCH_FTYPE (2, (V8SI, V8SI, USI)) ++DEF_LARCH_FTYPE (3, (V8SI, V8SI, SI, UQI)) ++DEF_LARCH_FTYPE (3, (V8SI, V8SI, UQI, V8SI)) ++DEF_LARCH_FTYPE (3, (V8SI, V8SI, UV16HI, UV16HI)) ++DEF_LARCH_FTYPE (2, (V8SI, V8SI, V8SI)) ++DEF_LARCH_FTYPE (3, (V8SI, V8SI, V8SI, SI)) ++DEF_LARCH_FTYPE (3, (V8SI, V8SI, V8SI, UQI)) 
++DEF_LARCH_FTYPE (3, (V8SI, V8SI, V8SI, USI)) ++DEF_LARCH_FTYPE (4, (V8SI, V8SI, V8SI, UQI, UQI)) ++DEF_LARCH_FTYPE (3, (V8SI, V8SI, V8SI, V8SI)) ++DEF_LARCH_FTYPE (3, (V8SI, V8SI, V16HI, V16HI)) ++DEF_LARCH_FTYPE (2, (V8SI, V16HI, V16HI)) ++ + DEF_LARCH_FTYPE (2, (V8HI, CVPOINTER, SI)) + DEF_LARCH_FTYPE (1, (V8HI, HI)) + DEF_LARCH_FTYPE (1, (V8HI, SI)) +@@ -326,6 +463,31 @@ DEF_LARCH_FTYPE (4, (V8HI, V8HI, V8HI, UQI, UQI)) + DEF_LARCH_FTYPE (3, (V8HI, V8HI, V8HI, USI)) + DEF_LARCH_FTYPE (3, (V8HI, V8HI, V8HI, V8HI)) + ++DEF_LARCH_FTYPE (2, (V16HI, CVPOINTER, SI)) ++DEF_LARCH_FTYPE (1, (V16HI, HI)) ++DEF_LARCH_FTYPE (1, (V16HI, SI)) ++DEF_LARCH_FTYPE (2, (V16HI, UV32QI, UV32QI)) ++DEF_LARCH_FTYPE (2, (V16HI, UV16HI, UQI)) ++DEF_LARCH_FTYPE (2, (V16HI, UV16HI, UV16HI)) ++DEF_LARCH_FTYPE (2, (V16HI, V32QI, V32QI)) ++DEF_LARCH_FTYPE (2, (V16HI, V8SF, V8SF)) ++DEF_LARCH_FTYPE (1, (V16HI, V16HI)) ++DEF_LARCH_FTYPE (2, (V16HI, V16HI, QI)) ++DEF_LARCH_FTYPE (2, (V16HI, V16HI, SI)) ++DEF_LARCH_FTYPE (3, (V16HI, V16HI, SI, UQI)) ++DEF_LARCH_FTYPE (2, (V16HI, V16HI, UQI)) ++DEF_LARCH_FTYPE (2, (V16HI, V16HI, USI)) ++DEF_LARCH_FTYPE (3, (V16HI, V16HI, UQI, SI)) ++DEF_LARCH_FTYPE (3, (V16HI, V16HI, UQI, V16HI)) ++DEF_LARCH_FTYPE (3, (V16HI, V16HI, UV32QI, UV32QI)) ++DEF_LARCH_FTYPE (3, (V16HI, V16HI, V32QI, V32QI)) ++DEF_LARCH_FTYPE (2, (V16HI, V16HI, V16HI)) ++DEF_LARCH_FTYPE (3, (V16HI, V16HI, V16HI, SI)) ++DEF_LARCH_FTYPE (3, (V16HI, V16HI, V16HI, UQI)) ++DEF_LARCH_FTYPE (4, (V16HI, V16HI, V16HI, UQI, UQI)) ++DEF_LARCH_FTYPE (3, (V16HI, V16HI, V16HI, USI)) ++DEF_LARCH_FTYPE (3, (V16HI, V16HI, V16HI, V16HI)) ++ + DEF_LARCH_FTYPE (2, (V8QI, V4HI, V4HI)) + DEF_LARCH_FTYPE (1, (V8QI, V8QI)) + DEF_LARCH_FTYPE (2, (V8QI, V8QI, V8QI)) +@@ -337,62 +499,113 @@ DEF_LARCH_FTYPE (2, (VOID, USI, UQI)) + DEF_LARCH_FTYPE (1, (VOID, UHI)) + DEF_LARCH_FTYPE (3, (VOID, V16QI, CVPOINTER, SI)) + DEF_LARCH_FTYPE (3, (VOID, V16QI, CVPOINTER, DI)) ++DEF_LARCH_FTYPE (3, (VOID, V32QI, CVPOINTER, 
SI)) ++DEF_LARCH_FTYPE (3, (VOID, V32QI, CVPOINTER, DI)) ++DEF_LARCH_FTYPE (3, (VOID, V4DF, POINTER, SI)) + DEF_LARCH_FTYPE (3, (VOID, V2DF, POINTER, SI)) + DEF_LARCH_FTYPE (3, (VOID, V2DI, CVPOINTER, SI)) ++DEF_LARCH_FTYPE (3, (VOID, V4DI, CVPOINTER, SI)) + DEF_LARCH_FTYPE (2, (VOID, V2HI, V2HI)) + DEF_LARCH_FTYPE (2, (VOID, V4QI, V4QI)) + DEF_LARCH_FTYPE (3, (VOID, V4SF, POINTER, SI)) ++DEF_LARCH_FTYPE (3, (VOID, V8SF, POINTER, SI)) + DEF_LARCH_FTYPE (3, (VOID, V4SI, CVPOINTER, SI)) ++DEF_LARCH_FTYPE (3, (VOID, V8SI, CVPOINTER, SI)) + DEF_LARCH_FTYPE (3, (VOID, V8HI, CVPOINTER, SI)) ++DEF_LARCH_FTYPE (3, (VOID, V16HI, CVPOINTER, SI)) + ++DEF_LARCH_FTYPE (1, (V16HI, V32QI)) ++DEF_LARCH_FTYPE (1, (UV16HI, UV32QI)) ++DEF_LARCH_FTYPE (1, (V8SI, V32QI)) ++DEF_LARCH_FTYPE (1, (V4DI, V32QI)) + DEF_LARCH_FTYPE (1, (V8HI, V16QI)) + DEF_LARCH_FTYPE (1, (V4SI, V16QI)) + DEF_LARCH_FTYPE (1, (V2DI, V16QI)) ++DEF_LARCH_FTYPE (1, (UV8SI, UV16HI)) ++DEF_LARCH_FTYPE (1, (V8SI, V16HI)) ++DEF_LARCH_FTYPE (1, (V4DI, V16HI)) + DEF_LARCH_FTYPE (1, (V4SI, V8HI)) + DEF_LARCH_FTYPE (1, (V2DI, V8HI)) + DEF_LARCH_FTYPE (1, (V2DI, V4SI)) ++DEF_LARCH_FTYPE (1, (V4DI, V8SI)) ++DEF_LARCH_FTYPE (1, (UV4DI, UV8SI)) ++DEF_LARCH_FTYPE (1, (UV16HI, V32QI)) ++DEF_LARCH_FTYPE (1, (UV8SI, V32QI)) ++DEF_LARCH_FTYPE (1, (UV4DI, V32QI)) + DEF_LARCH_FTYPE (1, (UV8HI, V16QI)) + DEF_LARCH_FTYPE (1, (UV4SI, V16QI)) + DEF_LARCH_FTYPE (1, (UV2DI, V16QI)) ++DEF_LARCH_FTYPE (1, (UV8SI, V16HI)) ++DEF_LARCH_FTYPE (1, (UV4DI, V16HI)) + DEF_LARCH_FTYPE (1, (UV4SI, V8HI)) + DEF_LARCH_FTYPE (1, (UV2DI, V8HI)) + DEF_LARCH_FTYPE (1, (UV2DI, V4SI)) ++DEF_LARCH_FTYPE (1, (UV4DI, V8SI)) + DEF_LARCH_FTYPE (1, (UV8HI, UV16QI)) + DEF_LARCH_FTYPE (1, (UV4SI, UV16QI)) + DEF_LARCH_FTYPE (1, (UV2DI, UV16QI)) ++DEF_LARCH_FTYPE (1, (UV4DI, UV32QI)) + DEF_LARCH_FTYPE (1, (UV4SI, UV8HI)) + DEF_LARCH_FTYPE (1, (UV2DI, UV8HI)) + DEF_LARCH_FTYPE (1, (UV2DI, UV4SI)) + DEF_LARCH_FTYPE (2, (UV8HI, V16QI, V16QI)) + DEF_LARCH_FTYPE (2, 
(UV4SI, V8HI, V8HI)) + DEF_LARCH_FTYPE (2, (UV2DI, V4SI, V4SI)) ++DEF_LARCH_FTYPE (2, (V16HI, V32QI, UQI)) ++DEF_LARCH_FTYPE (2, (V8SI, V16HI, UQI)) ++DEF_LARCH_FTYPE (2, (V4DI, V8SI, UQI)) + DEF_LARCH_FTYPE (2, (V8HI, V16QI, UQI)) + DEF_LARCH_FTYPE (2, (V4SI, V8HI, UQI)) + DEF_LARCH_FTYPE (2, (V2DI, V4SI, UQI)) ++DEF_LARCH_FTYPE (2, (UV16HI, UV32QI, UQI)) ++DEF_LARCH_FTYPE (2, (UV8SI, UV16HI, UQI)) ++DEF_LARCH_FTYPE (2, (UV4DI, UV8SI, UQI)) + DEF_LARCH_FTYPE (2, (UV8HI, UV16QI, UQI)) + DEF_LARCH_FTYPE (2, (UV4SI, UV8HI, UQI)) + DEF_LARCH_FTYPE (2, (UV2DI, UV4SI, UQI)) ++DEF_LARCH_FTYPE (2, (V32QI, V16HI, V16HI)) ++DEF_LARCH_FTYPE (2, (V16HI, V8SI, V8SI)) ++DEF_LARCH_FTYPE (2, (V8SI, V4DI, V4DI)) + DEF_LARCH_FTYPE (2, (V16QI, V8HI, V8HI)) + DEF_LARCH_FTYPE (2, (V8HI, V4SI, V4SI)) + DEF_LARCH_FTYPE (2, (V4SI, V2DI, V2DI)) ++DEF_LARCH_FTYPE (2, (UV32QI, UV16HI, UV16HI)) ++DEF_LARCH_FTYPE (2, (UV16HI, UV8SI, UV8SI)) ++DEF_LARCH_FTYPE (2, (UV8SI, UV4DI, UV4DI)) + DEF_LARCH_FTYPE (2, (UV16QI, UV8HI, UV8HI)) + DEF_LARCH_FTYPE (2, (UV8HI, UV4SI, UV4SI)) + DEF_LARCH_FTYPE (2, (UV4SI, UV2DI, UV2DI)) ++DEF_LARCH_FTYPE (2, (V32QI, V16HI, UQI)) ++DEF_LARCH_FTYPE (2, (V16HI, V8SI, UQI)) ++DEF_LARCH_FTYPE (2, (V8SI, V4DI, UQI)) + DEF_LARCH_FTYPE (2, (V16QI, V8HI, UQI)) + DEF_LARCH_FTYPE (2, (V8HI, V4SI, UQI)) + DEF_LARCH_FTYPE (2, (V4SI, V2DI, UQI)) ++DEF_LARCH_FTYPE (2, (UV32QI, UV16HI, UQI)) ++DEF_LARCH_FTYPE (2, (UV16HI, UV8SI, UQI)) ++DEF_LARCH_FTYPE (2, (UV8SI, UV4DI, UQI)) + DEF_LARCH_FTYPE (2, (UV16QI, UV8HI, UQI)) + DEF_LARCH_FTYPE (2, (UV8HI, UV4SI, UQI)) + DEF_LARCH_FTYPE (2, (UV4SI, UV2DI, UQI)) ++DEF_LARCH_FTYPE (2, (V32QI, V32QI, DI)) + DEF_LARCH_FTYPE (2, (V16QI, V16QI, DI)) ++DEF_LARCH_FTYPE (2, (V32QI, UQI, UQI)) + DEF_LARCH_FTYPE (2, (V16QI, UQI, UQI)) ++DEF_LARCH_FTYPE (3, (V32QI, V32QI, UQI, UQI)) ++DEF_LARCH_FTYPE (3, (V16HI, V16HI, UQI, UQI)) ++DEF_LARCH_FTYPE (3, (V8SI, V8SI, UQI, UQI)) ++DEF_LARCH_FTYPE (3, (V4DI, V4DI, UQI, UQI)) + DEF_LARCH_FTYPE (3, 
(V16QI, V16QI, UQI, UQI)) + DEF_LARCH_FTYPE (3, (V8HI, V8HI, UQI, UQI)) + DEF_LARCH_FTYPE (3, (V4SI, V4SI, UQI, UQI)) + DEF_LARCH_FTYPE (3, (V2DI, V2DI, UQI, UQI)) ++DEF_LARCH_FTYPE (2, (V8SF, V4DI, V4DI)) + DEF_LARCH_FTYPE (2, (V4SF, V2DI, V2DI)) ++DEF_LARCH_FTYPE (1, (V4DI, V8SF)) + DEF_LARCH_FTYPE (1, (V2DI, V4SF)) ++DEF_LARCH_FTYPE (2, (V4DI, UQI, USI)) + DEF_LARCH_FTYPE (2, (V2DI, UQI, USI)) ++DEF_LARCH_FTYPE (2, (V4DI, UQI, UQI)) + DEF_LARCH_FTYPE (2, (V2DI, UQI, UQI)) + DEF_LARCH_FTYPE (4, (VOID, SI, UQI, V16QI, CVPOINTER)) + DEF_LARCH_FTYPE (4, (VOID, SI, UQI, V8HI, CVPOINTER)) +@@ -402,6 +615,17 @@ DEF_LARCH_FTYPE (2, (V16QI, SI, CVPOINTER)) + DEF_LARCH_FTYPE (2, (V8HI, SI, CVPOINTER)) + DEF_LARCH_FTYPE (2, (V4SI, SI, CVPOINTER)) + DEF_LARCH_FTYPE (2, (V2DI, SI, CVPOINTER)) ++DEF_LARCH_FTYPE (4, (VOID, V32QI, UQI, SI, CVPOINTER)) ++DEF_LARCH_FTYPE (4, (VOID, V16HI, UQI, SI, CVPOINTER)) ++DEF_LARCH_FTYPE (4, (VOID, V8SI, UQI, SI, CVPOINTER)) ++DEF_LARCH_FTYPE (4, (VOID, V4DI, UQI, SI, CVPOINTER)) ++DEF_LARCH_FTYPE (3, (VOID, V32QI, SI, CVPOINTER)) ++DEF_LARCH_FTYPE (2, (V32QI, SI, CVPOINTER)) ++DEF_LARCH_FTYPE (2, (V16HI, SI, CVPOINTER)) ++DEF_LARCH_FTYPE (2, (V8SI, SI, CVPOINTER)) ++DEF_LARCH_FTYPE (2, (V4DI, SI, CVPOINTER)) ++DEF_LARCH_FTYPE (1, (V32QI, POINTER)) ++DEF_LARCH_FTYPE (2, (VOID, V32QI, POINTER)) + DEF_LARCH_FTYPE (2, (V8HI, UV16QI, V16QI)) + DEF_LARCH_FTYPE (2, (V16QI, V16QI, UV16QI)) + DEF_LARCH_FTYPE (2, (UV16QI, V16QI, UV16QI)) +@@ -431,6 +655,33 @@ DEF_LARCH_FTYPE (3, (V4SI, V4SI, V16QI, V16QI)) + DEF_LARCH_FTYPE (3, (V4SI, V4SI, UV16QI, V16QI)) + DEF_LARCH_FTYPE (3, (UV4SI, UV4SI, UV16QI, UV16QI)) + ++ ++DEF_LARCH_FTYPE(2,(V4DI,V16HI,V16HI)) ++DEF_LARCH_FTYPE(2,(V4DI,UV4SI,V4SI)) ++DEF_LARCH_FTYPE(2,(V8SI,UV16HI,V16HI)) ++DEF_LARCH_FTYPE(2,(V16HI,UV32QI,V32QI)) ++DEF_LARCH_FTYPE(2,(V4DI,UV8SI,V8SI)) ++DEF_LARCH_FTYPE(3,(V4DI,V4DI,V16HI,V16HI)) ++DEF_LARCH_FTYPE(2,(UV32QI,V32QI,UV32QI)) ++DEF_LARCH_FTYPE(2,(UV16HI,V16HI,UV16HI)) 
++DEF_LARCH_FTYPE(2,(UV8SI,V8SI,UV8SI)) ++DEF_LARCH_FTYPE(2,(UV4DI,V4DI,UV4DI)) ++DEF_LARCH_FTYPE(3,(V4DI,V4DI,UV4DI,V4DI)) ++DEF_LARCH_FTYPE(3,(V4DI,V4DI,UV8SI,V8SI)) ++DEF_LARCH_FTYPE(3,(V8SI,V8SI,UV16HI,V16HI)) ++DEF_LARCH_FTYPE(3,(V16HI,V16HI,UV32QI,V32QI)) ++DEF_LARCH_FTYPE(2,(V4DI,UV4DI,V4DI)) ++DEF_LARCH_FTYPE(2,(V8SI,V32QI,V32QI)) ++DEF_LARCH_FTYPE(2,(UV4DI,UV16HI,UV16HI)) ++DEF_LARCH_FTYPE(2,(V4DI,UV16HI,V16HI)) ++DEF_LARCH_FTYPE(3,(V8SI,V8SI,V32QI,V32QI)) ++DEF_LARCH_FTYPE(3,(UV8SI,UV8SI,UV32QI,UV32QI)) ++DEF_LARCH_FTYPE(3,(UV4DI,UV4DI,UV16HI,UV16HI)) ++DEF_LARCH_FTYPE(3,(V8SI,V8SI,UV32QI,V32QI)) ++DEF_LARCH_FTYPE(3,(V4DI,V4DI,UV16HI,V16HI)) ++DEF_LARCH_FTYPE(2,(UV8SI,UV32QI,UV32QI)) ++DEF_LARCH_FTYPE(2,(V8SI,UV32QI,V32QI)) ++ + DEF_LARCH_FTYPE(4,(VOID,V16QI,CVPOINTER,SI,UQI)) + DEF_LARCH_FTYPE(4,(VOID,V8HI,CVPOINTER,SI,UQI)) + DEF_LARCH_FTYPE(4,(VOID,V4SI,CVPOINTER,SI,UQI)) +@@ -448,11 +699,29 @@ DEF_LARCH_FTYPE (3, (UV8HI, UV8HI, V8HI, USI)) + DEF_LARCH_FTYPE (3, (UV4SI, UV4SI, V4SI, USI)) + DEF_LARCH_FTYPE (3, (UV2DI, UV2DI, V2DI, USI)) + ++DEF_LARCH_FTYPE (2, (DI, V8SI, UQI)) ++DEF_LARCH_FTYPE (2, (UDI, V8SI, UQI)) ++ ++DEF_LARCH_FTYPE (3, (UV32QI, UV32QI, V32QI, USI)) ++DEF_LARCH_FTYPE (3, (UV16HI, UV16HI, V16HI, USI)) ++DEF_LARCH_FTYPE (3, (UV8SI, UV8SI, V8SI, USI)) ++DEF_LARCH_FTYPE (3, (UV4DI, UV4DI, V4DI, USI)) ++ ++DEF_LARCH_FTYPE(4,(VOID,V32QI,CVPOINTER,SI,UQI)) ++DEF_LARCH_FTYPE(4,(VOID,V16HI,CVPOINTER,SI,UQI)) ++DEF_LARCH_FTYPE(4,(VOID,V8SI,CVPOINTER,SI,UQI)) ++DEF_LARCH_FTYPE(4,(VOID,V4DI,CVPOINTER,SI,UQI)) ++ + DEF_LARCH_FTYPE (1, (BOOLEAN,V16QI)) + DEF_LARCH_FTYPE(2,(V16QI,CVPOINTER,CVPOINTER)) + DEF_LARCH_FTYPE(3,(VOID,V16QI,CVPOINTER,CVPOINTER)) ++DEF_LARCH_FTYPE(2,(V32QI,CVPOINTER,CVPOINTER)) ++DEF_LARCH_FTYPE(3,(VOID,V32QI,CVPOINTER,CVPOINTER)) + + DEF_LARCH_FTYPE (3, (V16QI, V16QI, SI, UQI)) + DEF_LARCH_FTYPE (3, (V2DI, V2DI, SI, UQI)) + DEF_LARCH_FTYPE (3, (V2DI, V2DI, DI, UQI)) + DEF_LARCH_FTYPE (3, (V4SI, V4SI, SI, UQI)) ++ 
++DEF_LARCH_FTYPE (2, (V8SF, V8SF, UQI)) ++DEF_LARCH_FTYPE (2, (V4DF, V4DF, UQI)) +-- +2.33.0 + diff --git a/LoongArch-Add-Loongson-SX-base-instruction-support.patch b/LoongArch-Add-Loongson-SX-base-instruction-support.patch new file mode 100644 index 0000000000000000000000000000000000000000..1f1fc8c8bd95999e956348fb2c92cb070a9c82b8 --- /dev/null +++ b/LoongArch-Add-Loongson-SX-base-instruction-support.patch @@ -0,0 +1,8433 @@ +From 0b4626bb55886081e90922cf6d6869d551847a47 Mon Sep 17 00:00:00 2001 +From: Lulu Cheng +Date: Thu, 16 Mar 2023 16:29:42 +0800 +Subject: [PATCH 063/124] LoongArch: Add Loongson SX base instruction support. + +gcc/ChangeLog: + + * config/loongarch/constraints.md (M): Add Loongson LSX base instruction support. + (N): Ditto. + (O): Ditto. + (P): Ditto. + (R): Ditto. + (S): Ditto. + (YG): Ditto. + (YA): Ditto. + (YB): Ditto. + (Yb): Ditto. + (Yh): Ditto. + (Yw): Ditto. + (YI): Ditto. + (YC): Ditto. + (YZ): Ditto. + (Unv5): Ditto. + (Uuv5): Ditto. + (Usv5): Ditto. + (Uuv6): Ditto. + (Urv8): Ditto. + * config/loongarch/genopts/loongarch.opt.in: Ditto. + * config/loongarch/loongarch-builtins.cc (loongarch_gen_const_int_vector): Ditto. + * config/loongarch/loongarch-modes.def (VECTOR_MODES): Ditto. + (VECTOR_MODE): Ditto. + (INT_MODE): Ditto. + * config/loongarch/loongarch-protos.h (loongarch_split_move_insn_p): Ditto. + (loongarch_split_move_insn): Ditto. + (loongarch_split_128bit_move): Ditto. + (loongarch_split_128bit_move_p): Ditto. + (loongarch_split_lsx_copy_d): Ditto. + (loongarch_split_lsx_insert_d): Ditto. + (loongarch_split_lsx_fill_d): Ditto. + (loongarch_expand_vec_cmp): Ditto. + (loongarch_const_vector_same_val_p): Ditto. + (loongarch_const_vector_same_bytes_p): Ditto. + (loongarch_const_vector_same_int_p): Ditto. + (loongarch_const_vector_shuffle_set_p): Ditto. + (loongarch_const_vector_bitimm_set_p): Ditto. + (loongarch_const_vector_bitimm_clr_p): Ditto. + (loongarch_lsx_vec_parallel_const_half): Ditto. 
+ (loongarch_gen_const_int_vector): Ditto. + (loongarch_lsx_output_division): Ditto. + (loongarch_expand_vector_init): Ditto. + (loongarch_expand_vec_unpack): Ditto. + (loongarch_expand_vec_perm): Ditto. + (loongarch_expand_vector_extract): Ditto. + (loongarch_expand_vector_reduc): Ditto. + (loongarch_ldst_scaled_shift): Ditto. + (loongarch_expand_vec_cond_expr): Ditto. + (loongarch_expand_vec_cond_mask_expr): Ditto. + (loongarch_builtin_vectorized_function): Ditto. + (loongarch_gen_const_int_vector_shuffle): Ditto. + (loongarch_build_signbit_mask): Ditto. + * config/loongarch/loongarch.cc (loongarch_pass_aggregate_num_fpr): Ditto. + (loongarch_setup_incoming_varargs): Ditto. + (loongarch_emit_move): Ditto. + (loongarch_const_vector_bitimm_set_p): Ditto. + (loongarch_const_vector_bitimm_clr_p): Ditto. + (loongarch_const_vector_same_val_p): Ditto. + (loongarch_const_vector_same_bytes_p): Ditto. + (loongarch_const_vector_same_int_p): Ditto. + (loongarch_const_vector_shuffle_set_p): Ditto. + (loongarch_symbol_insns): Ditto. + (loongarch_cannot_force_const_mem): Ditto. + (loongarch_valid_offset_p): Ditto. + (loongarch_valid_index_p): Ditto. + (loongarch_classify_address): Ditto. + (loongarch_address_insns): Ditto. + (loongarch_ldst_scaled_shift): Ditto. + (loongarch_const_insns): Ditto. + (loongarch_split_move_insn_p): Ditto. + (loongarch_subword_at_byte): Ditto. + (loongarch_legitimize_move): Ditto. + (loongarch_builtin_vectorization_cost): Ditto. + (loongarch_split_move_p): Ditto. + (loongarch_split_move): Ditto. + (loongarch_split_move_insn): Ditto. + (loongarch_output_move_index_float): Ditto. + (loongarch_split_128bit_move_p): Ditto. + (loongarch_split_128bit_move): Ditto. + (loongarch_split_lsx_copy_d): Ditto. + (loongarch_split_lsx_insert_d): Ditto. + (loongarch_split_lsx_fill_d): Ditto. + (loongarch_output_move): Ditto. + (loongarch_extend_comparands): Ditto. + (loongarch_print_operand_reloc): Ditto. + (loongarch_print_operand): Ditto. 
+ (loongarch_hard_regno_mode_ok_uncached): Ditto. + (loongarch_hard_regno_call_part_clobbered): Ditto. + (loongarch_hard_regno_nregs): Ditto. + (loongarch_class_max_nregs): Ditto. + (loongarch_can_change_mode_class): Ditto. + (loongarch_mode_ok_for_mov_fmt_p): Ditto. + (loongarch_secondary_reload): Ditto. + (loongarch_vector_mode_supported_p): Ditto. + (loongarch_preferred_simd_mode): Ditto. + (loongarch_autovectorize_vector_modes): Ditto. + (loongarch_lsx_output_division): Ditto. + (loongarch_option_override_internal): Ditto. + (loongarch_hard_regno_caller_save_mode): Ditto. + (MAX_VECT_LEN): Ditto. + (loongarch_spill_class): Ditto. + (struct expand_vec_perm_d): Ditto. + (loongarch_promote_function_mode): Ditto. + (loongarch_expand_vselect): Ditto. + (loongarch_starting_frame_offset): Ditto. + (loongarch_expand_vselect_vconcat): Ditto. + (TARGET_ASM_ALIGNED_DI_OP): Ditto. + (TARGET_OPTION_OVERRIDE): Ditto. + (TARGET_LEGITIMIZE_ADDRESS): Ditto. + (TARGET_ASM_SELECT_RTX_SECTION): Ditto. + (TARGET_ASM_FUNCTION_RODATA_SECTION): Ditto. + (loongarch_expand_lsx_shuffle): Ditto. + (TARGET_SCHED_INIT): Ditto. + (TARGET_SCHED_REORDER): Ditto. + (TARGET_SCHED_REORDER2): Ditto. + (TARGET_SCHED_VARIABLE_ISSUE): Ditto. + (TARGET_SCHED_ADJUST_COST): Ditto. + (TARGET_SCHED_ISSUE_RATE): Ditto. + (TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD): Ditto. + (TARGET_FUNCTION_OK_FOR_SIBCALL): Ditto. + (TARGET_VALID_POINTER_MODE): Ditto. + (TARGET_REGISTER_MOVE_COST): Ditto. + (TARGET_MEMORY_MOVE_COST): Ditto. + (TARGET_RTX_COSTS): Ditto. + (TARGET_ADDRESS_COST): Ditto. + (TARGET_IN_SMALL_DATA_P): Ditto. + (TARGET_PREFERRED_RELOAD_CLASS): Ditto. + (TARGET_ASM_FILE_START_FILE_DIRECTIVE): Ditto. + (TARGET_EXPAND_BUILTIN_VA_START): Ditto. + (loongarch_expand_vec_perm): Ditto. + (TARGET_PROMOTE_FUNCTION_MODE): Ditto. + (TARGET_RETURN_IN_MEMORY): Ditto. + (TARGET_FUNCTION_VALUE): Ditto. + (TARGET_LIBCALL_VALUE): Ditto. + (loongarch_try_expand_lsx_vshuf_const): Ditto. 
+ (TARGET_ASM_OUTPUT_MI_THUNK): Ditto. + (TARGET_ASM_CAN_OUTPUT_MI_THUNK): Ditto. + (TARGET_PRINT_OPERAND): Ditto. + (TARGET_PRINT_OPERAND_ADDRESS): Ditto. + (TARGET_PRINT_OPERAND_PUNCT_VALID_P): Ditto. + (TARGET_SETUP_INCOMING_VARARGS): Ditto. + (TARGET_STRICT_ARGUMENT_NAMING): Ditto. + (TARGET_MUST_PASS_IN_STACK): Ditto. + (TARGET_PASS_BY_REFERENCE): Ditto. + (TARGET_ARG_PARTIAL_BYTES): Ditto. + (TARGET_FUNCTION_ARG): Ditto. + (TARGET_FUNCTION_ARG_ADVANCE): Ditto. + (TARGET_FUNCTION_ARG_BOUNDARY): Ditto. + (TARGET_SCALAR_MODE_SUPPORTED_P): Ditto. + (TARGET_INIT_BUILTINS): Ditto. + (loongarch_expand_vec_perm_const_1): Ditto. + (loongarch_expand_vec_perm_const_2): Ditto. + (loongarch_vectorize_vec_perm_const): Ditto. + (loongarch_cpu_sched_reassociation_width): Ditto. + (loongarch_sched_reassociation_width): Ditto. + (loongarch_expand_vector_extract): Ditto. + (emit_reduc_half): Ditto. + (loongarch_expand_vector_reduc): Ditto. + (loongarch_expand_vec_unpack): Ditto. + (loongarch_lsx_vec_parallel_const_half): Ditto. + (loongarch_constant_elt_p): Ditto. + (loongarch_gen_const_int_vector_shuffle): Ditto. + (loongarch_expand_vector_init): Ditto. + (loongarch_expand_lsx_cmp): Ditto. + (loongarch_expand_vec_cond_expr): Ditto. + (loongarch_expand_vec_cond_mask_expr): Ditto. + (loongarch_expand_vec_cmp): Ditto. + (loongarch_case_values_threshold): Ditto. + (loongarch_build_const_vector): Ditto. + (loongarch_build_signbit_mask): Ditto. + (loongarch_builtin_support_vector_misalignment): Ditto. + (TARGET_ASM_ALIGNED_HI_OP): Ditto. + (TARGET_ASM_ALIGNED_SI_OP): Ditto. + (TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST): Ditto. + (TARGET_VECTOR_MODE_SUPPORTED_P): Ditto. + (TARGET_VECTORIZE_PREFERRED_SIMD_MODE): Ditto. + (TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_MODES): Ditto. + (TARGET_VECTORIZE_VEC_PERM_CONST): Ditto. + (TARGET_SCHED_REASSOCIATION_WIDTH): Ditto. + (TARGET_CASE_VALUES_THRESHOLD): Ditto. + (TARGET_HARD_REGNO_CALL_PART_CLOBBERED): Ditto. 
+ (TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT): Ditto. + * config/loongarch/loongarch.h (TARGET_SUPPORTS_WIDE_INT): Ditto. + (UNITS_PER_LSX_REG): Ditto. + (BITS_PER_LSX_REG): Ditto. + (BIGGEST_ALIGNMENT): Ditto. + (LSX_REG_FIRST): Ditto. + (LSX_REG_LAST): Ditto. + (LSX_REG_NUM): Ditto. + (LSX_REG_P): Ditto. + (LSX_REG_RTX_P): Ditto. + (IMM13_OPERAND): Ditto. + (LSX_SUPPORTED_MODE_P): Ditto. + * config/loongarch/loongarch.md (unknown,add,sub,not,nor,and,or,xor): Ditto. + (unknown,add,sub,not,nor,and,or,xor,simd_add): Ditto. + (unknown,none,QI,HI,SI,DI,TI,SF,DF,TF,FCC): Ditto. + (mode" ): Ditto. + (DF): Ditto. + (SF): Ditto. + (sf): Ditto. + (DI): Ditto. + (SI): Ditto. + * config/loongarch/loongarch.opt: Ditto. + * config/loongarch/predicates.md (const_lsx_branch_operand): Ditto. + (const_uimm3_operand): Ditto. + (const_8_to_11_operand): Ditto. + (const_12_to_15_operand): Ditto. + (const_uimm4_operand): Ditto. + (const_uimm6_operand): Ditto. + (const_uimm7_operand): Ditto. + (const_uimm8_operand): Ditto. + (const_imm5_operand): Ditto. + (const_imm10_operand): Ditto. + (const_imm13_operand): Ditto. + (reg_imm10_operand): Ditto. + (aq8b_operand): Ditto. + (aq8h_operand): Ditto. + (aq8w_operand): Ditto. + (aq8d_operand): Ditto. + (aq10b_operand): Ditto. + (aq10h_operand): Ditto. + (aq10w_operand): Ditto. + (aq10d_operand): Ditto. + (aq12b_operand): Ditto. + (aq12h_operand): Ditto. + (aq12w_operand): Ditto. + (aq12d_operand): Ditto. + (const_m1_operand): Ditto. + (reg_or_m1_operand): Ditto. + (const_exp_2_operand): Ditto. + (const_exp_4_operand): Ditto. + (const_exp_8_operand): Ditto. + (const_exp_16_operand): Ditto. + (const_exp_32_operand): Ditto. + (const_0_or_1_operand): Ditto. + (const_0_to_3_operand): Ditto. + (const_0_to_7_operand): Ditto. + (const_2_or_3_operand): Ditto. + (const_4_to_7_operand): Ditto. + (const_8_to_15_operand): Ditto. + (const_16_to_31_operand): Ditto. + (qi_mask_operand): Ditto. + (hi_mask_operand): Ditto. + (si_mask_operand): Ditto. 
+ (d_operand): Ditto. + (db4_operand): Ditto. + (db7_operand): Ditto. + (db8_operand): Ditto. + (ib3_operand): Ditto. + (sb4_operand): Ditto. + (sb5_operand): Ditto. + (sb8_operand): Ditto. + (sd8_operand): Ditto. + (ub4_operand): Ditto. + (ub8_operand): Ditto. + (uh4_operand): Ditto. + (uw4_operand): Ditto. + (uw5_operand): Ditto. + (uw6_operand): Ditto. + (uw8_operand): Ditto. + (addiur2_operand): Ditto. + (addiusp_operand): Ditto. + (andi16_operand): Ditto. + (movep_src_register): Ditto. + (movep_src_operand): Ditto. + (fcc_reload_operand): Ditto. + (muldiv_target_operand): Ditto. + (const_vector_same_val_operand): Ditto. + (const_vector_same_simm5_operand): Ditto. + (const_vector_same_uimm5_operand): Ditto. + (const_vector_same_ximm5_operand): Ditto. + (const_vector_same_uimm6_operand): Ditto. + (par_const_vector_shf_set_operand): Ditto. + (reg_or_vector_same_val_operand): Ditto. + (reg_or_vector_same_simm5_operand): Ditto. + (reg_or_vector_same_uimm5_operand): Ditto. + (reg_or_vector_same_ximm5_operand): Ditto. + (reg_or_vector_same_uimm6_operand): Ditto. + * doc/md.texi: Ditto. + * config/loongarch/lsx.md: New file. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/constraints.md | 131 +- + gcc/config/loongarch/genopts/loongarch.opt.in | 4 + + gcc/config/loongarch/loongarch-builtins.cc | 10 + + gcc/config/loongarch/loongarch-modes.def | 38 + + gcc/config/loongarch/loongarch-protos.h | 31 + + gcc/config/loongarch/loongarch.cc | 2226 +++++++- + gcc/config/loongarch/loongarch.h | 65 +- + gcc/config/loongarch/loongarch.md | 44 +- + gcc/config/loongarch/loongarch.opt | 4 + + gcc/config/loongarch/lsx.md | 4467 +++++++++++++++++ + gcc/config/loongarch/predicates.md | 333 +- + gcc/doc/md.texi | 11 + + 12 files changed, 7181 insertions(+), 183 deletions(-) + create mode 100644 gcc/config/loongarch/lsx.md + +diff --git a/gcc/config/loongarch/constraints.md b/gcc/config/loongarch/constraints.md +index 25f3cda35..cec5d8857 100644 +--- a/gcc/config/loongarch/constraints.md ++++ b/gcc/config/loongarch/constraints.md +@@ -76,12 +76,13 @@ + ;; "Le" + ;; "A signed 32-bit constant can be expressed as Lb + I, but not a + ;; single Lb or I." +-;; "M" <-----unused +-;; "N" <-----unused +-;; "O" <-----unused +-;; "P" <-----unused ++;; "M" "A constant that cannot be loaded using @code{lui}, @code{addiu} ++;; or @code{ori}." ++;; "N" "A constant in the range -65535 to -1 (inclusive)." ++;; "O" "A signed 15-bit constant." ++;; "P" "A constant in the range 1 to 65535 (inclusive)." + ;; "Q" <-----unused +-;; "R" <-----unused ++;; "R" "An address that can be used in a non-macro load or store." + ;; "S" <-----unused + ;; "T" <-----unused + ;; "U" <-----unused +@@ -214,6 +215,63 @@ + (and (match_code "const_int") + (match_test "loongarch_addu16i_imm12_operand_p (ival, SImode)"))) + ++(define_constraint "M" ++ "A constant that cannot be loaded using @code{lui}, @code{addiu} ++ or @code{ori}." 
++ (and (match_code "const_int") ++ (not (match_test "IMM12_OPERAND (ival)")) ++ (not (match_test "IMM12_OPERAND_UNSIGNED (ival)")) ++ (not (match_test "LU12I_OPERAND (ival)")))) ++ ++(define_constraint "N" ++ "A constant in the range -65535 to -1 (inclusive)." ++ (and (match_code "const_int") ++ (match_test "ival >= -0xffff && ival < 0"))) ++ ++(define_constraint "O" ++ "A signed 15-bit constant." ++ (and (match_code "const_int") ++ (match_test "ival >= -0x4000 && ival < 0x4000"))) ++ ++(define_constraint "P" ++ "A constant in the range 1 to 65535 (inclusive)." ++ (and (match_code "const_int") ++ (match_test "ival > 0 && ival < 0x10000"))) ++ ++;; General constraints ++ ++(define_memory_constraint "R" ++ "An address that can be used in a non-macro load or store." ++ (and (match_code "mem") ++ (match_test "loongarch_address_insns (XEXP (op, 0), mode, false) == 1"))) ++(define_constraint "S" ++ "@internal ++ A constant call address." ++ (and (match_operand 0 "call_insn_operand") ++ (match_test "CONSTANT_P (op)"))) ++ ++(define_constraint "YG" ++ "@internal ++ A vector zero." ++ (and (match_code "const_vector") ++ (match_test "op == CONST0_RTX (mode)"))) ++ ++(define_constraint "YA" ++ "@internal ++ An unsigned 6-bit constant." ++ (and (match_code "const_int") ++ (match_test "UIMM6_OPERAND (ival)"))) ++ ++(define_constraint "YB" ++ "@internal ++ A signed 10-bit constant." 
++ (and (match_code "const_int") ++ (match_test "IMM10_OPERAND (ival)"))) ++ ++(define_constraint "Yb" ++ "@internal" ++ (match_operand 0 "qi_mask_operand")) ++ + (define_constraint "Yd" + "@internal + A constant @code{move_operand} that can be safely loaded using +@@ -221,10 +279,73 @@ + (and (match_operand 0 "move_operand") + (match_test "CONSTANT_P (op)"))) + ++(define_constraint "Yh" ++ "@internal" ++ (match_operand 0 "hi_mask_operand")) ++ ++(define_constraint "Yw" ++ "@internal" ++ (match_operand 0 "si_mask_operand")) ++ + (define_constraint "Yx" + "@internal" + (match_operand 0 "low_bitmask_operand")) + ++(define_constraint "YI" ++ "@internal ++ A replicated vector const in which the replicated value is in the range ++ [-512,511]." ++ (and (match_code "const_vector") ++ (match_test "loongarch_const_vector_same_int_p (op, mode, -512, 511)"))) ++ ++(define_constraint "YC" ++ "@internal ++ A replicated vector const in which the replicated value has a single ++ bit set." ++ (and (match_code "const_vector") ++ (match_test "loongarch_const_vector_bitimm_set_p (op, mode)"))) ++ ++(define_constraint "YZ" ++ "@internal ++ A replicated vector const in which the replicated value has a single ++ bit clear." ++ (and (match_code "const_vector") ++ (match_test "loongarch_const_vector_bitimm_clr_p (op, mode)"))) ++ ++(define_constraint "Unv5" ++ "@internal ++ A replicated vector const in which the replicated value is in the range ++ [-31,0]." ++ (and (match_code "const_vector") ++ (match_test "loongarch_const_vector_same_int_p (op, mode, -31, 0)"))) ++ ++(define_constraint "Uuv5" ++ "@internal ++ A replicated vector const in which the replicated value is in the range ++ [0,31]." ++ (and (match_code "const_vector") ++ (match_test "loongarch_const_vector_same_int_p (op, mode, 0, 31)"))) ++ ++(define_constraint "Usv5" ++ "@internal ++ A replicated vector const in which the replicated value is in the range ++ [-16,15]." 
++ (and (match_code "const_vector") ++ (match_test "loongarch_const_vector_same_int_p (op, mode, -16, 15)"))) ++ ++(define_constraint "Uuv6" ++ "@internal ++ A replicated vector const in which the replicated value is in the range ++ [0,63]." ++ (and (match_code "const_vector") ++ (match_test "loongarch_const_vector_same_int_p (op, mode, 0, 63)"))) ++ ++(define_constraint "Urv8" ++ "@internal ++ A replicated vector const with replicated byte values as well as elements" ++ (and (match_code "const_vector") ++ (match_test "loongarch_const_vector_same_bytes_p (op, mode)"))) ++ + (define_memory_constraint "ZC" + "A memory operand whose address is formed by a base register and offset + that is suitable for use in instructions with the same addressing mode +diff --git a/gcc/config/loongarch/genopts/loongarch.opt.in b/gcc/config/loongarch/genopts/loongarch.opt.in +index c6e337d05..c53785a37 100644 +--- a/gcc/config/loongarch/genopts/loongarch.opt.in ++++ b/gcc/config/loongarch/genopts/loongarch.opt.in +@@ -146,6 +146,10 @@ mbranch-cost= + Target RejectNegative Joined UInteger Var(loongarch_branch_cost) + -mbranch-cost=COST Set the cost of branches to roughly COST instructions. + ++mmemvec-cost= ++Target RejectNegative Joined UInteger Var(loongarch_vector_access_cost) IntegerRange(1, 5) ++mmemvec-cost=COST Set the cost of vector memory access instructions. ++ + mcheck-zero-division + Target Mask(CHECK_ZERO_DIV) + Trap on integer divide by zero. +diff --git a/gcc/config/loongarch/loongarch-builtins.cc b/gcc/config/loongarch/loongarch-builtins.cc +index cb0ea1664..c8548a07f 100644 +--- a/gcc/config/loongarch/loongarch-builtins.cc ++++ b/gcc/config/loongarch/loongarch-builtins.cc +@@ -36,6 +36,7 @@ along with GCC; see the file COPYING3. If not see + #include "fold-const.h" + #include "expr.h" + #include "langhooks.h" ++#include "emit-rtl.h" + + /* Macros to create an enumeration identifier for a function prototype. 
*/ + #define LARCH_FTYPE_NAME1(A, B) LARCH_##A##_FTYPE_##B +@@ -302,6 +303,15 @@ loongarch_prepare_builtin_arg (struct expand_operand *op, tree exp, + create_input_operand (op, value, TYPE_MODE (TREE_TYPE (arg))); + } + ++/* Return a const_int vector of VAL with mode MODE. */ ++ ++rtx ++loongarch_gen_const_int_vector (machine_mode mode, HOST_WIDE_INT val) ++{ ++ rtx c = gen_int_mode (val, GET_MODE_INNER (mode)); ++ return gen_const_vec_duplicate (mode, c); ++} ++ + /* Expand instruction ICODE as part of a built-in function sequence. + Use the first NOPS elements of OPS as the instruction's operands. + HAS_TARGET_P is true if operand 0 is a target; it is false if the +diff --git a/gcc/config/loongarch/loongarch-modes.def b/gcc/config/loongarch/loongarch-modes.def +index 7f06e2d65..b69ad3d83 100644 +--- a/gcc/config/loongarch/loongarch-modes.def ++++ b/gcc/config/loongarch/loongarch-modes.def +@@ -23,3 +23,41 @@ FLOAT_MODE (TF, 16, ieee_quad_format); + + /* For floating point conditions in FCC registers. */ + CC_MODE (FCC); ++ ++/* Vector modes. */ ++VECTOR_MODES (INT, 4); /* V4QI V2HI */ ++VECTOR_MODES (INT, 8); /* V8QI V4HI V2SI */ ++VECTOR_MODES (FLOAT, 8); /* V4HF V2SF */ ++ ++/* For LARCH LSX 128 bits. */ ++VECTOR_MODES (INT, 16); /* V16QI V8HI V4SI V2DI */ ++VECTOR_MODES (FLOAT, 16); /* V4SF V2DF */ ++ ++VECTOR_MODES (INT, 32); /* V32QI V16HI V8SI V4DI */ ++VECTOR_MODES (FLOAT, 32); /* V8SF V4DF */ ++ ++/* Double-sized vector modes for vec_concat. 
*/ ++/* VECTOR_MODE (INT, QI, 32); V32QI */ ++/* VECTOR_MODE (INT, HI, 16); V16HI */ ++/* VECTOR_MODE (INT, SI, 8); V8SI */ ++/* VECTOR_MODE (INT, DI, 4); V4DI */ ++/* VECTOR_MODE (FLOAT, SF, 8); V8SF */ ++/* VECTOR_MODE (FLOAT, DF, 4); V4DF */ ++ ++VECTOR_MODE (INT, QI, 64); /* V64QI */ ++VECTOR_MODE (INT, HI, 32); /* V32HI */ ++VECTOR_MODE (INT, SI, 16); /* V16SI */ ++VECTOR_MODE (INT, DI, 8); /* V8DI */ ++VECTOR_MODE (FLOAT, SF, 16); /* V16SF */ ++VECTOR_MODE (FLOAT, DF, 8); /* V8DF */ ++ ++VECTOR_MODES (FRACT, 4); /* V4QQ V2HQ */ ++VECTOR_MODES (UFRACT, 4); /* V4UQQ V2UHQ */ ++VECTOR_MODES (ACCUM, 4); /* V2HA */ ++VECTOR_MODES (UACCUM, 4); /* V2UHA */ ++ ++INT_MODE (OI, 32); ++ ++/* Keep the OI modes from confusing the compiler into thinking ++ that these modes could actually be used for computation. They are ++ only holders for vectors during data movement. */ +diff --git a/gcc/config/loongarch/loongarch-protos.h b/gcc/config/loongarch/loongarch-protos.h +index 3ac3b5e19..24e42fa99 100644 +--- a/gcc/config/loongarch/loongarch-protos.h ++++ b/gcc/config/loongarch/loongarch-protos.h +@@ -85,10 +85,18 @@ extern bool loongarch_split_move_p (rtx, rtx); + extern void loongarch_split_move (rtx, rtx, rtx); + extern bool loongarch_addu16i_imm12_operand_p (HOST_WIDE_INT, machine_mode); + extern void loongarch_split_plus_constant (rtx *, machine_mode); ++extern bool loongarch_split_move_insn_p (rtx, rtx); ++extern void loongarch_split_move_insn (rtx, rtx, rtx); ++extern void loongarch_split_128bit_move (rtx, rtx); ++extern bool loongarch_split_128bit_move_p (rtx, rtx); ++extern void loongarch_split_lsx_copy_d (rtx, rtx, rtx, rtx (*)(rtx, rtx, rtx)); ++extern void loongarch_split_lsx_insert_d (rtx, rtx, rtx, rtx); ++extern void loongarch_split_lsx_fill_d (rtx, rtx); + extern const char *loongarch_output_move (rtx, rtx); + extern bool loongarch_cfun_has_cprestore_slot_p (void); + #ifdef RTX_CODE + extern void loongarch_expand_scc (rtx *); ++extern bool 
loongarch_expand_vec_cmp (rtx *); + extern void loongarch_expand_conditional_branch (rtx *); + extern void loongarch_expand_conditional_move (rtx *); + extern void loongarch_expand_conditional_trap (rtx); +@@ -110,6 +118,15 @@ extern bool loongarch_small_data_pattern_p (rtx); + extern rtx loongarch_rewrite_small_data (rtx); + extern rtx loongarch_return_addr (int, rtx); + ++extern bool loongarch_const_vector_same_val_p (rtx, machine_mode); ++extern bool loongarch_const_vector_same_bytes_p (rtx, machine_mode); ++extern bool loongarch_const_vector_same_int_p (rtx, machine_mode, HOST_WIDE_INT, ++ HOST_WIDE_INT); ++extern bool loongarch_const_vector_shuffle_set_p (rtx, machine_mode); ++extern bool loongarch_const_vector_bitimm_set_p (rtx, machine_mode); ++extern bool loongarch_const_vector_bitimm_clr_p (rtx, machine_mode); ++extern rtx loongarch_lsx_vec_parallel_const_half (machine_mode, bool); ++extern rtx loongarch_gen_const_int_vector (machine_mode, HOST_WIDE_INT); + extern enum reg_class loongarch_secondary_reload_class (enum reg_class, + machine_mode, + rtx, bool); +@@ -129,6 +146,7 @@ extern const char *loongarch_output_equal_conditional_branch (rtx_insn *, + rtx *, + bool); + extern const char *loongarch_output_division (const char *, rtx *); ++extern const char *loongarch_lsx_output_division (const char *, rtx *); + extern const char *loongarch_output_probe_stack_range (rtx, rtx, rtx); + extern bool loongarch_hard_regno_rename_ok (unsigned int, unsigned int); + extern int loongarch_dspalu_bypass_p (rtx, rtx); +@@ -156,6 +174,13 @@ union loongarch_gen_fn_ptrs + extern void loongarch_expand_atomic_qihi (union loongarch_gen_fn_ptrs, + rtx, rtx, rtx, rtx, rtx); + ++extern void loongarch_expand_vector_init (rtx, rtx); ++extern void loongarch_expand_vec_unpack (rtx op[2], bool, bool); ++extern void loongarch_expand_vec_perm (rtx, rtx, rtx, rtx); ++extern void loongarch_expand_vector_extract (rtx, rtx, int); ++extern void loongarch_expand_vector_reduc (rtx (*)(rtx, 
rtx, rtx), rtx, rtx); ++ ++extern int loongarch_ldst_scaled_shift (machine_mode); + extern bool loongarch_signed_immediate_p (unsigned HOST_WIDE_INT, int, int); + extern bool loongarch_unsigned_immediate_p (unsigned HOST_WIDE_INT, int, int); + extern bool loongarch_12bit_offset_address_p (rtx, machine_mode); +@@ -171,6 +196,9 @@ extern bool loongarch_split_symbol_type (enum loongarch_symbol_type); + typedef rtx (*mulsidi3_gen_fn) (rtx, rtx, rtx); + + extern void loongarch_register_frame_header_opt (void); ++extern void loongarch_expand_vec_cond_expr (machine_mode, machine_mode, rtx *); ++extern void loongarch_expand_vec_cond_mask_expr (machine_mode, machine_mode, ++ rtx *); + + /* Routines implemented in loongarch-c.c. */ + void loongarch_cpu_cpp_builtins (cpp_reader *); +@@ -180,6 +208,9 @@ extern void loongarch_atomic_assign_expand_fenv (tree *, tree *, tree *); + extern tree loongarch_builtin_decl (unsigned int, bool); + extern rtx loongarch_expand_builtin (tree, rtx, rtx subtarget ATTRIBUTE_UNUSED, + machine_mode, int); ++extern tree loongarch_builtin_vectorized_function (unsigned int, tree, tree); ++extern rtx loongarch_gen_const_int_vector_shuffle (machine_mode, int); + extern tree loongarch_build_builtin_va_list (void); + ++extern rtx loongarch_build_signbit_mask (machine_mode, bool, bool); + #endif /* ! 
GCC_LOONGARCH_PROTOS_H */ +diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc +index bd774d4a9..40b83d72b 100644 +--- a/gcc/config/loongarch/loongarch.cc ++++ b/gcc/config/loongarch/loongarch.cc +@@ -432,7 +432,7 @@ loongarch_flatten_aggregate_argument (const_tree type, + + static unsigned + loongarch_pass_aggregate_num_fpr (const_tree type, +- loongarch_aggregate_field fields[2]) ++ loongarch_aggregate_field fields[2]) + { + int n = loongarch_flatten_aggregate_argument (type, fields); + +@@ -770,7 +770,7 @@ loongarch_setup_incoming_varargs (cumulative_args_t cum, + { + rtx ptr = plus_constant (Pmode, virtual_incoming_args_rtx, + REG_PARM_STACK_SPACE (cfun->decl) +- - gp_saved * UNITS_PER_WORD); ++ - gp_saved * UNITS_PER_WORD); + rtx mem = gen_frame_mem (BLKmode, ptr); + set_mem_alias_set (mem, get_varargs_alias_set ()); + +@@ -1046,7 +1046,7 @@ rtx + loongarch_emit_move (rtx dest, rtx src) + { + return (can_create_pseudo_p () ? emit_move_insn (dest, src) +- : emit_move_insn_1 (dest, src)); ++ : emit_move_insn_1 (dest, src)); + } + + /* Save register REG to MEM. Make the instruction frame-related. */ +@@ -1674,6 +1674,140 @@ loongarch_symbol_binds_local_p (const_rtx x) + return false; + } + ++/* Return true if OP is a constant vector with the number of units in MODE, ++ and each unit has the same bit set. 
*/ ++ ++bool ++loongarch_const_vector_bitimm_set_p (rtx op, machine_mode mode) ++{ ++ if (GET_CODE (op) == CONST_VECTOR && op != CONST0_RTX (mode)) ++ { ++ unsigned HOST_WIDE_INT val = UINTVAL (CONST_VECTOR_ELT (op, 0)); ++ int vlog2 = exact_log2 (val & GET_MODE_MASK (GET_MODE_INNER (mode))); ++ ++ if (vlog2 != -1) ++ { ++ gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_INT); ++ gcc_assert (vlog2 >= 0 && vlog2 <= GET_MODE_UNIT_BITSIZE (mode) - 1); ++ return loongarch_const_vector_same_val_p (op, mode); ++ } ++ } ++ ++ return false; ++} ++ ++/* Return true if OP is a constant vector with the number of units in MODE, ++ and each unit has the same bit clear. */ ++ ++bool ++loongarch_const_vector_bitimm_clr_p (rtx op, machine_mode mode) ++{ ++ if (GET_CODE (op) == CONST_VECTOR && op != CONSTM1_RTX (mode)) ++ { ++ unsigned HOST_WIDE_INT val = ~UINTVAL (CONST_VECTOR_ELT (op, 0)); ++ int vlog2 = exact_log2 (val & GET_MODE_MASK (GET_MODE_INNER (mode))); ++ ++ if (vlog2 != -1) ++ { ++ gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_INT); ++ gcc_assert (vlog2 >= 0 && vlog2 <= GET_MODE_UNIT_BITSIZE (mode) - 1); ++ return loongarch_const_vector_same_val_p (op, mode); ++ } ++ } ++ ++ return false; ++} ++ ++/* Return true if OP is a constant vector with the number of units in MODE, ++ and each unit has the same value. */ ++ ++bool ++loongarch_const_vector_same_val_p (rtx op, machine_mode mode) ++{ ++ int i, nunits = GET_MODE_NUNITS (mode); ++ rtx first; ++ ++ if (GET_CODE (op) != CONST_VECTOR || GET_MODE (op) != mode) ++ return false; ++ ++ first = CONST_VECTOR_ELT (op, 0); ++ for (i = 1; i < nunits; i++) ++ if (!rtx_equal_p (first, CONST_VECTOR_ELT (op, i))) ++ return false; ++ ++ return true; ++} ++ ++/* Return true if OP is a constant vector with the number of units in MODE, ++ and each unit has the same value as well as replicated bytes in the value. 
++*/ ++ ++bool ++loongarch_const_vector_same_bytes_p (rtx op, machine_mode mode) ++{ ++ int i, bytes; ++ HOST_WIDE_INT val, first_byte; ++ rtx first; ++ ++ if (!loongarch_const_vector_same_val_p (op, mode)) ++ return false; ++ ++ first = CONST_VECTOR_ELT (op, 0); ++ bytes = GET_MODE_UNIT_SIZE (mode); ++ val = INTVAL (first); ++ first_byte = val & 0xff; ++ for (i = 1; i < bytes; i++) ++ { ++ val >>= 8; ++ if ((val & 0xff) != first_byte) ++ return false; ++ } ++ ++ return true; ++} ++ ++/* Return true if OP is a constant vector with the number of units in MODE, ++ and each unit has the same integer value in the range [LOW, HIGH]. */ ++ ++bool ++loongarch_const_vector_same_int_p (rtx op, machine_mode mode, HOST_WIDE_INT low, ++ HOST_WIDE_INT high) ++{ ++ HOST_WIDE_INT value; ++ rtx elem0; ++ ++ if (!loongarch_const_vector_same_val_p (op, mode)) ++ return false; ++ ++ elem0 = CONST_VECTOR_ELT (op, 0); ++ if (!CONST_INT_P (elem0)) ++ return false; ++ ++ value = INTVAL (elem0); ++ return (value >= low && value <= high); ++} ++ ++/* Return true if OP is a constant vector with repeated 4-element sets ++ in mode MODE. */ ++ ++bool ++loongarch_const_vector_shuffle_set_p (rtx op, machine_mode mode) ++{ ++ int nunits = GET_MODE_NUNITS (mode); ++ int nsets = nunits / 4; ++ int set = 0; ++ int i, j; ++ ++ /* Check if we have the same 4-element sets. */ ++ for (j = 0; j < nsets; j++, set = 4 * j) ++ for (i = 0; i < 4; i++) ++ if ((INTVAL (XVECEXP (op, 0, i)) ++ != (INTVAL (XVECEXP (op, 0, set + i)) - set)) ++ || !IN_RANGE (INTVAL (XVECEXP (op, 0, set + i)), 0, set + 3)) ++ return false; ++ return true; ++} ++ + /* Return true if rtx constants of mode MODE should be put into a small + data section. */ + +@@ -1791,6 +1925,11 @@ loongarch_symbolic_constant_p (rtx x, enum loongarch_symbol_type *symbol_type) + static int + loongarch_symbol_insns (enum loongarch_symbol_type type, machine_mode mode) + { ++ /* LSX LD.* and ST.* cannot support loading symbols via an immediate ++ operand. 
*/ ++ if (LSX_SUPPORTED_MODE_P (mode)) ++ return 0; ++ + switch (type) + { + case SYMBOL_GOT_DISP: +@@ -1837,7 +1976,8 @@ loongarch_cannot_force_const_mem (machine_mode mode, rtx x) + references, reload will consider forcing C into memory and using + one of the instruction's memory alternatives. Returning false + here will force it to use an input reload instead. */ +- if (CONST_INT_P (x) && loongarch_legitimate_constant_p (mode, x)) ++ if ((CONST_INT_P (x) || GET_CODE (x) == CONST_VECTOR) ++ && loongarch_legitimate_constant_p (mode, x)) + return true; + + split_const (x, &base, &offset); +@@ -1914,6 +2054,12 @@ loongarch_valid_offset_p (rtx x, machine_mode mode) + && !IMM12_OPERAND (INTVAL (x) + GET_MODE_SIZE (mode) - UNITS_PER_WORD)) + return false; + ++ /* LSX LD.* and ST.* supports 10-bit signed offsets. */ ++ if (LSX_SUPPORTED_MODE_P (mode) ++ && !loongarch_signed_immediate_p (INTVAL (x), 10, ++ loongarch_ldst_scaled_shift (mode))) ++ return false; ++ + return true; + } + +@@ -1998,7 +2144,7 @@ loongarch_valid_lo_sum_p (enum loongarch_symbol_type symbol_type, + + static bool + loongarch_valid_index_p (struct loongarch_address_info *info, rtx x, +- machine_mode mode, bool strict_p) ++ machine_mode mode, bool strict_p) + { + rtx index; + +@@ -2051,7 +2197,7 @@ loongarch_classify_address (struct loongarch_address_info *info, rtx x, + } + + if (loongarch_valid_base_register_p (XEXP (x, 1), mode, strict_p) +- && loongarch_valid_index_p (info, XEXP (x, 0), mode, strict_p)) ++ && loongarch_valid_index_p (info, XEXP (x, 0), mode, strict_p)) + { + info->reg = XEXP (x, 1); + return true; +@@ -2126,6 +2272,7 @@ loongarch_address_insns (rtx x, machine_mode mode, bool might_split_p) + { + struct loongarch_address_info addr; + int factor; ++ bool lsx_p = !might_split_p && LSX_SUPPORTED_MODE_P (mode); + + if (!loongarch_classify_address (&addr, x, mode, false)) + return 0; +@@ -2143,15 +2290,29 @@ loongarch_address_insns (rtx x, machine_mode mode, bool might_split_p) + 
switch (addr.type) + { + case ADDRESS_REG: ++ if (lsx_p) ++ { ++ /* LSX LD.* and ST.* supports 10-bit signed offsets. */ ++ if (loongarch_signed_immediate_p (INTVAL (addr.offset), 10, ++ loongarch_ldst_scaled_shift (mode))) ++ return 1; ++ else ++ return 0; ++ } ++ return factor; ++ + case ADDRESS_REG_REG: +- case ADDRESS_CONST_INT: + return factor; + ++ case ADDRESS_CONST_INT: ++ return lsx_p ? 0 : factor; ++ + case ADDRESS_LO_SUM: + return factor + 1; + + case ADDRESS_SYMBOLIC: +- return factor * loongarch_symbol_insns (addr.symbol_type, mode); ++ return lsx_p ? 0 ++ : factor * loongarch_symbol_insns (addr.symbol_type, mode); + } + return 0; + } +@@ -2177,6 +2338,19 @@ loongarch_signed_immediate_p (unsigned HOST_WIDE_INT x, int bits, + return loongarch_unsigned_immediate_p (x, bits, shift); + } + ++/* Return the scale shift that applied to LSX LD/ST address offset. */ ++ ++int ++loongarch_ldst_scaled_shift (machine_mode mode) ++{ ++ int shift = exact_log2 (GET_MODE_UNIT_SIZE (mode)); ++ ++ if (shift < 0 || shift > 8) ++ gcc_unreachable (); ++ ++ return shift; ++} ++ + /* Return true if X is a legitimate address with a 12-bit offset + or addr.type is ADDRESS_LO_SUM. + MODE is the mode of the value being accessed. */ +@@ -2244,6 +2418,9 @@ loongarch_const_insns (rtx x) + return loongarch_integer_cost (INTVAL (x)); + + case CONST_VECTOR: ++ if (LSX_SUPPORTED_MODE_P (GET_MODE (x)) ++ && loongarch_const_vector_same_int_p (x, GET_MODE (x), -512, 511)) ++ return 1; + /* Fall through. */ + case CONST_DOUBLE: + return x == CONST0_RTX (GET_MODE (x)) ? 
1 : 0; +@@ -2278,7 +2455,7 @@ loongarch_const_insns (rtx x) + case SYMBOL_REF: + case LABEL_REF: + return loongarch_symbol_insns ( +- loongarch_classify_symbol (x), MAX_MACHINE_MODE); ++ loongarch_classify_symbol (x), MAX_MACHINE_MODE); + + default: + return 0; +@@ -2300,7 +2477,26 @@ loongarch_split_const_insns (rtx x) + return low + high; + } + +-static bool loongarch_split_move_insn_p (rtx dest, rtx src); ++bool loongarch_split_move_insn_p (rtx dest, rtx src); ++/* Return one word of 128-bit value OP, taking into account the fixed ++ endianness of certain registers. BYTE selects from the byte address. */ ++ ++rtx ++loongarch_subword_at_byte (rtx op, unsigned int byte) ++{ ++ machine_mode mode; ++ ++ mode = GET_MODE (op); ++ if (mode == VOIDmode) ++ mode = TImode; ++ ++ gcc_assert (!FP_REG_RTX_P (op)); ++ ++ if (MEM_P (op)) ++ return loongarch_rewrite_small_data (adjust_address (op, word_mode, byte)); ++ ++ return simplify_gen_subreg (word_mode, op, mode, byte); ++} + + /* Return the number of instructions needed to implement INSN, + given that it loads from or stores to MEM. */ +@@ -3061,9 +3257,10 @@ loongarch_legitimize_move (machine_mode mode, rtx dest, rtx src) + + /* Both src and dest are non-registers; one special case is supported where + the source is (const_int 0) and the store can source the zero register. +- */ ++ LSX is never able to source the zero register directly in ++ memory operations. */ + if (!register_operand (dest, mode) && !register_operand (src, mode) +- && !const_0_operand (src, mode)) ++ && (!const_0_operand (src, mode) || LSX_SUPPORTED_MODE_P (mode))) + { + loongarch_emit_move (dest, force_reg (mode, src)); + return true; +@@ -3635,6 +3832,54 @@ loongarch_rtx_costs (rtx x, machine_mode mode, int outer_code, + } + } + ++/* Vectorizer cost model implementation. */ ++ ++/* Implement targetm.vectorize.builtin_vectorization_cost. 
*/ ++ ++static int ++loongarch_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost, ++ tree vectype, ++ int misalign ATTRIBUTE_UNUSED) ++{ ++ unsigned elements; ++ ++ switch (type_of_cost) ++ { ++ case scalar_stmt: ++ case scalar_load: ++ case vector_stmt: ++ case vector_load: ++ case vec_to_scalar: ++ case scalar_to_vec: ++ case cond_branch_not_taken: ++ case vec_promote_demote: ++ case scalar_store: ++ case vector_store: ++ return 1; ++ ++ case vec_perm: ++ return 1; ++ ++ case unaligned_load: ++ case vector_gather_load: ++ return 2; ++ ++ case unaligned_store: ++ case vector_scatter_store: ++ return 10; ++ ++ case cond_branch_taken: ++ return 3; ++ ++ case vec_construct: ++ elements = TYPE_VECTOR_SUBPARTS (vectype); ++ return elements / 2 + 1; ++ ++ default: ++ gcc_unreachable (); ++ } ++} ++ + /* Implement TARGET_ADDRESS_COST. */ + + static int +@@ -3689,6 +3934,11 @@ loongarch_split_move_p (rtx dest, rtx src) + if (FP_REG_RTX_P (src) && MEM_P (dest)) + return false; + } ++ ++ /* Check if LSX moves need splitting. */ ++ if (LSX_SUPPORTED_MODE_P (GET_MODE (dest))) ++ return loongarch_split_128bit_move_p (dest, src); ++ + /* Otherwise split all multiword moves. */ + return size > UNITS_PER_WORD; + } +@@ -3702,7 +3952,9 @@ loongarch_split_move (rtx dest, rtx src, rtx insn_) + rtx low_dest; + + gcc_checking_assert (loongarch_split_move_p (dest, src)); +- if (FP_REG_RTX_P (dest) || FP_REG_RTX_P (src)) ++ if (LSX_SUPPORTED_MODE_P (GET_MODE (dest))) ++ loongarch_split_128bit_move (dest, src); ++ else if (FP_REG_RTX_P (dest) || FP_REG_RTX_P (src)) + { + if (!TARGET_64BIT && GET_MODE (dest) == DImode) + emit_insn (gen_move_doubleword_fprdi (dest, src)); +@@ -3806,12 +4058,21 @@ loongarch_split_plus_constant (rtx *op, machine_mode mode) + + /* Return true if a move from SRC to DEST in INSN should be split. 
*/ + +-static bool ++bool + loongarch_split_move_insn_p (rtx dest, rtx src) + { + return loongarch_split_move_p (dest, src); + } + ++/* Split a move from SRC to DEST in INSN, given that ++ loongarch_split_move_insn_p holds. */ ++ ++void ++loongarch_split_move_insn (rtx dest, rtx src, rtx insn) ++{ ++ loongarch_split_move (dest, src, insn); ++} ++ + /* Implement TARGET_CONSTANT_ALIGNMENT. */ + + static HOST_WIDE_INT +@@ -3858,7 +4119,7 @@ const char * + loongarch_output_move_index_float (rtx x, machine_mode mode, bool ldr) + { + int index = exact_log2 (GET_MODE_SIZE (mode)); +- if (!IN_RANGE (index, 2, 3)) ++ if (!IN_RANGE (index, 2, 4)) + return NULL; + + struct loongarch_address_info info; +@@ -3867,20 +4128,216 @@ loongarch_output_move_index_float (rtx x, machine_mode mode, bool ldr) + || !loongarch_legitimate_address_p (mode, x, false)) + return NULL; + +- const char *const insn[][2] = ++ const char *const insn[][3] = + { + { + "fstx.s\t%1,%0", +- "fstx.d\t%1,%0" ++ "fstx.d\t%1,%0", ++ "vstx\t%w1,%0" + }, + { + "fldx.s\t%0,%1", +- "fldx.d\t%0,%1" +- }, ++ "fldx.d\t%0,%1", ++ "vldx\t%w0,%1" ++ } + }; + + return insn[ldr][index-2]; + } ++/* Return true if a 128-bit move from SRC to DEST should be split. */ ++ ++bool ++loongarch_split_128bit_move_p (rtx dest, rtx src) ++{ ++ /* LSX-to-LSX moves can be done in a single instruction. */ ++ if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest)) ++ return false; ++ ++ /* Check for LSX loads and stores. */ ++ if (FP_REG_RTX_P (dest) && MEM_P (src)) ++ return false; ++ if (FP_REG_RTX_P (src) && MEM_P (dest)) ++ return false; ++ ++ /* Check for LSX set to an immediate const vector with valid replicated ++ element. */ ++ if (FP_REG_RTX_P (dest) ++ && loongarch_const_vector_same_int_p (src, GET_MODE (src), -512, 511)) ++ return false; ++ ++ /* Check for LSX load zero immediate. */ ++ if (FP_REG_RTX_P (dest) && src == CONST0_RTX (GET_MODE (src))) ++ return false; ++ ++ return true; ++} ++ ++/* Split a 128-bit move from SRC to DEST. 
*/ ++ ++void ++loongarch_split_128bit_move (rtx dest, rtx src) ++{ ++ int byte, index; ++ rtx low_dest, low_src, d, s; ++ ++ if (FP_REG_RTX_P (dest)) ++ { ++ gcc_assert (!MEM_P (src)); ++ ++ rtx new_dest = dest; ++ if (!TARGET_64BIT) ++ { ++ if (GET_MODE (dest) != V4SImode) ++ new_dest = simplify_gen_subreg (V4SImode, dest, GET_MODE (dest), 0); ++ } ++ else ++ { ++ if (GET_MODE (dest) != V2DImode) ++ new_dest = simplify_gen_subreg (V2DImode, dest, GET_MODE (dest), 0); ++ } ++ ++ for (byte = 0, index = 0; byte < GET_MODE_SIZE (TImode); ++ byte += UNITS_PER_WORD, index++) ++ { ++ s = loongarch_subword_at_byte (src, byte); ++ if (!TARGET_64BIT) ++ emit_insn (gen_lsx_vinsgr2vr_w (new_dest, s, new_dest, ++ GEN_INT (1 << index))); ++ else ++ emit_insn (gen_lsx_vinsgr2vr_d (new_dest, s, new_dest, ++ GEN_INT (1 << index))); ++ } ++ } ++ else if (FP_REG_RTX_P (src)) ++ { ++ gcc_assert (!MEM_P (dest)); ++ ++ rtx new_src = src; ++ if (!TARGET_64BIT) ++ { ++ if (GET_MODE (src) != V4SImode) ++ new_src = simplify_gen_subreg (V4SImode, src, GET_MODE (src), 0); ++ } ++ else ++ { ++ if (GET_MODE (src) != V2DImode) ++ new_src = simplify_gen_subreg (V2DImode, src, GET_MODE (src), 0); ++ } ++ ++ for (byte = 0, index = 0; byte < GET_MODE_SIZE (TImode); ++ byte += UNITS_PER_WORD, index++) ++ { ++ d = loongarch_subword_at_byte (dest, byte); ++ if (!TARGET_64BIT) ++ emit_insn (gen_lsx_vpickve2gr_w (d, new_src, GEN_INT (index))); ++ else ++ emit_insn (gen_lsx_vpickve2gr_d (d, new_src, GEN_INT (index))); ++ } ++ } ++ else ++ { ++ low_dest = loongarch_subword_at_byte (dest, 0); ++ low_src = loongarch_subword_at_byte (src, 0); ++ gcc_assert (REG_P (low_dest) && REG_P (low_src)); ++ /* Make sure the source register is not written before reading. 
*/ ++ if (REGNO (low_dest) <= REGNO (low_src)) ++ { ++ for (byte = 0; byte < GET_MODE_SIZE (TImode); ++ byte += UNITS_PER_WORD) ++ { ++ d = loongarch_subword_at_byte (dest, byte); ++ s = loongarch_subword_at_byte (src, byte); ++ loongarch_emit_move (d, s); ++ } ++ } ++ else ++ { ++ for (byte = GET_MODE_SIZE (TImode) - UNITS_PER_WORD; byte >= 0; ++ byte -= UNITS_PER_WORD) ++ { ++ d = loongarch_subword_at_byte (dest, byte); ++ s = loongarch_subword_at_byte (src, byte); ++ loongarch_emit_move (d, s); ++ } ++ } ++ } ++} ++ ++ ++/* Split a COPY_S.D with operands DEST, SRC and INDEX. GEN is a function ++ used to generate subregs. */ ++ ++void ++loongarch_split_lsx_copy_d (rtx dest, rtx src, rtx index, ++ rtx (*gen_fn)(rtx, rtx, rtx)) ++{ ++ gcc_assert ((GET_MODE (src) == V2DImode && GET_MODE (dest) == DImode) ++ || (GET_MODE (src) == V2DFmode && GET_MODE (dest) == DFmode)); ++ ++ /* Note that low is always from the lower index, and high is always ++ from the higher index. */ ++ rtx low = loongarch_subword (dest, false); ++ rtx high = loongarch_subword (dest, true); ++ rtx new_src = simplify_gen_subreg (V4SImode, src, GET_MODE (src), 0); ++ ++ emit_insn (gen_fn (low, new_src, GEN_INT (INTVAL (index) * 2))); ++ emit_insn (gen_fn (high, new_src, GEN_INT (INTVAL (index) * 2 + 1))); ++} ++ ++/* Split a INSERT.D with operand DEST, SRC1.INDEX and SRC2. */ ++ ++void ++loongarch_split_lsx_insert_d (rtx dest, rtx src1, rtx index, rtx src2) ++{ ++ int i; ++ gcc_assert (GET_MODE (dest) == GET_MODE (src1)); ++ gcc_assert ((GET_MODE (dest) == V2DImode ++ && (GET_MODE (src2) == DImode || src2 == const0_rtx)) ++ || (GET_MODE (dest) == V2DFmode && GET_MODE (src2) == DFmode)); ++ ++ /* Note that low is always from the lower index, and high is always ++ from the higher index. 
*/ ++ rtx low = loongarch_subword (src2, false); ++ rtx high = loongarch_subword (src2, true); ++ rtx new_dest = simplify_gen_subreg (V4SImode, dest, GET_MODE (dest), 0); ++ rtx new_src1 = simplify_gen_subreg (V4SImode, src1, GET_MODE (src1), 0); ++ i = exact_log2 (INTVAL (index)); ++ gcc_assert (i != -1); ++ ++ emit_insn (gen_lsx_vinsgr2vr_w (new_dest, low, new_src1, ++ GEN_INT (1 << (i * 2)))); ++ emit_insn (gen_lsx_vinsgr2vr_w (new_dest, high, new_dest, ++ GEN_INT (1 << (i * 2 + 1)))); ++} ++ ++/* Split FILL.D. */ ++ ++void ++loongarch_split_lsx_fill_d (rtx dest, rtx src) ++{ ++ gcc_assert ((GET_MODE (dest) == V2DImode ++ && (GET_MODE (src) == DImode || src == const0_rtx)) ++ || (GET_MODE (dest) == V2DFmode && GET_MODE (src) == DFmode)); ++ ++ /* Note that low is always from the lower index, and high is always ++ from the higher index. */ ++ rtx low, high; ++ if (src == const0_rtx) ++ { ++ low = src; ++ high = src; ++ } ++ else ++ { ++ low = loongarch_subword (src, false); ++ high = loongarch_subword (src, true); ++ } ++ rtx new_dest = simplify_gen_subreg (V4SImode, dest, GET_MODE (dest), 0); ++ emit_insn (gen_lsx_vreplgr2vr_w (new_dest, low)); ++ emit_insn (gen_lsx_vinsgr2vr_w (new_dest, high, new_dest, GEN_INT (1 << 1))); ++ emit_insn (gen_lsx_vinsgr2vr_w (new_dest, high, new_dest, GEN_INT (1 << 3))); ++} ++ + + /* Return the appropriate instructions to move SRC into DEST. Assume + that SRC is operand 1 and DEST is operand 0. 
*/ +@@ -3892,10 +4349,25 @@ loongarch_output_move (rtx dest, rtx src) + enum rtx_code src_code = GET_CODE (src); + machine_mode mode = GET_MODE (dest); + bool dbl_p = (GET_MODE_SIZE (mode) == 8); ++ bool lsx_p = LSX_SUPPORTED_MODE_P (mode); + + if (loongarch_split_move_p (dest, src)) + return "#"; + ++ if ((lsx_p) ++ && dest_code == REG && FP_REG_P (REGNO (dest)) ++ && src_code == CONST_VECTOR ++ && CONST_INT_P (CONST_VECTOR_ELT (src, 0))) ++ { ++ gcc_assert (loongarch_const_vector_same_int_p (src, mode, -512, 511)); ++ switch (GET_MODE_SIZE (mode)) ++ { ++ case 16: ++ return "vrepli.%v0\t%w0,%E1"; ++ default: gcc_unreachable (); ++ } ++ } ++ + if ((src_code == REG && GP_REG_P (REGNO (src))) + || (src == CONST0_RTX (mode))) + { +@@ -3905,7 +4377,21 @@ loongarch_output_move (rtx dest, rtx src) + return "or\t%0,%z1,$r0"; + + if (FP_REG_P (REGNO (dest))) +- return dbl_p ? "movgr2fr.d\t%0,%z1" : "movgr2fr.w\t%0,%z1"; ++ { ++ if (lsx_p) ++ { ++ gcc_assert (src == CONST0_RTX (GET_MODE (src))); ++ switch (GET_MODE_SIZE (mode)) ++ { ++ case 16: ++ return "vrepli.b\t%w0,0"; ++ default: ++ gcc_unreachable (); ++ } ++ } ++ ++ return dbl_p ? "movgr2fr.d\t%0,%z1" : "movgr2fr.w\t%0,%z1"; ++ } + } + if (dest_code == MEM) + { +@@ -3947,7 +4433,10 @@ loongarch_output_move (rtx dest, rtx src) + { + if (src_code == REG) + if (FP_REG_P (REGNO (src))) +- return dbl_p ? "movfr2gr.d\t%0,%1" : "movfr2gr.s\t%0,%1"; ++ { ++ gcc_assert (!lsx_p); ++ return dbl_p ? "movfr2gr.d\t%0,%1" : "movfr2gr.s\t%0,%1"; ++ } + + if (src_code == MEM) + { +@@ -3992,7 +4481,7 @@ loongarch_output_move (rtx dest, rtx src) + enum loongarch_symbol_type type = SYMBOL_PCREL; + + if (UNSPEC_ADDRESS_P (x)) +- type = UNSPEC_ADDRESS_TYPE (x); ++ type = UNSPEC_ADDRESS_TYPE (x); + + if (type == SYMBOL_TLS_LE) + return "lu12i.w\t%0,%h1"; +@@ -4027,7 +4516,20 @@ loongarch_output_move (rtx dest, rtx src) + if (src_code == REG && FP_REG_P (REGNO (src))) + { + if (dest_code == REG && FP_REG_P (REGNO (dest))) +- return dbl_p ? 
"fmov.d\t%0,%1" : "fmov.s\t%0,%1"; ++ { ++ if (lsx_p) ++ { ++ switch (GET_MODE_SIZE (mode)) ++ { ++ case 16: ++ return "vori.b\t%w0,%w1,0"; ++ default: ++ gcc_unreachable (); ++ } ++ } ++ ++ return dbl_p ? "fmov.d\t%0,%1" : "fmov.s\t%0,%1"; ++ } + + if (dest_code == MEM) + { +@@ -4038,6 +4540,17 @@ loongarch_output_move (rtx dest, rtx src) + if (insn) + return insn; + ++ if (lsx_p) ++ { ++ switch (GET_MODE_SIZE (mode)) ++ { ++ case 16: ++ return "vst\t%w1,%0"; ++ default: ++ gcc_unreachable (); ++ } ++ } ++ + return dbl_p ? "fst.d\t%1,%0" : "fst.s\t%1,%0"; + } + } +@@ -4053,6 +4566,16 @@ loongarch_output_move (rtx dest, rtx src) + if (insn) + return insn; + ++ if (lsx_p) ++ { ++ switch (GET_MODE_SIZE (mode)) ++ { ++ case 16: ++ return "vld\t%w0,%1"; ++ default: ++ gcc_unreachable (); ++ } ++ } + return dbl_p ? "fld.d\t%0,%1" : "fld.s\t%0,%1"; + } + } +@@ -4252,6 +4775,7 @@ loongarch_extend_comparands (rtx_code code, rtx *op0, rtx *op1) + } + } + ++ + /* Convert a comparison into something that can be used in a branch. On + entry, *OP0 and *OP1 are the values being compared and *CODE is the code + used to compare them. Update them to describe the final comparison. */ +@@ -5056,9 +5580,12 @@ loongarch_print_operand_reloc (FILE *file, rtx op, bool hi64_part, + + 'A' Print a _DB suffix if the memory model requires a release. + 'b' Print the address of a memory operand, without offset. ++ 'B' Print CONST_INT OP element 0 of a replicated CONST_VECTOR ++ as an unsigned byte [0..255]. + 'c' Print an integer. + 'C' Print the integer branch condition for comparison OP. + 'd' Print CONST_INT OP in decimal. ++ 'E' Print CONST_INT OP element 0 of a replicated CONST_VECTOR in decimal. + 'F' Print the FPU branch condition for comparison OP. + 'G' Print a DBAR insn if the memory model requires a release. + 'H' Print address 52-61bit relocation associated with OP. 
+@@ -5074,13 +5601,16 @@ loongarch_print_operand_reloc (FILE *file, rtx op, bool hi64_part, + 't' Like 'T', but with the EQ/NE cases reversed + 'V' Print exact log2 of CONST_INT OP element 0 of a replicated + CONST_VECTOR in decimal. ++ 'v' Print the insn size suffix b, h, w or d for vector modes V16QI, V8HI, ++ V4SI, V2SI, and w, d for vector modes V4SF, V2DF respectively. + 'W' Print the inverse of the FPU branch condition for comparison OP. ++ 'w' Print a LSX register. + 'X' Print CONST_INT OP in hexadecimal format. + 'x' Print the low 16 bits of CONST_INT OP in hexadecimal format. + 'Y' Print loongarch_fp_conditions[INTVAL (OP)] + 'y' Print exact log2 of CONST_INT OP in decimal. + 'Z' Print OP and a comma for 8CC, otherwise print nothing. +- 'z' Print $0 if OP is zero, otherwise print OP normally. */ ++ 'z' Print $r0 if OP is zero, otherwise print OP normally. */ + + static void + loongarch_print_operand (FILE *file, rtx op, int letter) +@@ -5102,6 +5632,18 @@ loongarch_print_operand (FILE *file, rtx op, int letter) + if (loongarch_memmodel_needs_rel_acq_fence ((enum memmodel) INTVAL (op))) + fputs ("_db", file); + break; ++ case 'E': ++ if (GET_CODE (op) == CONST_VECTOR) ++ { ++ gcc_assert (loongarch_const_vector_same_val_p (op, GET_MODE (op))); ++ op = CONST_VECTOR_ELT (op, 0); ++ gcc_assert (CONST_INT_P (op)); ++ fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op)); ++ } ++ else ++ output_operand_lossage ("invalid use of '%%%c'", letter); ++ break; ++ + + case 'c': + if (CONST_INT_P (op)) +@@ -5152,6 +5694,18 @@ loongarch_print_operand (FILE *file, rtx op, int letter) + loongarch_print_operand_reloc (file, op, false /* hi64_part*/, + false /* lo_reloc */); + break; ++ case 'B': ++ if (GET_CODE (op) == CONST_VECTOR) ++ { ++ gcc_assert (loongarch_const_vector_same_val_p (op, GET_MODE (op))); ++ op = CONST_VECTOR_ELT (op, 0); ++ gcc_assert (CONST_INT_P (op)); ++ unsigned HOST_WIDE_INT val8 = UINTVAL (op) & GET_MODE_MASK (QImode); ++ fprintf (file, 
HOST_WIDE_INT_PRINT_UNSIGNED, val8); ++ } ++ else ++ output_operand_lossage ("invalid use of '%%%c'", letter); ++ break; + + case 'm': + if (CONST_INT_P (op)) +@@ -5198,10 +5752,45 @@ loongarch_print_operand (FILE *file, rtx op, int letter) + output_operand_lossage ("invalid use of '%%%c'", letter); + break; + +- case 'W': +- loongarch_print_float_branch_condition (file, reverse_condition (code), +- letter); +- break; ++ case 'v': ++ switch (GET_MODE (op)) ++ { ++ case E_V16QImode: ++ case E_V32QImode: ++ fprintf (file, "b"); ++ break; ++ case E_V8HImode: ++ case E_V16HImode: ++ fprintf (file, "h"); ++ break; ++ case E_V4SImode: ++ case E_V4SFmode: ++ case E_V8SImode: ++ case E_V8SFmode: ++ fprintf (file, "w"); ++ break; ++ case E_V2DImode: ++ case E_V2DFmode: ++ case E_V4DImode: ++ case E_V4DFmode: ++ fprintf (file, "d"); ++ break; ++ default: ++ output_operand_lossage ("invalid use of '%%%c'", letter); ++ } ++ break; ++ ++ case 'W': ++ loongarch_print_float_branch_condition (file, reverse_condition (code), ++ letter); ++ break; ++ ++ case 'w': ++ if (code == REG && LSX_REG_P (REGNO (op))) ++ fprintf (file, "$vr%s", ®_names[REGNO (op)][2]); ++ else ++ output_operand_lossage ("invalid use of '%%%c'", letter); ++ break; + + case 'x': + if (CONST_INT_P (op)) +@@ -5574,9 +6163,13 @@ loongarch_hard_regno_mode_ok_uncached (unsigned int regno, machine_mode mode) + size = GET_MODE_SIZE (mode); + mclass = GET_MODE_CLASS (mode); + +- if (GP_REG_P (regno)) ++ if (GP_REG_P (regno) && !LSX_SUPPORTED_MODE_P (mode)) + return ((regno - GP_REG_FIRST) & 1) == 0 || size <= UNITS_PER_WORD; + ++ /* For LSX, allow TImode and 128-bit vector modes in all FPR. 
*/ ++ if (FP_REG_P (regno) && LSX_SUPPORTED_MODE_P (mode)) ++ return true; ++ + if (FP_REG_P (regno)) + { + if (mclass == MODE_FLOAT +@@ -5603,6 +6196,17 @@ loongarch_hard_regno_mode_ok (unsigned int regno, machine_mode mode) + return loongarch_hard_regno_mode_ok_p[mode][regno]; + } + ++ ++static bool ++loongarch_hard_regno_call_part_clobbered (unsigned int, ++ unsigned int regno, machine_mode mode) ++{ ++ if (ISA_HAS_LSX && FP_REG_P (regno) && GET_MODE_SIZE (mode) > 8) ++ return true; ++ ++ return false; ++} ++ + /* Implement TARGET_HARD_REGNO_NREGS. */ + + static unsigned int +@@ -5614,7 +6218,12 @@ loongarch_hard_regno_nregs (unsigned int regno, machine_mode mode) + return (GET_MODE_SIZE (mode) + 3) / 4; + + if (FP_REG_P (regno)) +- return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG; ++ { ++ if (LSX_SUPPORTED_MODE_P (mode)) ++ return 1; ++ ++ return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG; ++ } + + /* All other registers are word-sized. */ + return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD; +@@ -5641,8 +6250,12 @@ loongarch_class_max_nregs (enum reg_class rclass, machine_mode mode) + if (hard_reg_set_intersect_p (left, reg_class_contents[(int) FP_REGS])) + { + if (loongarch_hard_regno_mode_ok (FP_REG_FIRST, mode)) +- size = MIN (size, UNITS_PER_FPREG); +- ++ { ++ if (LSX_SUPPORTED_MODE_P (mode)) ++ size = MIN (size, UNITS_PER_LSX_REG); ++ else ++ size = MIN (size, UNITS_PER_FPREG); ++ } + left &= ~reg_class_contents[FP_REGS]; + } + if (!hard_reg_set_empty_p (left)) +@@ -5653,9 +6266,13 @@ loongarch_class_max_nregs (enum reg_class rclass, machine_mode mode) + /* Implement TARGET_CAN_CHANGE_MODE_CLASS. */ + + static bool +-loongarch_can_change_mode_class (machine_mode, machine_mode, ++loongarch_can_change_mode_class (machine_mode from, machine_mode to, + reg_class_t rclass) + { ++ /* Allow conversions between different LSX vector modes. 
*/ ++ if (LSX_SUPPORTED_MODE_P (from) && LSX_SUPPORTED_MODE_P (to)) ++ return true; ++ + return !reg_classes_intersect_p (FP_REGS, rclass); + } + +@@ -5675,7 +6292,7 @@ loongarch_mode_ok_for_mov_fmt_p (machine_mode mode) + return TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT; + + default: +- return 0; ++ return LSX_SUPPORTED_MODE_P (mode); + } + } + +@@ -5832,7 +6449,12 @@ loongarch_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x, + if (regno < 0 + || (MEM_P (x) + && (GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8))) +- /* In this case we can use fld.s, fst.s, fld.d or fst.d. */ ++ /* In this case we can use lwc1, swc1, ldc1 or sdc1. We'll use ++ pairs of lwc1s and swc1s if ldc1 and sdc1 are not supported. */ ++ return NO_REGS; ++ ++ if (MEM_P (x) && LSX_SUPPORTED_MODE_P (mode)) ++ /* In this case we can use LSX LD.* and ST.*. */ + return NO_REGS; + + if (GP_REG_P (regno) || x == CONST0_RTX (mode)) +@@ -5867,6 +6489,14 @@ loongarch_valid_pointer_mode (scalar_int_mode mode) + return mode == SImode || (TARGET_64BIT && mode == DImode); + } + ++/* Implement TARGET_VECTOR_MODE_SUPPORTED_P. */ ++ ++static bool ++loongarch_vector_mode_supported_p (machine_mode mode) ++{ ++ return LSX_SUPPORTED_MODE_P (mode); ++} ++ + /* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */ + + static bool +@@ -5879,6 +6509,48 @@ loongarch_scalar_mode_supported_p (scalar_mode mode) + return default_scalar_mode_supported_p (mode); + } + ++/* Implement TARGET_VECTORIZE_PREFERRED_SIMD_MODE. 
*/ ++ ++static machine_mode ++loongarch_preferred_simd_mode (scalar_mode mode) ++{ ++ if (!ISA_HAS_LSX) ++ return word_mode; ++ ++ switch (mode) ++ { ++ case E_QImode: ++ return E_V16QImode; ++ case E_HImode: ++ return E_V8HImode; ++ case E_SImode: ++ return E_V4SImode; ++ case E_DImode: ++ return E_V2DImode; ++ ++ case E_SFmode: ++ return E_V4SFmode; ++ ++ case E_DFmode: ++ return E_V2DFmode; ++ ++ default: ++ break; ++ } ++ return word_mode; ++} ++ ++static unsigned int ++loongarch_autovectorize_vector_modes (vector_modes *modes, bool) ++{ ++ if (ISA_HAS_LSX) ++ { ++ modes->safe_push (V16QImode); ++ } ++ ++ return 0; ++} ++ + /* Return the assembly code for INSN, which has the operands given by + OPERANDS, and which branches to OPERANDS[0] if some condition is true. + BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[0] +@@ -6043,6 +6715,29 @@ loongarch_output_division (const char *division, rtx *operands) + return s; + } + ++/* Return the assembly code for LSX DIV_{S,U}.DF or MOD_{S,U}.DF instructions, ++ which has the operands given by OPERANDS. Add in a divide-by-zero check ++ if needed. */ ++ ++const char * ++loongarch_lsx_output_division (const char *division, rtx *operands) ++{ ++ const char *s; ++ ++ s = division; ++ if (TARGET_CHECK_ZERO_DIV) ++ { ++ if (ISA_HAS_LSX) ++ { ++ output_asm_insn ("vsetallnez.%v0\t$fcc7,%w2",operands); ++ output_asm_insn (s, operands); ++ output_asm_insn ("bcnez\t$fcc7,1f", operands); ++ } ++ s = "break\t7\n1:"; ++ } ++ return s; ++} ++ + /* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output + dependencies have no cost. 
*/ + +@@ -6323,6 +7018,9 @@ loongarch_option_override_internal (struct gcc_options *opts, + if (TARGET_DIRECT_EXTERN_ACCESS && flag_shlib) + error ("%qs cannot be used for compiling a shared library", + "-mdirect-extern-access"); ++ if (loongarch_vector_access_cost == 0) ++ loongarch_vector_access_cost = 5; ++ + + switch (la_target.cmodel) + { +@@ -6541,64 +7239,60 @@ loongarch_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value) + emit_insn (gen_clear_cache (addr, end_addr)); + } + +-/* Implement HARD_REGNO_CALLER_SAVE_MODE. */ +- +-machine_mode +-loongarch_hard_regno_caller_save_mode (unsigned int regno, unsigned int nregs, +- machine_mode mode) +-{ +- /* For performance, avoid saving/restoring upper parts of a register +- by returning MODE as save mode when the mode is known. */ +- if (mode == VOIDmode) +- return choose_hard_reg_mode (regno, nregs, NULL); +- else +- return mode; +-} ++/* Generate or test for an insn that supports a constant permutation. */ + +-/* Implement TARGET_SPILL_CLASS. */ ++#define MAX_VECT_LEN 32 + +-static reg_class_t +-loongarch_spill_class (reg_class_t rclass ATTRIBUTE_UNUSED, +- machine_mode mode ATTRIBUTE_UNUSED) ++struct expand_vec_perm_d + { +- return NO_REGS; +-} +- +-/* Implement TARGET_PROMOTE_FUNCTION_MODE. */ ++ rtx target, op0, op1; ++ unsigned char perm[MAX_VECT_LEN]; ++ machine_mode vmode; ++ unsigned char nelt; ++ bool one_vector_p; ++ bool testing_p; ++}; + +-/* This function is equivalent to default_promote_function_mode_always_promote +- except that it returns a promoted mode even if type is NULL_TREE. This is +- needed by libcalls which have no type (only a mode) such as fixed conversion +- routines that take a signed or unsigned char/short argument and convert it +- to a fixed type. */ ++/* Construct (set target (vec_select op0 (parallel perm))) and ++ return true if that's a valid instruction in the active ISA. 
*/ + +-static machine_mode +-loongarch_promote_function_mode (const_tree type ATTRIBUTE_UNUSED, +- machine_mode mode, +- int *punsignedp ATTRIBUTE_UNUSED, +- const_tree fntype ATTRIBUTE_UNUSED, +- int for_return ATTRIBUTE_UNUSED) ++static bool ++loongarch_expand_vselect (rtx target, rtx op0, ++ const unsigned char *perm, unsigned nelt) + { +- int unsignedp; ++ rtx rperm[MAX_VECT_LEN], x; ++ rtx_insn *insn; ++ unsigned i; + +- if (type != NULL_TREE) +- return promote_mode (type, mode, punsignedp); ++ for (i = 0; i < nelt; ++i) ++ rperm[i] = GEN_INT (perm[i]); + +- unsignedp = *punsignedp; +- PROMOTE_MODE (mode, unsignedp, type); +- *punsignedp = unsignedp; +- return mode; ++ x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelt, rperm)); ++ x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x); ++ x = gen_rtx_SET (target, x); ++ ++ insn = emit_insn (x); ++ if (recog_memoized (insn) < 0) ++ { ++ remove_insn (insn); ++ return false; ++ } ++ return true; + } + +-/* Implement TARGET_STARTING_FRAME_OFFSET. See loongarch_compute_frame_info +- for details about the frame layout. */ ++/* Similar, but generate a vec_concat from op0 and op1 as well. 
*/ + +-static HOST_WIDE_INT +-loongarch_starting_frame_offset (void) ++static bool ++loongarch_expand_vselect_vconcat (rtx target, rtx op0, rtx op1, ++ const unsigned char *perm, unsigned nelt) + { +- if (FRAME_GROWS_DOWNWARD) +- return 0; +- return crtl->outgoing_args_size; ++ machine_mode v2mode; ++ rtx x; ++ ++ if (!GET_MODE_2XWIDER_MODE (GET_MODE (op0)).exists (&v2mode)) ++ return false; ++ x = gen_rtx_VEC_CONCAT (v2mode, op0, op1); ++ return loongarch_expand_vselect (target, x, perm, nelt); + } + + static tree +@@ -6861,105 +7555,1291 @@ loongarch_set_handled_components (sbitmap components) + #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t" + #undef TARGET_ASM_ALIGNED_DI_OP + #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t" ++/* Construct (set target (vec_select op0 (parallel selector))) and ++ return true if that's a valid instruction in the active ISA. */ + +-#undef TARGET_OPTION_OVERRIDE +-#define TARGET_OPTION_OVERRIDE loongarch_option_override +- +-#undef TARGET_LEGITIMIZE_ADDRESS +-#define TARGET_LEGITIMIZE_ADDRESS loongarch_legitimize_address +- +-#undef TARGET_ASM_SELECT_RTX_SECTION +-#define TARGET_ASM_SELECT_RTX_SECTION loongarch_select_rtx_section +-#undef TARGET_ASM_FUNCTION_RODATA_SECTION +-#define TARGET_ASM_FUNCTION_RODATA_SECTION loongarch_function_rodata_section ++static bool ++loongarch_expand_lsx_shuffle (struct expand_vec_perm_d *d) ++{ ++ rtx x, elts[MAX_VECT_LEN]; ++ rtvec v; ++ rtx_insn *insn; ++ unsigned i; + +-#undef TARGET_SCHED_INIT +-#define TARGET_SCHED_INIT loongarch_sched_init +-#undef TARGET_SCHED_REORDER +-#define TARGET_SCHED_REORDER loongarch_sched_reorder +-#undef TARGET_SCHED_REORDER2 +-#define TARGET_SCHED_REORDER2 loongarch_sched_reorder2 +-#undef TARGET_SCHED_VARIABLE_ISSUE +-#define TARGET_SCHED_VARIABLE_ISSUE loongarch_variable_issue +-#undef TARGET_SCHED_ADJUST_COST +-#define TARGET_SCHED_ADJUST_COST loongarch_adjust_cost +-#undef TARGET_SCHED_ISSUE_RATE +-#define TARGET_SCHED_ISSUE_RATE loongarch_issue_rate +-#undef 
TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD +-#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \ +- loongarch_multipass_dfa_lookahead ++ if (!ISA_HAS_LSX) ++ return false; + +-#undef TARGET_FUNCTION_OK_FOR_SIBCALL +-#define TARGET_FUNCTION_OK_FOR_SIBCALL loongarch_function_ok_for_sibcall ++ for (i = 0; i < d->nelt; i++) ++ elts[i] = GEN_INT (d->perm[i]); + +-#undef TARGET_VALID_POINTER_MODE +-#define TARGET_VALID_POINTER_MODE loongarch_valid_pointer_mode +-#undef TARGET_REGISTER_MOVE_COST +-#define TARGET_REGISTER_MOVE_COST loongarch_register_move_cost +-#undef TARGET_MEMORY_MOVE_COST +-#define TARGET_MEMORY_MOVE_COST loongarch_memory_move_cost +-#undef TARGET_RTX_COSTS +-#define TARGET_RTX_COSTS loongarch_rtx_costs +-#undef TARGET_ADDRESS_COST +-#define TARGET_ADDRESS_COST loongarch_address_cost ++ v = gen_rtvec_v (d->nelt, elts); ++ x = gen_rtx_PARALLEL (VOIDmode, v); + +-#undef TARGET_IN_SMALL_DATA_P +-#define TARGET_IN_SMALL_DATA_P loongarch_in_small_data_p ++ if (!loongarch_const_vector_shuffle_set_p (x, d->vmode)) ++ return false; + +-#undef TARGET_PREFERRED_RELOAD_CLASS +-#define TARGET_PREFERRED_RELOAD_CLASS loongarch_preferred_reload_class ++ x = gen_rtx_VEC_SELECT (d->vmode, d->op0, x); ++ x = gen_rtx_SET (d->target, x); + +-#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE +-#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true ++ insn = emit_insn (x); ++ if (recog_memoized (insn) < 0) ++ { ++ remove_insn (insn); ++ return false; ++ } ++ return true; ++} + +-#undef TARGET_EXPAND_BUILTIN_VA_START +-#define TARGET_EXPAND_BUILTIN_VA_START loongarch_va_start ++void ++loongarch_expand_vec_perm (rtx target, rtx op0, rtx op1, rtx sel) ++{ ++ machine_mode vmode = GET_MODE (target); + +-#undef TARGET_PROMOTE_FUNCTION_MODE +-#define TARGET_PROMOTE_FUNCTION_MODE loongarch_promote_function_mode +-#undef TARGET_RETURN_IN_MEMORY +-#define TARGET_RETURN_IN_MEMORY loongarch_return_in_memory ++ switch (vmode) ++ { ++ case E_V16QImode: ++ emit_insn (gen_lsx_vshuf_b 
(target, op1, op0, sel)); ++ break; ++ case E_V2DFmode: ++ emit_insn (gen_lsx_vshuf_d_f (target, sel, op1, op0)); ++ break; ++ case E_V2DImode: ++ emit_insn (gen_lsx_vshuf_d (target, sel, op1, op0)); ++ break; ++ case E_V4SFmode: ++ emit_insn (gen_lsx_vshuf_w_f (target, sel, op1, op0)); ++ break; ++ case E_V4SImode: ++ emit_insn (gen_lsx_vshuf_w (target, sel, op1, op0)); ++ break; ++ case E_V8HImode: ++ emit_insn (gen_lsx_vshuf_h (target, sel, op1, op0)); ++ break; ++ default: ++ break; ++ } ++} + +-#undef TARGET_FUNCTION_VALUE +-#define TARGET_FUNCTION_VALUE loongarch_function_value +-#undef TARGET_LIBCALL_VALUE +-#define TARGET_LIBCALL_VALUE loongarch_libcall_value ++static bool ++loongarch_try_expand_lsx_vshuf_const (struct expand_vec_perm_d *d) ++{ ++ int i; ++ rtx target, op0, op1, sel, tmp; ++ rtx rperm[MAX_VECT_LEN]; + +-#undef TARGET_ASM_OUTPUT_MI_THUNK +-#define TARGET_ASM_OUTPUT_MI_THUNK loongarch_output_mi_thunk +-#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK +-#define TARGET_ASM_CAN_OUTPUT_MI_THUNK \ +- hook_bool_const_tree_hwi_hwi_const_tree_true ++ if (d->vmode == E_V2DImode || d->vmode == E_V2DFmode ++ || d->vmode == E_V4SImode || d->vmode == E_V4SFmode ++ || d->vmode == E_V8HImode || d->vmode == E_V16QImode) ++ { ++ target = d->target; ++ op0 = d->op0; ++ op1 = d->one_vector_p ? 
d->op0 : d->op1; + +-#undef TARGET_PRINT_OPERAND +-#define TARGET_PRINT_OPERAND loongarch_print_operand +-#undef TARGET_PRINT_OPERAND_ADDRESS +-#define TARGET_PRINT_OPERAND_ADDRESS loongarch_print_operand_address +-#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P +-#define TARGET_PRINT_OPERAND_PUNCT_VALID_P \ +- loongarch_print_operand_punct_valid_p ++ if (GET_MODE (op0) != GET_MODE (op1) ++ || GET_MODE (op0) != GET_MODE (target)) ++ return false; + +-#undef TARGET_SETUP_INCOMING_VARARGS +-#define TARGET_SETUP_INCOMING_VARARGS loongarch_setup_incoming_varargs +-#undef TARGET_STRICT_ARGUMENT_NAMING +-#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true +-#undef TARGET_MUST_PASS_IN_STACK +-#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size +-#undef TARGET_PASS_BY_REFERENCE +-#define TARGET_PASS_BY_REFERENCE loongarch_pass_by_reference +-#undef TARGET_ARG_PARTIAL_BYTES +-#define TARGET_ARG_PARTIAL_BYTES loongarch_arg_partial_bytes +-#undef TARGET_FUNCTION_ARG +-#define TARGET_FUNCTION_ARG loongarch_function_arg +-#undef TARGET_FUNCTION_ARG_ADVANCE +-#define TARGET_FUNCTION_ARG_ADVANCE loongarch_function_arg_advance +-#undef TARGET_FUNCTION_ARG_BOUNDARY +-#define TARGET_FUNCTION_ARG_BOUNDARY loongarch_function_arg_boundary ++ if (d->testing_p) ++ return true; + +-#undef TARGET_SCALAR_MODE_SUPPORTED_P +-#define TARGET_SCALAR_MODE_SUPPORTED_P loongarch_scalar_mode_supported_p ++ for (i = 0; i < d->nelt; i += 1) ++ { ++ rperm[i] = GEN_INT (d->perm[i]); ++ } + +-#undef TARGET_INIT_BUILTINS ++ if (d->vmode == E_V2DFmode) ++ { ++ sel = gen_rtx_CONST_VECTOR (E_V2DImode, gen_rtvec_v (d->nelt, rperm)); ++ tmp = gen_rtx_SUBREG (E_V2DImode, d->target, 0); ++ emit_move_insn (tmp, sel); ++ } ++ else if (d->vmode == E_V4SFmode) ++ { ++ sel = gen_rtx_CONST_VECTOR (E_V4SImode, gen_rtvec_v (d->nelt, rperm)); ++ tmp = gen_rtx_SUBREG (E_V4SImode, d->target, 0); ++ emit_move_insn (tmp, sel); ++ } ++ else ++ { ++ sel = gen_rtx_CONST_VECTOR (d->vmode, gen_rtvec_v 
(d->nelt, rperm)); ++ emit_move_insn (d->target, sel); ++ } ++ ++ switch (d->vmode) ++ { ++ case E_V2DFmode: ++ emit_insn (gen_lsx_vshuf_d_f (target, target, op1, op0)); ++ break; ++ case E_V2DImode: ++ emit_insn (gen_lsx_vshuf_d (target, target, op1, op0)); ++ break; ++ case E_V4SFmode: ++ emit_insn (gen_lsx_vshuf_w_f (target, target, op1, op0)); ++ break; ++ case E_V4SImode: ++ emit_insn (gen_lsx_vshuf_w (target, target, op1, op0)); ++ break; ++ case E_V8HImode: ++ emit_insn (gen_lsx_vshuf_h (target, target, op1, op0)); ++ break; ++ case E_V16QImode: ++ emit_insn (gen_lsx_vshuf_b (target, op1, op0, target)); ++ break; ++ default: ++ break; ++ } ++ ++ return true; ++ } ++ return false; ++} ++ ++static bool ++loongarch_expand_vec_perm_const_1 (struct expand_vec_perm_d *d) ++{ ++ unsigned int i, nelt = d->nelt; ++ unsigned char perm2[MAX_VECT_LEN]; ++ ++ if (d->one_vector_p) ++ { ++ /* Try interleave with alternating operands. */ ++ memcpy (perm2, d->perm, sizeof (perm2)); ++ for (i = 1; i < nelt; i += 2) ++ perm2[i] += nelt; ++ if (loongarch_expand_vselect_vconcat (d->target, d->op0, d->op1, perm2, ++ nelt)) ++ return true; ++ } ++ else ++ { ++ if (loongarch_expand_vselect_vconcat (d->target, d->op0, d->op1, ++ d->perm, nelt)) ++ return true; ++ ++ /* Try again with swapped operands. */ ++ for (i = 0; i < nelt; ++i) ++ perm2[i] = (d->perm[i] + nelt) & (2 * nelt - 1); ++ if (loongarch_expand_vselect_vconcat (d->target, d->op1, d->op0, perm2, ++ nelt)) ++ return true; ++ } ++ ++ if (loongarch_expand_lsx_shuffle (d)) ++ return true; ++ return false; ++} ++ ++/* Implementation of constant vector permuatation. This function identifies ++ * recognized pattern of permuation selector argument, and use one or more ++ * instruction(s) to finish the permutation job correctly. For unsupported ++ * patterns, it will return false. 
*/ ++ ++static bool ++loongarch_expand_vec_perm_const_2 (struct expand_vec_perm_d *d) ++{ ++ /* Although we have the LSX vec_perm template, there's still some ++ 128bit vector permuatation operations send to vectorize_vec_perm_const. ++ In this case, we just simpliy wrap them by single vshuf.* instruction, ++ because LSX vshuf.* instruction just have the same behavior that GCC ++ expects. */ ++ return loongarch_try_expand_lsx_vshuf_const (d); ++} ++ ++/* Implement TARGET_VECTORIZE_VEC_PERM_CONST. */ ++ ++static bool ++loongarch_vectorize_vec_perm_const (machine_mode vmode, machine_mode op_mode, ++ rtx target, rtx op0, rtx op1, ++ const vec_perm_indices &sel) ++{ ++ if (vmode != op_mode) ++ return false; ++ ++ struct expand_vec_perm_d d; ++ int i, nelt, which; ++ unsigned char orig_perm[MAX_VECT_LEN]; ++ bool ok; ++ ++ d.target = target; ++ if (op0) ++ { ++ rtx nop0 = force_reg (vmode, op0); ++ if (op0 == op1) ++ op1 = nop0; ++ op0 = nop0; ++ } ++ if (op1) ++ op1 = force_reg (vmode, op1); ++ d.op0 = op0; ++ d.op1 = op1; ++ ++ d.vmode = vmode; ++ gcc_assert (VECTOR_MODE_P (vmode)); ++ d.nelt = nelt = GET_MODE_NUNITS (vmode); ++ d.testing_p = !target; ++ ++ /* This is overly conservative, but ensures we don't get an ++ uninitialized warning on ORIG_PERM. */ ++ memset (orig_perm, 0, MAX_VECT_LEN); ++ for (i = which = 0; i < nelt; ++i) ++ { ++ int ei = sel[i] & (2 * nelt - 1); ++ which |= (ei < nelt ? 
1 : 2); ++ orig_perm[i] = ei; ++ } ++ memcpy (d.perm, orig_perm, MAX_VECT_LEN); ++ ++ switch (which) ++ { ++ default: ++ gcc_unreachable (); ++ ++ case 3: ++ d.one_vector_p = false; ++ if (d.testing_p || !rtx_equal_p (d.op0, d.op1)) ++ break; ++ /* FALLTHRU */ ++ ++ case 2: ++ for (i = 0; i < nelt; ++i) ++ d.perm[i] &= nelt - 1; ++ d.op0 = d.op1; ++ d.one_vector_p = true; ++ break; ++ ++ case 1: ++ d.op1 = d.op0; ++ d.one_vector_p = true; ++ break; ++ } ++ ++ if (d.testing_p) ++ { ++ d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1); ++ d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2); ++ if (!d.one_vector_p) ++ d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3); ++ ++ ok = loongarch_expand_vec_perm_const_2 (&d); ++ if (ok) ++ return ok; ++ ++ start_sequence (); ++ ok = loongarch_expand_vec_perm_const_1 (&d); ++ end_sequence (); ++ return ok; ++ } ++ ++ ok = loongarch_expand_vec_perm_const_2 (&d); ++ if (!ok) ++ ok = loongarch_expand_vec_perm_const_1 (&d); ++ ++ /* If we were given a two-vector permutation which just happened to ++ have both input vectors equal, we folded this into a one-vector ++ permutation. There are several loongson patterns that are matched ++ via direct vec_select+vec_concat expansion, but we do not have ++ support in loongarch_expand_vec_perm_const_1 to guess the adjustment ++ that should be made for a single operand. Just try again with ++ the original permutation. */ ++ if (!ok && which == 3) ++ { ++ d.op0 = op0; ++ d.op1 = op1; ++ d.one_vector_p = false; ++ memcpy (d.perm, orig_perm, MAX_VECT_LEN); ++ ok = loongarch_expand_vec_perm_const_1 (&d); ++ } ++ ++ return ok; ++} ++ ++static int ++loongarch_cpu_sched_reassociation_width (struct loongarch_target *target, ++ unsigned int opc, machine_mode mode) ++{ ++ /* unreferenced argument */ ++ (void) opc; ++ ++ switch (target->cpu_tune) ++ { ++ case CPU_LOONGARCH64: ++ case CPU_LA464: ++ /* Vector part. 
*/ ++ if (LSX_SUPPORTED_MODE_P (mode)) ++ { ++ /* Integer vector instructions execute in FP unit. ++ The width of integer/floating-point vector instructions is 3. */ ++ return 3; ++ } ++ ++ /* Scalar part. */ ++ else if (INTEGRAL_MODE_P (mode)) ++ return 1; ++ else if (FLOAT_MODE_P (mode)) ++ { ++ if (opc == PLUS_EXPR) ++ { ++ return 2; ++ } ++ return 4; ++ } ++ break; ++ default: ++ break; ++ } ++ ++ /* default is 1 */ ++ return 1; ++} ++ ++/* Implement TARGET_SCHED_REASSOCIATION_WIDTH. */ ++ ++static int ++loongarch_sched_reassociation_width (unsigned int opc, machine_mode mode) ++{ ++ return loongarch_cpu_sched_reassociation_width (&la_target, opc, mode); ++} ++ ++/* Implement extract a scalar element from vector register */ ++ ++void ++loongarch_expand_vector_extract (rtx target, rtx vec, int elt) ++{ ++ machine_mode mode = GET_MODE (vec); ++ machine_mode inner_mode = GET_MODE_INNER (mode); ++ rtx tmp; ++ ++ switch (mode) ++ { ++ case E_V8HImode: ++ case E_V16QImode: ++ break; ++ ++ default: ++ break; ++ } ++ ++ tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt))); ++ tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp); ++ ++ /* Let the rtl optimizers know about the zero extension performed. */ ++ if (inner_mode == QImode || inner_mode == HImode) ++ { ++ tmp = gen_rtx_ZERO_EXTEND (SImode, tmp); ++ target = gen_lowpart (SImode, target); ++ } ++ if (inner_mode == SImode || inner_mode == DImode) ++ { ++ tmp = gen_rtx_SIGN_EXTEND (inner_mode, tmp); ++ } ++ ++ emit_insn (gen_rtx_SET (target, tmp)); ++} ++ ++/* Generate code to copy vector bits i / 2 ... i - 1 from vector SRC ++ to bits 0 ... i / 2 - 1 of vector DEST, which has the same mode. ++ The upper bits of DEST are undefined, though they shouldn't cause ++ exceptions (some bits from src or all zeros are ok). */ ++ ++static void ++emit_reduc_half (rtx dest, rtx src, int i) ++{ ++ rtx tem, d = dest; ++ switch (GET_MODE (src)) ++ { ++ case E_V4SFmode: ++ tem = gen_lsx_vbsrl_w_f (dest, src, GEN_INT (i == 128 ?
8 : 4)); ++ break; ++ case E_V2DFmode: ++ tem = gen_lsx_vbsrl_d_f (dest, src, GEN_INT (8)); ++ break; ++ case E_V16QImode: ++ case E_V8HImode: ++ case E_V4SImode: ++ case E_V2DImode: ++ d = gen_reg_rtx (V2DImode); ++ tem = gen_lsx_vbsrl_d (d, gen_lowpart (V2DImode, src), GEN_INT (i/16)); ++ break; ++ default: ++ gcc_unreachable (); ++ } ++ emit_insn (tem); ++ if (d != dest) ++ emit_move_insn (dest, gen_lowpart (GET_MODE (dest), d)); ++} ++ ++/* Expand a vector reduction. FN is the binary pattern to reduce; ++ DEST is the destination; IN is the input vector. */ ++ ++void ++loongarch_expand_vector_reduc (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in) ++{ ++ rtx half, dst, vec = in; ++ machine_mode mode = GET_MODE (in); ++ int i; ++ ++ for (i = GET_MODE_BITSIZE (mode); ++ i > GET_MODE_UNIT_BITSIZE (mode); ++ i >>= 1) ++ { ++ half = gen_reg_rtx (mode); ++ emit_reduc_half (half, vec, i); ++ if (i == GET_MODE_UNIT_BITSIZE (mode) * 2) ++ dst = dest; ++ else ++ dst = gen_reg_rtx (mode); ++ emit_insn (fn (dst, half, vec)); ++ vec = dst; ++ } ++} ++ ++/* Expand an integral vector unpack operation. 
*/ ++ ++void ++loongarch_expand_vec_unpack (rtx operands[2], bool unsigned_p, bool high_p) ++{ ++ machine_mode imode = GET_MODE (operands[1]); ++ rtx (*unpack) (rtx, rtx, rtx); ++ rtx (*cmpFunc) (rtx, rtx, rtx); ++ rtx tmp, dest; ++ ++ if (ISA_HAS_LSX) ++ { ++ switch (imode) ++ { ++ case E_V4SImode: ++ if (high_p != 0) ++ unpack = gen_lsx_vilvh_w; ++ else ++ unpack = gen_lsx_vilvl_w; ++ ++ cmpFunc = gen_lsx_vslt_w; ++ break; ++ ++ case E_V8HImode: ++ if (high_p != 0) ++ unpack = gen_lsx_vilvh_h; ++ else ++ unpack = gen_lsx_vilvl_h; ++ ++ cmpFunc = gen_lsx_vslt_h; ++ break; ++ ++ case E_V16QImode: ++ if (high_p != 0) ++ unpack = gen_lsx_vilvh_b; ++ else ++ unpack = gen_lsx_vilvl_b; ++ ++ cmpFunc = gen_lsx_vslt_b; ++ break; ++ ++ default: ++ gcc_unreachable (); ++ break; ++ } ++ ++ if (!unsigned_p) ++ { ++ /* Extract sign extension for each element by comparing each element ++ with immediate zero. */ ++ tmp = gen_reg_rtx (imode); ++ emit_insn (cmpFunc (tmp, operands[1], CONST0_RTX (imode))); ++ } ++ else ++ tmp = force_reg (imode, CONST0_RTX (imode)); ++ ++ dest = gen_reg_rtx (imode); ++ ++ emit_insn (unpack (dest, operands[1], tmp)); ++ emit_move_insn (operands[0], gen_lowpart (GET_MODE (operands[0]), dest)); ++ return; ++ } ++ gcc_unreachable (); ++} ++ ++/* Construct and return PARALLEL RTX with CONST_INTs for HIGH (high_p == TRUE) ++ or LOW (high_p == FALSE) half of a vector for mode MODE. */ ++ ++rtx ++loongarch_lsx_vec_parallel_const_half (machine_mode mode, bool high_p) ++{ ++ int nunits = GET_MODE_NUNITS (mode); ++ rtvec v = rtvec_alloc (nunits / 2); ++ int base; ++ int i; ++ ++ base = high_p ? nunits / 2 : 0; ++ ++ for (i = 0; i < nunits / 2; i++) ++ RTVEC_ELT (v, i) = GEN_INT (base + i); ++ ++ return gen_rtx_PARALLEL (VOIDmode, v); ++} ++ ++/* A subroutine of loongarch_expand_vec_init, match constant vector ++ elements.
*/ ++ ++static inline bool ++loongarch_constant_elt_p (rtx x) ++{ ++ return CONST_INT_P (x) || GET_CODE (x) == CONST_DOUBLE; ++} ++ ++rtx ++loongarch_gen_const_int_vector_shuffle (machine_mode mode, int val) ++{ ++ int nunits = GET_MODE_NUNITS (mode); ++ int nsets = nunits / 4; ++ rtx elts[MAX_VECT_LEN]; ++ int set = 0; ++ int i, j; ++ ++ /* Generate a const_int vector replicating the same 4-element set ++ from an immediate. */ ++ for (j = 0; j < nsets; j++, set = 4 * j) ++ for (i = 0; i < 4; i++) ++ elts[set + i] = GEN_INT (set + ((val >> (2 * i)) & 0x3)); ++ ++ return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nunits, elts)); ++} ++ ++/* Expand a vector initialization. */ ++ ++void ++loongarch_expand_vector_init (rtx target, rtx vals) ++{ ++ machine_mode vmode = GET_MODE (target); ++ machine_mode imode = GET_MODE_INNER (vmode); ++ unsigned i, nelt = GET_MODE_NUNITS (vmode); ++ unsigned nvar = 0; ++ bool all_same = true; ++ rtx x; ++ ++ for (i = 0; i < nelt; ++i) ++ { ++ x = XVECEXP (vals, 0, i); ++ if (!loongarch_constant_elt_p (x)) ++ nvar++; ++ if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0))) ++ all_same = false; ++ } ++ ++ if (ISA_HAS_LSX) ++ { ++ if (all_same) ++ { ++ rtx same = XVECEXP (vals, 0, 0); ++ rtx temp, temp2; ++ ++ if (CONST_INT_P (same) && nvar == 0 ++ && loongarch_signed_immediate_p (INTVAL (same), 10, 0)) ++ { ++ switch (vmode) ++ { ++ case E_V16QImode: ++ case E_V8HImode: ++ case E_V4SImode: ++ case E_V2DImode: ++ temp = gen_rtx_CONST_VECTOR (vmode, XVEC (vals, 0)); ++ emit_move_insn (target, temp); ++ return; ++ ++ default: ++ gcc_unreachable (); ++ } ++ } ++ temp = gen_reg_rtx (imode); ++ if (imode == GET_MODE (same)) ++ temp2 = same; ++ else if (GET_MODE_SIZE (imode) >= UNITS_PER_WORD) ++ { ++ if (GET_CODE (same) == MEM) ++ { ++ rtx reg_tmp = gen_reg_rtx (GET_MODE (same)); ++ loongarch_emit_move (reg_tmp, same); ++ temp2 = simplify_gen_subreg (imode, reg_tmp, ++ GET_MODE (reg_tmp), 0); ++ } ++ else ++ temp2 = simplify_gen_subreg (imode, 
same, GET_MODE (same), 0); ++ } ++ else ++ { ++ if (GET_CODE (same) == MEM) ++ { ++ rtx reg_tmp = gen_reg_rtx (GET_MODE (same)); ++ loongarch_emit_move (reg_tmp, same); ++ temp2 = lowpart_subreg (imode, reg_tmp, GET_MODE (reg_tmp)); ++ } ++ else ++ temp2 = lowpart_subreg (imode, same, GET_MODE (same)); ++ } ++ emit_move_insn (temp, temp2); ++ ++ switch (vmode) ++ { ++ case E_V16QImode: ++ case E_V8HImode: ++ case E_V4SImode: ++ case E_V2DImode: ++ loongarch_emit_move (target, gen_rtx_VEC_DUPLICATE (vmode, temp)); ++ break; ++ ++ case E_V4SFmode: ++ emit_insn (gen_lsx_vreplvei_w_f_scalar (target, temp)); ++ break; ++ ++ case E_V2DFmode: ++ emit_insn (gen_lsx_vreplvei_d_f_scalar (target, temp)); ++ break; ++ ++ default: ++ gcc_unreachable (); ++ } ++ } ++ else ++ { ++ emit_move_insn (target, CONST0_RTX (vmode)); ++ ++ for (i = 0; i < nelt; ++i) ++ { ++ rtx temp = gen_reg_rtx (imode); ++ emit_move_insn (temp, XVECEXP (vals, 0, i)); ++ switch (vmode) ++ { ++ case E_V16QImode: ++ if (i == 0) ++ emit_insn (gen_lsx_vreplvei_b_scalar (target, temp)); ++ else ++ emit_insn (gen_vec_setv16qi (target, temp, GEN_INT (i))); ++ break; ++ ++ case E_V8HImode: ++ if (i == 0) ++ emit_insn (gen_lsx_vreplvei_h_scalar (target, temp)); ++ else ++ emit_insn (gen_vec_setv8hi (target, temp, GEN_INT (i))); ++ break; ++ ++ case E_V4SImode: ++ if (i == 0) ++ emit_insn (gen_lsx_vreplvei_w_scalar (target, temp)); ++ else ++ emit_insn (gen_vec_setv4si (target, temp, GEN_INT (i))); ++ break; ++ ++ case E_V2DImode: ++ if (i == 0) ++ emit_insn (gen_lsx_vreplvei_d_scalar (target, temp)); ++ else ++ emit_insn (gen_vec_setv2di (target, temp, GEN_INT (i))); ++ break; ++ ++ case E_V4SFmode: ++ if (i == 0) ++ emit_insn (gen_lsx_vreplvei_w_f_scalar (target, temp)); ++ else ++ emit_insn (gen_vec_setv4sf (target, temp, GEN_INT (i))); ++ break; ++ ++ case E_V2DFmode: ++ if (i == 0) ++ emit_insn (gen_lsx_vreplvei_d_f_scalar (target, temp)); ++ else ++ emit_insn (gen_vec_setv2df (target, temp, GEN_INT (i))); ++ 
break; ++ ++ default: ++ gcc_unreachable (); ++ } ++ } ++ } ++ return; ++ } ++ ++ /* Load constants from the pool, or whatever's handy. */ ++ if (nvar == 0) ++ { ++ emit_move_insn (target, gen_rtx_CONST_VECTOR (vmode, XVEC (vals, 0))); ++ return; ++ } ++ ++ /* For two-part initialization, always use CONCAT. */ ++ if (nelt == 2) ++ { ++ rtx op0 = force_reg (imode, XVECEXP (vals, 0, 0)); ++ rtx op1 = force_reg (imode, XVECEXP (vals, 0, 1)); ++ x = gen_rtx_VEC_CONCAT (vmode, op0, op1); ++ emit_insn (gen_rtx_SET (target, x)); ++ return; ++ } ++ ++ /* Loongson is the only cpu with vectors with more elements. */ ++ gcc_assert (0); ++} ++ ++/* Implement HARD_REGNO_CALLER_SAVE_MODE. */ ++ ++machine_mode ++loongarch_hard_regno_caller_save_mode (unsigned int regno, unsigned int nregs, ++ machine_mode mode) ++{ ++ /* For performance, avoid saving/restoring upper parts of a register ++ by returning MODE as save mode when the mode is known. */ ++ if (mode == VOIDmode) ++ return choose_hard_reg_mode (regno, nregs, NULL); ++ else ++ return mode; ++} ++ ++/* Generate RTL for comparing CMP_OP0 and CMP_OP1 using condition COND and ++ store the result -1 or 0 in DEST. 
*/ ++ ++static void ++loongarch_expand_lsx_cmp (rtx dest, enum rtx_code cond, rtx op0, rtx op1) ++{ ++ machine_mode cmp_mode = GET_MODE (op0); ++ int unspec = -1; ++ bool negate = false; ++ ++ switch (cmp_mode) ++ { ++ case E_V16QImode: ++ case E_V32QImode: ++ case E_V8HImode: ++ case E_V16HImode: ++ case E_V4SImode: ++ case E_V8SImode: ++ case E_V2DImode: ++ case E_V4DImode: ++ switch (cond) ++ { ++ case NE: ++ cond = reverse_condition (cond); ++ negate = true; ++ break; ++ case EQ: ++ case LT: ++ case LE: ++ case LTU: ++ case LEU: ++ break; ++ case GE: ++ case GT: ++ case GEU: ++ case GTU: ++ std::swap (op0, op1); ++ cond = swap_condition (cond); ++ break; ++ default: ++ gcc_unreachable (); ++ } ++ loongarch_emit_binary (cond, dest, op0, op1); ++ if (negate) ++ emit_move_insn (dest, gen_rtx_NOT (GET_MODE (dest), dest)); ++ break; ++ ++ case E_V4SFmode: ++ case E_V2DFmode: ++ switch (cond) ++ { ++ case UNORDERED: ++ case ORDERED: ++ case EQ: ++ case NE: ++ case UNEQ: ++ case UNLE: ++ case UNLT: ++ break; ++ case LTGT: cond = NE; break; ++ case UNGE: cond = UNLE; std::swap (op0, op1); break; ++ case UNGT: cond = UNLT; std::swap (op0, op1); break; ++ case LE: unspec = UNSPEC_LSX_VFCMP_SLE; break; ++ case LT: unspec = UNSPEC_LSX_VFCMP_SLT; break; ++ case GE: unspec = UNSPEC_LSX_VFCMP_SLE; std::swap (op0, op1); break; ++ case GT: unspec = UNSPEC_LSX_VFCMP_SLT; std::swap (op0, op1); break; ++ default: ++ gcc_unreachable (); ++ } ++ if (unspec < 0) ++ loongarch_emit_binary (cond, dest, op0, op1); ++ else ++ { ++ rtx x = gen_rtx_UNSPEC (GET_MODE (dest), ++ gen_rtvec (2, op0, op1), unspec); ++ emit_insn (gen_rtx_SET (dest, x)); ++ } ++ break; ++ ++ default: ++ gcc_unreachable (); ++ break; ++ } ++} ++ ++/* Expand VEC_COND_EXPR, where: ++ MODE is mode of the result ++ VIMODE equivalent integer mode ++ OPERANDS operands of VEC_COND_EXPR. 
*/ ++ ++void ++loongarch_expand_vec_cond_expr (machine_mode mode, machine_mode vimode, ++ rtx *operands) ++{ ++ rtx cond = operands[3]; ++ rtx cmp_op0 = operands[4]; ++ rtx cmp_op1 = operands[5]; ++ rtx cmp_res = gen_reg_rtx (vimode); ++ ++ loongarch_expand_lsx_cmp (cmp_res, GET_CODE (cond), cmp_op0, cmp_op1); ++ ++ /* We handle the following cases: ++ 1) r = a CMP b ? -1 : 0 ++ 2) r = a CMP b ? -1 : v ++ 3) r = a CMP b ? v : 0 ++ 4) r = a CMP b ? v1 : v2 */ ++ ++ /* Case (1) above. We only move the results. */ ++ if (operands[1] == CONSTM1_RTX (vimode) ++ && operands[2] == CONST0_RTX (vimode)) ++ emit_move_insn (operands[0], cmp_res); ++ else ++ { ++ rtx src1 = gen_reg_rtx (vimode); ++ rtx src2 = gen_reg_rtx (vimode); ++ rtx mask = gen_reg_rtx (vimode); ++ rtx bsel; ++ ++ /* Move the vector result to use it as a mask. */ ++ emit_move_insn (mask, cmp_res); ++ ++ if (register_operand (operands[1], mode)) ++ { ++ rtx xop1 = operands[1]; ++ if (mode != vimode) ++ { ++ xop1 = gen_reg_rtx (vimode); ++ emit_move_insn (xop1, gen_rtx_SUBREG (vimode, operands[1], 0)); ++ } ++ emit_move_insn (src1, xop1); ++ } ++ else ++ { ++ gcc_assert (operands[1] == CONSTM1_RTX (vimode)); ++ /* Case (2) if the below doesn't move the mask to src2. */ ++ emit_move_insn (src1, mask); ++ } ++ ++ if (register_operand (operands[2], mode)) ++ { ++ rtx xop2 = operands[2]; ++ if (mode != vimode) ++ { ++ xop2 = gen_reg_rtx (vimode); ++ emit_move_insn (xop2, gen_rtx_SUBREG (vimode, operands[2], 0)); ++ } ++ emit_move_insn (src2, xop2); ++ } ++ else ++ { ++ gcc_assert (operands[2] == CONST0_RTX (mode)); ++ /* Case (3) if the above didn't move the mask to src1. */ ++ emit_move_insn (src2, mask); ++ } ++ ++ /* We deal with case (4) if the mask wasn't moved to either src1 or src2. ++ In any case, we eventually do vector mask-based copy. 
*/ ++ bsel = gen_rtx_IOR (vimode, ++ gen_rtx_AND (vimode, ++ gen_rtx_NOT (vimode, mask), src2), ++ gen_rtx_AND (vimode, mask, src1)); ++ /* The result is placed back to a register with the mask. */ ++ emit_insn (gen_rtx_SET (mask, bsel)); ++ emit_move_insn (operands[0], gen_rtx_SUBREG (mode, mask, 0)); ++ } ++} ++ ++void ++loongarch_expand_vec_cond_mask_expr (machine_mode mode, machine_mode vimode, ++ rtx *operands) ++{ ++ rtx cmp_res = operands[3]; ++ ++ /* We handle the following cases: ++ 1) r = a CMP b ? -1 : 0 ++ 2) r = a CMP b ? -1 : v ++ 3) r = a CMP b ? v : 0 ++ 4) r = a CMP b ? v1 : v2 */ ++ ++ /* Case (1) above. We only move the results. */ ++ if (operands[1] == CONSTM1_RTX (vimode) ++ && operands[2] == CONST0_RTX (vimode)) ++ emit_move_insn (operands[0], cmp_res); ++ else ++ { ++ rtx src1 = gen_reg_rtx (vimode); ++ rtx src2 = gen_reg_rtx (vimode); ++ rtx mask = gen_reg_rtx (vimode); ++ rtx bsel; ++ ++ /* Move the vector result to use it as a mask. */ ++ emit_move_insn (mask, cmp_res); ++ ++ if (register_operand (operands[1], mode)) ++ { ++ rtx xop1 = operands[1]; ++ if (mode != vimode) ++ { ++ xop1 = gen_reg_rtx (vimode); ++ emit_move_insn (xop1, gen_rtx_SUBREG (vimode, operands[1], 0)); ++ } ++ emit_move_insn (src1, xop1); ++ } ++ else ++ { ++ gcc_assert (operands[1] == CONSTM1_RTX (vimode)); ++ /* Case (2) if the below doesn't move the mask to src2. */ ++ emit_move_insn (src1, mask); ++ } ++ ++ if (register_operand (operands[2], mode)) ++ { ++ rtx xop2 = operands[2]; ++ if (mode != vimode) ++ { ++ xop2 = gen_reg_rtx (vimode); ++ emit_move_insn (xop2, gen_rtx_SUBREG (vimode, operands[2], 0)); ++ } ++ emit_move_insn (src2, xop2); ++ } ++ else ++ { ++ gcc_assert (operands[2] == CONST0_RTX (mode)); ++ /* Case (3) if the above didn't move the mask to src1. */ ++ emit_move_insn (src2, mask); ++ } ++ ++ /* We deal with case (4) if the mask wasn't moved to either src1 or src2. ++ In any case, we eventually do vector mask-based copy. 
*/ ++ bsel = gen_rtx_IOR (vimode, ++ gen_rtx_AND (vimode, ++ gen_rtx_NOT (vimode, mask), src2), ++ gen_rtx_AND (vimode, mask, src1)); ++ /* The result is placed back to a register with the mask. */ ++ emit_insn (gen_rtx_SET (mask, bsel)); ++ emit_move_insn (operands[0], gen_rtx_SUBREG (mode, mask, 0)); ++ } ++} ++ ++/* Expand integer vector comparison */ ++bool ++loongarch_expand_vec_cmp (rtx operands[]) ++{ ++ ++ rtx_code code = GET_CODE (operands[1]); ++ loongarch_expand_lsx_cmp (operands[0], code, operands[2], operands[3]); ++ return true; ++} ++ ++/* Implement TARGET_CASE_VALUES_THRESHOLD. */ ++ ++unsigned int ++loongarch_case_values_threshold (void) ++{ ++ return default_case_values_threshold (); ++} ++ ++/* Implement TARGET_SPILL_CLASS. */ ++ ++static reg_class_t ++loongarch_spill_class (reg_class_t rclass ATTRIBUTE_UNUSED, ++ machine_mode mode ATTRIBUTE_UNUSED) ++{ ++ return NO_REGS; ++} ++ ++/* Implement TARGET_PROMOTE_FUNCTION_MODE. */ ++ ++/* This function is equivalent to default_promote_function_mode_always_promote ++ except that it returns a promoted mode even if type is NULL_TREE. This is ++ needed by libcalls which have no type (only a mode) such as fixed conversion ++ routines that take a signed or unsigned char/short argument and convert it ++ to a fixed type. */ ++ ++static machine_mode ++loongarch_promote_function_mode (const_tree type ATTRIBUTE_UNUSED, ++ machine_mode mode, ++ int *punsignedp ATTRIBUTE_UNUSED, ++ const_tree fntype ATTRIBUTE_UNUSED, ++ int for_return ATTRIBUTE_UNUSED) ++{ ++ int unsignedp; ++ ++ if (type != NULL_TREE) ++ return promote_mode (type, mode, punsignedp); ++ ++ unsignedp = *punsignedp; ++ PROMOTE_MODE (mode, unsignedp, type); ++ *punsignedp = unsignedp; ++ return mode; ++} ++ ++/* Implement TARGET_STARTING_FRAME_OFFSET. See loongarch_compute_frame_info ++ for details about the frame layout. 
*/ ++ ++static HOST_WIDE_INT ++loongarch_starting_frame_offset (void) ++{ ++ if (FRAME_GROWS_DOWNWARD) ++ return 0; ++ return crtl->outgoing_args_size; ++} ++ ++/* A subroutine of loongarch_build_signbit_mask. If VECT is true, ++ then replicate the value for all elements of the vector ++ register. */ ++ ++rtx ++loongarch_build_const_vector (machine_mode mode, bool vect, rtx value) ++{ ++ int i, n_elt; ++ rtvec v; ++ machine_mode scalar_mode; ++ ++ switch (mode) ++ { ++ case E_V32QImode: ++ case E_V16QImode: ++ case E_V32HImode: ++ case E_V16HImode: ++ case E_V8HImode: ++ case E_V8SImode: ++ case E_V4SImode: ++ case E_V8DImode: ++ case E_V4DImode: ++ case E_V2DImode: ++ gcc_assert (vect); ++ /* FALLTHRU */ ++ case E_V8SFmode: ++ case E_V4SFmode: ++ case E_V8DFmode: ++ case E_V4DFmode: ++ case E_V2DFmode: ++ n_elt = GET_MODE_NUNITS (mode); ++ v = rtvec_alloc (n_elt); ++ scalar_mode = GET_MODE_INNER (mode); ++ ++ RTVEC_ELT (v, 0) = value; ++ ++ for (i = 1; i < n_elt; ++i) ++ RTVEC_ELT (v, i) = vect ? value : CONST0_RTX (scalar_mode); ++ ++ return gen_rtx_CONST_VECTOR (mode, v); ++ ++ default: ++ gcc_unreachable (); ++ } ++} ++ ++/* Create a mask for the sign bit in MODE ++ for an register. If VECT is true, then replicate the mask for ++ all elements of the vector register. If INVERT is true, then create ++ a mask excluding the sign bit. 
*/ ++ ++rtx ++loongarch_build_signbit_mask (machine_mode mode, bool vect, bool invert) ++{ ++ machine_mode vec_mode, imode; ++ wide_int w; ++ rtx mask, v; ++ ++ switch (mode) ++ { ++ case E_V16SImode: ++ case E_V16SFmode: ++ case E_V8SImode: ++ case E_V4SImode: ++ case E_V8SFmode: ++ case E_V4SFmode: ++ vec_mode = mode; ++ imode = SImode; ++ break; ++ ++ case E_V8DImode: ++ case E_V4DImode: ++ case E_V2DImode: ++ case E_V8DFmode: ++ case E_V4DFmode: ++ case E_V2DFmode: ++ vec_mode = mode; ++ imode = DImode; ++ break; ++ ++ case E_TImode: ++ case E_TFmode: ++ vec_mode = VOIDmode; ++ imode = TImode; ++ break; ++ ++ default: ++ gcc_unreachable (); ++ } ++ ++ machine_mode inner_mode = GET_MODE_INNER (mode); ++ w = wi::set_bit_in_zero (GET_MODE_BITSIZE (inner_mode) - 1, ++ GET_MODE_BITSIZE (inner_mode)); ++ if (invert) ++ w = wi::bit_not (w); ++ ++ /* Force this value into the low part of a fp vector constant. */ ++ mask = immed_wide_int_const (w, imode); ++ mask = gen_lowpart (inner_mode, mask); ++ ++ if (vec_mode == VOIDmode) ++ return force_reg (inner_mode, mask); ++ ++ v = loongarch_build_const_vector (vec_mode, vect, mask); ++ return force_reg (vec_mode, v); ++} ++ ++static bool ++loongarch_builtin_support_vector_misalignment (machine_mode mode, ++ const_tree type, ++ int misalignment, ++ bool is_packed) ++{ ++ if (ISA_HAS_LSX && STRICT_ALIGNMENT) ++ { ++ if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing) ++ return false; ++ if (misalignment == -1) ++ return false; ++ } ++ return default_builtin_support_vector_misalignment (mode, type, misalignment, ++ is_packed); ++} ++ ++/* Initialize the GCC target structure. 
*/ ++#undef TARGET_ASM_ALIGNED_HI_OP ++#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t" ++#undef TARGET_ASM_ALIGNED_SI_OP ++#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t" ++#undef TARGET_ASM_ALIGNED_DI_OP ++#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t" ++ ++#undef TARGET_OPTION_OVERRIDE ++#define TARGET_OPTION_OVERRIDE loongarch_option_override ++ ++#undef TARGET_LEGITIMIZE_ADDRESS ++#define TARGET_LEGITIMIZE_ADDRESS loongarch_legitimize_address ++ ++#undef TARGET_ASM_SELECT_RTX_SECTION ++#define TARGET_ASM_SELECT_RTX_SECTION loongarch_select_rtx_section ++#undef TARGET_ASM_FUNCTION_RODATA_SECTION ++#define TARGET_ASM_FUNCTION_RODATA_SECTION loongarch_function_rodata_section ++ ++#undef TARGET_SCHED_INIT ++#define TARGET_SCHED_INIT loongarch_sched_init ++#undef TARGET_SCHED_REORDER ++#define TARGET_SCHED_REORDER loongarch_sched_reorder ++#undef TARGET_SCHED_REORDER2 ++#define TARGET_SCHED_REORDER2 loongarch_sched_reorder2 ++#undef TARGET_SCHED_VARIABLE_ISSUE ++#define TARGET_SCHED_VARIABLE_ISSUE loongarch_variable_issue ++#undef TARGET_SCHED_ADJUST_COST ++#define TARGET_SCHED_ADJUST_COST loongarch_adjust_cost ++#undef TARGET_SCHED_ISSUE_RATE ++#define TARGET_SCHED_ISSUE_RATE loongarch_issue_rate ++#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ++#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \ ++ loongarch_multipass_dfa_lookahead ++ ++#undef TARGET_FUNCTION_OK_FOR_SIBCALL ++#define TARGET_FUNCTION_OK_FOR_SIBCALL loongarch_function_ok_for_sibcall ++ ++#undef TARGET_VALID_POINTER_MODE ++#define TARGET_VALID_POINTER_MODE loongarch_valid_pointer_mode ++#undef TARGET_REGISTER_MOVE_COST ++#define TARGET_REGISTER_MOVE_COST loongarch_register_move_cost ++#undef TARGET_MEMORY_MOVE_COST ++#define TARGET_MEMORY_MOVE_COST loongarch_memory_move_cost ++#undef TARGET_RTX_COSTS ++#define TARGET_RTX_COSTS loongarch_rtx_costs ++#undef TARGET_ADDRESS_COST ++#define TARGET_ADDRESS_COST loongarch_address_cost ++#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST ++#define 
TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \ ++ loongarch_builtin_vectorization_cost ++ ++ ++#undef TARGET_IN_SMALL_DATA_P ++#define TARGET_IN_SMALL_DATA_P loongarch_in_small_data_p ++ ++#undef TARGET_PREFERRED_RELOAD_CLASS ++#define TARGET_PREFERRED_RELOAD_CLASS loongarch_preferred_reload_class ++ ++#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE ++#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true ++ ++#undef TARGET_EXPAND_BUILTIN_VA_START ++#define TARGET_EXPAND_BUILTIN_VA_START loongarch_va_start ++ ++#undef TARGET_PROMOTE_FUNCTION_MODE ++#define TARGET_PROMOTE_FUNCTION_MODE loongarch_promote_function_mode ++#undef TARGET_RETURN_IN_MEMORY ++#define TARGET_RETURN_IN_MEMORY loongarch_return_in_memory ++ ++#undef TARGET_FUNCTION_VALUE ++#define TARGET_FUNCTION_VALUE loongarch_function_value ++#undef TARGET_LIBCALL_VALUE ++#define TARGET_LIBCALL_VALUE loongarch_libcall_value ++ ++#undef TARGET_ASM_OUTPUT_MI_THUNK ++#define TARGET_ASM_OUTPUT_MI_THUNK loongarch_output_mi_thunk ++#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK ++#define TARGET_ASM_CAN_OUTPUT_MI_THUNK \ ++ hook_bool_const_tree_hwi_hwi_const_tree_true ++ ++#undef TARGET_PRINT_OPERAND ++#define TARGET_PRINT_OPERAND loongarch_print_operand ++#undef TARGET_PRINT_OPERAND_ADDRESS ++#define TARGET_PRINT_OPERAND_ADDRESS loongarch_print_operand_address ++#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P ++#define TARGET_PRINT_OPERAND_PUNCT_VALID_P \ ++ loongarch_print_operand_punct_valid_p ++ ++#undef TARGET_SETUP_INCOMING_VARARGS ++#define TARGET_SETUP_INCOMING_VARARGS loongarch_setup_incoming_varargs ++#undef TARGET_STRICT_ARGUMENT_NAMING ++#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true ++#undef TARGET_MUST_PASS_IN_STACK ++#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size ++#undef TARGET_PASS_BY_REFERENCE ++#define TARGET_PASS_BY_REFERENCE loongarch_pass_by_reference ++#undef TARGET_ARG_PARTIAL_BYTES ++#define TARGET_ARG_PARTIAL_BYTES loongarch_arg_partial_bytes ++#undef TARGET_FUNCTION_ARG 
++#define TARGET_FUNCTION_ARG loongarch_function_arg ++#undef TARGET_FUNCTION_ARG_ADVANCE ++#define TARGET_FUNCTION_ARG_ADVANCE loongarch_function_arg_advance ++#undef TARGET_FUNCTION_ARG_BOUNDARY ++#define TARGET_FUNCTION_ARG_BOUNDARY loongarch_function_arg_boundary ++ ++#undef TARGET_VECTOR_MODE_SUPPORTED_P ++#define TARGET_VECTOR_MODE_SUPPORTED_P loongarch_vector_mode_supported_p ++ ++#undef TARGET_SCALAR_MODE_SUPPORTED_P ++#define TARGET_SCALAR_MODE_SUPPORTED_P loongarch_scalar_mode_supported_p ++ ++#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE ++#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE loongarch_preferred_simd_mode ++ ++#undef TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_MODES ++#define TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_MODES \ ++ loongarch_autovectorize_vector_modes ++ ++#undef TARGET_INIT_BUILTINS + #define TARGET_INIT_BUILTINS loongarch_init_builtins + #undef TARGET_BUILTIN_DECL + #define TARGET_BUILTIN_DECL loongarch_builtin_decl +@@ -7006,6 +8886,14 @@ loongarch_set_handled_components (sbitmap components) + + #undef TARGET_MAX_ANCHOR_OFFSET + #define TARGET_MAX_ANCHOR_OFFSET (IMM_REACH/2-1) ++#undef TARGET_VECTORIZE_VEC_PERM_CONST ++#define TARGET_VECTORIZE_VEC_PERM_CONST loongarch_vectorize_vec_perm_const ++ ++#undef TARGET_SCHED_REASSOCIATION_WIDTH ++#define TARGET_SCHED_REASSOCIATION_WIDTH loongarch_sched_reassociation_width ++ ++#undef TARGET_CASE_VALUES_THRESHOLD ++#define TARGET_CASE_VALUES_THRESHOLD loongarch_case_values_threshold + + #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV + #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV loongarch_atomic_assign_expand_fenv +@@ -7024,6 +8912,10 @@ loongarch_set_handled_components (sbitmap components) + #undef TARGET_MODES_TIEABLE_P + #define TARGET_MODES_TIEABLE_P loongarch_modes_tieable_p + ++#undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED ++#define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \ ++ loongarch_hard_regno_call_part_clobbered ++ + #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS + #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 2 
+ +@@ -7074,6 +8966,10 @@ loongarch_set_handled_components (sbitmap components) + #define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS \ + loongarch_set_handled_components + ++#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT ++#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \ ++ loongarch_builtin_support_vector_misalignment ++ + struct gcc_target targetm = TARGET_INITIALIZER; + + #include "gt-loongarch.h" +diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h +index e0c9c9439..c3ebea2f2 100644 +--- a/gcc/config/loongarch/loongarch.h ++++ b/gcc/config/loongarch/loongarch.h +@@ -23,6 +23,8 @@ along with GCC; see the file COPYING3. If not see + + #include "config/loongarch/loongarch-opts.h" + ++#define TARGET_SUPPORTS_WIDE_INT 1 ++ + /* Macros to silence warnings about numbers being signed in traditional + C and unsigned in ISO C when compiled on 32-bit hosts. */ + +@@ -179,6 +181,11 @@ along with GCC; see the file COPYING3. If not see + #define MIN_UNITS_PER_WORD 4 + #endif + ++/* Width of a LSX vector register in bytes. */ ++#define UNITS_PER_LSX_REG 16 ++/* Width of a LSX vector register in bits. */ ++#define BITS_PER_LSX_REG (UNITS_PER_LSX_REG * BITS_PER_UNIT) ++ + /* For LARCH, width of a floating point register. */ + #define UNITS_PER_FPREG (TARGET_DOUBLE_FLOAT ? 8 : 4) + +@@ -241,8 +248,10 @@ along with GCC; see the file COPYING3. If not see + #define STRUCTURE_SIZE_BOUNDARY 8 + + /* There is no point aligning anything to a rounder boundary than +- LONG_DOUBLE_TYPE_SIZE. */ +-#define BIGGEST_ALIGNMENT (LONG_DOUBLE_TYPE_SIZE) ++ LONG_DOUBLE_TYPE_SIZE, unless under LSX the biggest alignment is ++ BITS_PER_LSX_REG/.. */ ++#define BIGGEST_ALIGNMENT \ ++ (ISA_HAS_LSX ? BITS_PER_LSX_REG : LONG_DOUBLE_TYPE_SIZE) + + /* All accesses must be aligned. */ + #define STRICT_ALIGNMENT (TARGET_STRICT_ALIGN) +@@ -378,6 +387,9 @@ along with GCC; see the file COPYING3.
If not see + #define FP_REG_FIRST 32 + #define FP_REG_LAST 63 + #define FP_REG_NUM (FP_REG_LAST - FP_REG_FIRST + 1) ++#define LSX_REG_FIRST FP_REG_FIRST ++#define LSX_REG_LAST FP_REG_LAST ++#define LSX_REG_NUM FP_REG_NUM + + /* The DWARF 2 CFA column which tracks the return address from a + signal handler context. This means that to maintain backwards +@@ -395,8 +407,11 @@ along with GCC; see the file COPYING3. If not see + ((unsigned int) ((int) (REGNO) - FP_REG_FIRST) < FP_REG_NUM) + #define FCC_REG_P(REGNO) \ + ((unsigned int) ((int) (REGNO) - FCC_REG_FIRST) < FCC_REG_NUM) ++#define LSX_REG_P(REGNO) \ ++ ((unsigned int) ((int) (REGNO) - LSX_REG_FIRST) < LSX_REG_NUM) + + #define FP_REG_RTX_P(X) (REG_P (X) && FP_REG_P (REGNO (X))) ++#define LSX_REG_RTX_P(X) (REG_P (X) && LSX_REG_P (REGNO (X))) + + /* Select a register mode required for caller save of hard regno REGNO. */ + #define HARD_REGNO_CALLER_SAVE_MODE(REGNO, NREGS, MODE) \ +@@ -577,6 +592,11 @@ enum reg_class + #define IMM12_OPERAND(VALUE) \ + ((unsigned HOST_WIDE_INT) (VALUE) + IMM_REACH / 2 < IMM_REACH) + ++/* True if VALUE is a signed 13-bit number. */ ++ ++#define IMM13_OPERAND(VALUE) \ ++ ((unsigned HOST_WIDE_INT) (VALUE) + 0x1000 < 0x2000) ++ + /* True if VALUE is a signed 16-bit number. */ + + #define IMM16_OPERAND(VALUE) \ +@@ -706,6 +726,13 @@ enum reg_class + #define FP_ARG_FIRST (FP_REG_FIRST + 0) + #define FP_ARG_LAST (FP_ARG_FIRST + MAX_ARGS_IN_REGISTERS - 1) + ++/* True if MODE is vector and supported in a LSX vector register. */ ++#define LSX_SUPPORTED_MODE_P(MODE) \ ++ (ISA_HAS_LSX \ ++ && GET_MODE_SIZE (MODE) == UNITS_PER_LSX_REG \ ++ && (GET_MODE_CLASS (MODE) == MODE_VECTOR_INT \ ++ || GET_MODE_CLASS (MODE) == MODE_VECTOR_FLOAT)) ++ + /* 1 if N is a possible register number for function argument passing. + We have no FP argument registers when soft-float. 
*/ + +@@ -926,7 +953,39 @@ typedef struct { + { "s7", 30 + GP_REG_FIRST }, \ + { "s8", 31 + GP_REG_FIRST }, \ + { "v0", 4 + GP_REG_FIRST }, \ +- { "v1", 5 + GP_REG_FIRST } \ ++ { "v1", 5 + GP_REG_FIRST }, \ ++ { "vr0", 0 + FP_REG_FIRST }, \ ++ { "vr1", 1 + FP_REG_FIRST }, \ ++ { "vr2", 2 + FP_REG_FIRST }, \ ++ { "vr3", 3 + FP_REG_FIRST }, \ ++ { "vr4", 4 + FP_REG_FIRST }, \ ++ { "vr5", 5 + FP_REG_FIRST }, \ ++ { "vr6", 6 + FP_REG_FIRST }, \ ++ { "vr7", 7 + FP_REG_FIRST }, \ ++ { "vr8", 8 + FP_REG_FIRST }, \ ++ { "vr9", 9 + FP_REG_FIRST }, \ ++ { "vr10", 10 + FP_REG_FIRST }, \ ++ { "vr11", 11 + FP_REG_FIRST }, \ ++ { "vr12", 12 + FP_REG_FIRST }, \ ++ { "vr13", 13 + FP_REG_FIRST }, \ ++ { "vr14", 14 + FP_REG_FIRST }, \ ++ { "vr15", 15 + FP_REG_FIRST }, \ ++ { "vr16", 16 + FP_REG_FIRST }, \ ++ { "vr17", 17 + FP_REG_FIRST }, \ ++ { "vr18", 18 + FP_REG_FIRST }, \ ++ { "vr19", 19 + FP_REG_FIRST }, \ ++ { "vr20", 20 + FP_REG_FIRST }, \ ++ { "vr21", 21 + FP_REG_FIRST }, \ ++ { "vr22", 22 + FP_REG_FIRST }, \ ++ { "vr23", 23 + FP_REG_FIRST }, \ ++ { "vr24", 24 + FP_REG_FIRST }, \ ++ { "vr25", 25 + FP_REG_FIRST }, \ ++ { "vr26", 26 + FP_REG_FIRST }, \ ++ { "vr27", 27 + FP_REG_FIRST }, \ ++ { "vr28", 28 + FP_REG_FIRST }, \ ++ { "vr29", 29 + FP_REG_FIRST }, \ ++ { "vr30", 30 + FP_REG_FIRST }, \ ++ { "vr31", 31 + FP_REG_FIRST } \ + } + + /* Globalizing directive for a label. 
*/ +diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md +index 2d269794f..fb3828262 100644 +--- a/gcc/config/loongarch/loongarch.md ++++ b/gcc/config/loongarch/loongarch.md +@@ -158,11 +158,12 @@ + const,signext,pick_ins,logical,arith,sll0,andi,shift_shift" + (const_string "unknown")) + +-(define_attr "alu_type" "unknown,add,sub,not,nor,and,or,xor" ++(define_attr "alu_type" "unknown,add,sub,not,nor,and,or,xor,simd_add" + (const_string "unknown")) + + ;; Main data type used by the insn +-(define_attr "mode" "unknown,none,QI,HI,SI,DI,TI,SF,DF,TF,FCC" ++(define_attr "mode" "unknown,none,QI,HI,SI,DI,TI,SF,DF,TF,FCC, ++ V2DI,V4SI,V8HI,V16QI,V2DF,V4SF" + (const_string "unknown")) + + ;; True if the main data type is twice the size of a word. +@@ -234,7 +235,12 @@ + prefetch,prefetchx,condmove,mgtf,mftg,const,arith,logical, + shift,slt,signext,clz,trap,imul,idiv,move, + fmove,fadd,fmul,fmadd,fdiv,frdiv,fabs,flogb,fneg,fcmp,fcopysign,fcvt, +- fscaleb,fsqrt,frsqrt,accext,accmod,multi,atomic,syncloop,nop,ghost" ++ fscaleb,fsqrt,frsqrt,accext,accmod,multi,atomic,syncloop,nop,ghost, ++ simd_div,simd_fclass,simd_flog2,simd_fadd,simd_fcvt,simd_fmul,simd_fmadd, ++ simd_fdiv,simd_bitins,simd_bitmov,simd_insert,simd_sld,simd_mul,simd_fcmp, ++ simd_fexp2,simd_int_arith,simd_bit,simd_shift,simd_splat,simd_fill, ++ simd_permute,simd_shf,simd_sat,simd_pcnt,simd_copy,simd_branch,simd_clsx, ++ simd_fminmax,simd_logic,simd_move,simd_load,simd_store" + (cond [(eq_attr "jirl" "!unset") (const_string "call") + (eq_attr "got" "load") (const_string "load") + +@@ -414,11 +420,20 @@ + + ;; This attribute gives the upper-case mode name for one unit of a + ;; floating-point mode or vector mode. +-(define_mode_attr UNITMODE [(SF "SF") (DF "DF")]) ++(define_mode_attr UNITMODE [(SF "SF") (DF "DF") (V2SF "SF") (V4SF "SF") ++ (V16QI "QI") (V8HI "HI") (V4SI "SI") (V2DI "DI") ++ (V2DF "DF")]) ++ ++;; As above, but in lower case. 
++(define_mode_attr unitmode [(SF "sf") (DF "df") (V2SF "sf") (V4SF "sf") ++ (V16QI "qi") (V8QI "qi") (V8HI "hi") (V4HI "hi") ++ (V4SI "si") (V2SI "si") (V2DI "di") (V2DF "df")]) + + ;; This attribute gives the integer mode that has half the size of + ;; the controlling mode. +-(define_mode_attr HALFMODE [(DF "SI") (DI "SI") (TF "DI")]) ++(define_mode_attr HALFMODE [(DF "SI") (DI "SI") (V2SF "SI") ++ (V2SI "SI") (V4HI "SI") (V8QI "SI") ++ (TF "DI")]) + + ;; This attribute gives the integer mode that has the same size of a + ;; floating-point mode. +@@ -445,6 +460,18 @@ + ;; from the same template. + (define_code_iterator any_div [div udiv mod umod]) + ++;; This code iterator allows addition and subtraction to be generated ++;; from the same template. ++(define_code_iterator addsub [plus minus]) ++ ++;; This code iterator allows addition and multiplication to be generated ++;; from the same template. ++(define_code_iterator addmul [plus mult]) ++ ++;; This code iterator allows addition subtraction and multiplication to be ++;; generated from the same template ++(define_code_iterator addsubmul [plus minus mult]) ++ + ;; This code iterator allows all native floating-point comparisons to be + ;; generated from the same template. + (define_code_iterator fcond [unordered uneq unlt unle eq lt le +@@ -684,7 +711,6 @@ + [(set_attr "alu_type" "sub") + (set_attr "mode" "")]) + +- + (define_insn "*subsi3_extended" + [(set (match_operand:DI 0 "register_operand" "= r") + (sign_extend:DI +@@ -1228,7 +1254,7 @@ + "fmina.\t%0,%1,%2" + [(set_attr "type" "fmove") + (set_attr "mode" "")]) +- ++ + ;; + ;; .................... + ;; +@@ -2541,7 +2567,6 @@ + [(set_attr "type" "shift,shift") + (set_attr "mode" "")]) + +- + ;; The following templates were added to generate "bstrpick.d + alsl.d" + ;; instruction pairs. + ;; It is required that the values of const_immalsl_operand and +@@ -3610,6 +3635,9 @@ + (include "generic.md") + (include "la464.md") + ++; The LoongArch SX Instructions. 
++(include "lsx.md") ++ + (define_c_enum "unspec" [ + UNSPEC_ADDRESS_FIRST + ]) +diff --git a/gcc/config/loongarch/loongarch.opt b/gcc/config/loongarch/loongarch.opt +index f96d32769..8cc0c1d0b 100644 +--- a/gcc/config/loongarch/loongarch.opt ++++ b/gcc/config/loongarch/loongarch.opt +@@ -153,6 +153,10 @@ mbranch-cost= + Target RejectNegative Joined UInteger Var(loongarch_branch_cost) + -mbranch-cost=COST Set the cost of branches to roughly COST instructions. + ++mmemvec-cost= ++Target RejectNegative Joined UInteger Var(loongarch_vector_access_cost) IntegerRange(1, 5) ++mmemvec-cost=COST Set the cost of vector memory access instructions. ++ + mcheck-zero-division + Target Mask(CHECK_ZERO_DIV) + Trap on integer divide by zero. +diff --git a/gcc/config/loongarch/lsx.md b/gcc/config/loongarch/lsx.md +new file mode 100644 +index 000000000..fb4d228ba +--- /dev/null ++++ b/gcc/config/loongarch/lsx.md +@@ -0,0 +1,4467 @@ ++;; Machine Description for LARCH Loongson SX ASE ++;; ++;; Copyright (C) 2018 Free Software Foundation, Inc. ++;; ++;; This file is part of GCC. ++;; ++;; GCC is free software; you can redistribute it and/or modify ++;; it under the terms of the GNU General Public License as published by ++;; the Free Software Foundation; either version 3, or (at your option) ++;; any later version. ++;; ++;; GCC is distributed in the hope that it will be useful, ++;; but WITHOUT ANY WARRANTY; without even the implied warranty of ++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++;; GNU General Public License for more details. ++;; ++;; You should have received a copy of the GNU General Public License ++;; along with GCC; see the file COPYING3. If not see ++;; . 
++;; ++ ++(define_c_enum "unspec" [ ++ UNSPEC_LSX_ABSD_S ++ UNSPEC_LSX_VABSD_U ++ UNSPEC_LSX_VAVG_S ++ UNSPEC_LSX_VAVG_U ++ UNSPEC_LSX_VAVGR_S ++ UNSPEC_LSX_VAVGR_U ++ UNSPEC_LSX_VBITCLR ++ UNSPEC_LSX_VBITCLRI ++ UNSPEC_LSX_VBITREV ++ UNSPEC_LSX_VBITREVI ++ UNSPEC_LSX_VBITSET ++ UNSPEC_LSX_VBITSETI ++ UNSPEC_LSX_BRANCH_V ++ UNSPEC_LSX_BRANCH ++ UNSPEC_LSX_VFCMP_CAF ++ UNSPEC_LSX_VFCLASS ++ UNSPEC_LSX_VFCMP_CUNE ++ UNSPEC_LSX_VFCVT ++ UNSPEC_LSX_VFCVTH ++ UNSPEC_LSX_VFCVTL ++ UNSPEC_LSX_VFLOGB ++ UNSPEC_LSX_VFRECIP ++ UNSPEC_LSX_VFRINT ++ UNSPEC_LSX_VFRSQRT ++ UNSPEC_LSX_VFCMP_SAF ++ UNSPEC_LSX_VFCMP_SEQ ++ UNSPEC_LSX_VFCMP_SLE ++ UNSPEC_LSX_VFCMP_SLT ++ UNSPEC_LSX_VFCMP_SNE ++ UNSPEC_LSX_VFCMP_SOR ++ UNSPEC_LSX_VFCMP_SUEQ ++ UNSPEC_LSX_VFCMP_SULE ++ UNSPEC_LSX_VFCMP_SULT ++ UNSPEC_LSX_VFCMP_SUN ++ UNSPEC_LSX_VFCMP_SUNE ++ UNSPEC_LSX_VFTINT_S ++ UNSPEC_LSX_VFTINT_U ++ UNSPEC_LSX_VSAT_S ++ UNSPEC_LSX_VSAT_U ++ UNSPEC_LSX_VREPLVEI ++ UNSPEC_LSX_VSRAR ++ UNSPEC_LSX_VSRARI ++ UNSPEC_LSX_VSRLR ++ UNSPEC_LSX_VSRLRI ++ UNSPEC_LSX_VSHUF ++ UNSPEC_LSX_VMUH_S ++ UNSPEC_LSX_VMUH_U ++ UNSPEC_LSX_VEXTW_S ++ UNSPEC_LSX_VEXTW_U ++ UNSPEC_LSX_VSLLWIL_S ++ UNSPEC_LSX_VSLLWIL_U ++ UNSPEC_LSX_VSRAN ++ UNSPEC_LSX_VSSRAN_S ++ UNSPEC_LSX_VSSRAN_U ++ UNSPEC_LSX_VSRAIN ++ UNSPEC_LSX_VSRAINS_S ++ UNSPEC_LSX_VSRAINS_U ++ UNSPEC_LSX_VSRARN ++ UNSPEC_LSX_VSRLN ++ UNSPEC_LSX_VSRLRN ++ UNSPEC_LSX_VSSRLRN_U ++ UNSPEC_LSX_VFRSTPI ++ UNSPEC_LSX_VFRSTP ++ UNSPEC_LSX_VSHUF4I ++ UNSPEC_LSX_VBSRL_V ++ UNSPEC_LSX_VBSLL_V ++ UNSPEC_LSX_VEXTRINS ++ UNSPEC_LSX_VMSKLTZ ++ UNSPEC_LSX_VSIGNCOV ++ UNSPEC_LSX_VFTINTRNE ++ UNSPEC_LSX_VFTINTRP ++ UNSPEC_LSX_VFTINTRM ++ UNSPEC_LSX_VFTINT_W_D ++ UNSPEC_LSX_VFFINT_S_L ++ UNSPEC_LSX_VFTINTRZ_W_D ++ UNSPEC_LSX_VFTINTRP_W_D ++ UNSPEC_LSX_VFTINTRM_W_D ++ UNSPEC_LSX_VFTINTRNE_W_D ++ UNSPEC_LSX_VFTINTL_L_S ++ UNSPEC_LSX_VFFINTH_D_W ++ UNSPEC_LSX_VFFINTL_D_W ++ UNSPEC_LSX_VFTINTRZL_L_S ++ UNSPEC_LSX_VFTINTRZH_L_S ++ UNSPEC_LSX_VFTINTRPL_L_S ++ UNSPEC_LSX_VFTINTRPH_L_S ++ 
UNSPEC_LSX_VFTINTRMH_L_S ++ UNSPEC_LSX_VFTINTRML_L_S ++ UNSPEC_LSX_VFTINTRNEL_L_S ++ UNSPEC_LSX_VFTINTRNEH_L_S ++ UNSPEC_LSX_VFTINTH_L_H ++ UNSPEC_LSX_VFRINTRNE_S ++ UNSPEC_LSX_VFRINTRNE_D ++ UNSPEC_LSX_VFRINTRZ_S ++ UNSPEC_LSX_VFRINTRZ_D ++ UNSPEC_LSX_VFRINTRP_S ++ UNSPEC_LSX_VFRINTRP_D ++ UNSPEC_LSX_VFRINTRM_S ++ UNSPEC_LSX_VFRINTRM_D ++ UNSPEC_LSX_VSSRARN_S ++ UNSPEC_LSX_VSSRARN_U ++ UNSPEC_LSX_VSSRLN_U ++ UNSPEC_LSX_VSSRLN ++ UNSPEC_LSX_VSSRLRN ++ UNSPEC_LSX_VLDI ++ UNSPEC_LSX_VSHUF_B ++ UNSPEC_LSX_VLDX ++ UNSPEC_LSX_VSTX ++ UNSPEC_LSX_VEXTL_QU_DU ++ UNSPEC_LSX_VSETEQZ_V ++ UNSPEC_LSX_VADDWEV ++ UNSPEC_LSX_VADDWEV2 ++ UNSPEC_LSX_VADDWEV3 ++ UNSPEC_LSX_VADDWOD ++ UNSPEC_LSX_VADDWOD2 ++ UNSPEC_LSX_VADDWOD3 ++ UNSPEC_LSX_VSUBWEV ++ UNSPEC_LSX_VSUBWEV2 ++ UNSPEC_LSX_VSUBWOD ++ UNSPEC_LSX_VSUBWOD2 ++ UNSPEC_LSX_VMULWEV ++ UNSPEC_LSX_VMULWEV2 ++ UNSPEC_LSX_VMULWEV3 ++ UNSPEC_LSX_VMULWOD ++ UNSPEC_LSX_VMULWOD2 ++ UNSPEC_LSX_VMULWOD3 ++ UNSPEC_LSX_VHADDW_Q_D ++ UNSPEC_LSX_VHADDW_QU_DU ++ UNSPEC_LSX_VHSUBW_Q_D ++ UNSPEC_LSX_VHSUBW_QU_DU ++ UNSPEC_LSX_VMADDWEV ++ UNSPEC_LSX_VMADDWEV2 ++ UNSPEC_LSX_VMADDWEV3 ++ UNSPEC_LSX_VMADDWOD ++ UNSPEC_LSX_VMADDWOD2 ++ UNSPEC_LSX_VMADDWOD3 ++ UNSPEC_LSX_VROTR ++ UNSPEC_LSX_VADD_Q ++ UNSPEC_LSX_VSUB_Q ++ UNSPEC_LSX_VEXTH_Q_D ++ UNSPEC_LSX_VEXTH_QU_DU ++ UNSPEC_LSX_VMSKGEZ ++ UNSPEC_LSX_VMSKNZ ++ UNSPEC_LSX_VEXTL_Q_D ++ UNSPEC_LSX_VSRLNI ++ UNSPEC_LSX_VSRLRNI ++ UNSPEC_LSX_VSSRLNI ++ UNSPEC_LSX_VSSRLNI2 ++ UNSPEC_LSX_VSSRLRNI ++ UNSPEC_LSX_VSSRLRNI2 ++ UNSPEC_LSX_VSRANI ++ UNSPEC_LSX_VSRARNI ++ UNSPEC_LSX_VSSRANI ++ UNSPEC_LSX_VSSRANI2 ++ UNSPEC_LSX_VSSRARNI ++ UNSPEC_LSX_VSSRARNI2 ++ UNSPEC_LSX_VPERMI ++]) ++ ++;; This attribute gives suffix for integers in VHMODE. 
++(define_mode_attr dlsxfmt ++ [(V2DI "q") ++ (V4SI "d") ++ (V8HI "w") ++ (V16QI "h")]) ++ ++(define_mode_attr dlsxfmt_u ++ [(V2DI "qu") ++ (V4SI "du") ++ (V8HI "wu") ++ (V16QI "hu")]) ++ ++(define_mode_attr d2lsxfmt ++ [(V4SI "q") ++ (V8HI "d") ++ (V16QI "w")]) ++ ++(define_mode_attr d2lsxfmt_u ++ [(V4SI "qu") ++ (V8HI "du") ++ (V16QI "wu")]) ++ ++;; The attribute gives two double modes for vector modes. ++(define_mode_attr VD2MODE ++ [(V4SI "V2DI") ++ (V8HI "V2DI") ++ (V16QI "V4SI")]) ++ ++;; All vector modes with 128 bits. ++(define_mode_iterator LSX [V2DF V4SF V2DI V4SI V8HI V16QI]) ++ ++;; Same as LSX. Used by vcond to iterate two modes. ++(define_mode_iterator LSX_2 [V2DF V4SF V2DI V4SI V8HI V16QI]) ++ ++;; Only used for vilvh and splitting insert_d and copy_{u,s}.d. ++(define_mode_iterator LSX_D [V2DI V2DF]) ++ ++;; Only used for copy_{u,s}.w and vilvh. ++(define_mode_iterator LSX_W [V4SI V4SF]) ++ ++;; Only integer modes. ++(define_mode_iterator ILSX [V2DI V4SI V8HI V16QI]) ++ ++;; As ILSX but excludes V16QI. ++(define_mode_iterator ILSX_DWH [V2DI V4SI V8HI]) ++ ++;; As LSX but excludes V16QI. ++(define_mode_iterator LSX_DWH [V2DF V4SF V2DI V4SI V8HI]) ++ ++;; As ILSX but excludes V2DI. ++(define_mode_iterator ILSX_WHB [V4SI V8HI V16QI]) ++ ++;; Only integer modes equal or larger than a word. ++(define_mode_iterator ILSX_DW [V2DI V4SI]) ++ ++;; Only integer modes smaller than a word. ++(define_mode_iterator ILSX_HB [V8HI V16QI]) ++ ++;;;; Only integer modes for fixed-point madd_q/maddr_q. ++;;(define_mode_iterator ILSX_WH [V4SI V8HI]) ++ ++;; Only floating-point modes. ++(define_mode_iterator FLSX [V2DF V4SF]) ++ ++;; Only used for immediate set shuffle elements instruction. ++(define_mode_iterator LSX_WHB_W [V4SI V8HI V16QI V4SF]) ++ ++;; The attribute gives the integer vector mode with same size. 
++(define_mode_attr VIMODE ++ [(V2DF "V2DI") ++ (V4SF "V4SI") ++ (V2DI "V2DI") ++ (V4SI "V4SI") ++ (V8HI "V8HI") ++ (V16QI "V16QI")]) ++ ++;; The attribute gives half modes for vector modes. ++(define_mode_attr VHMODE ++ [(V8HI "V16QI") ++ (V4SI "V8HI") ++ (V2DI "V4SI")]) ++ ++;; The attribute gives double modes for vector modes. ++(define_mode_attr VDMODE ++ [(V2DI "V2DI") ++ (V4SI "V2DI") ++ (V8HI "V4SI") ++ (V16QI "V8HI")]) ++ ++;; The attribute gives half modes with same number of elements for vector modes. ++(define_mode_attr VTRUNCMODE ++ [(V8HI "V8QI") ++ (V4SI "V4HI") ++ (V2DI "V2SI")]) ++ ++;; Double-sized Vector MODE with same elemet type. "Vector, Enlarged-MODE" ++(define_mode_attr VEMODE ++ [(V4SF "V8SF") ++ (V4SI "V8SI") ++ (V2DI "V4DI") ++ (V2DF "V4DF")]) ++ ++;; This attribute gives the mode of the result for "vpickve2gr_b, copy_u_b" etc. ++(define_mode_attr VRES ++ [(V2DF "DF") ++ (V4SF "SF") ++ (V2DI "DI") ++ (V4SI "SI") ++ (V8HI "SI") ++ (V16QI "SI")]) ++ ++;; Only used with LSX_D iterator. ++(define_mode_attr lsx_d ++ [(V2DI "reg_or_0") ++ (V2DF "register")]) ++ ++;; This attribute gives the integer vector mode with same size. ++(define_mode_attr mode_i ++ [(V2DF "v2di") ++ (V4SF "v4si") ++ (V2DI "v2di") ++ (V4SI "v4si") ++ (V8HI "v8hi") ++ (V16QI "v16qi")]) ++ ++;; This attribute gives suffix for LSX instructions. ++(define_mode_attr lsxfmt ++ [(V2DF "d") ++ (V4SF "w") ++ (V2DI "d") ++ (V4SI "w") ++ (V8HI "h") ++ (V16QI "b")]) ++ ++;; This attribute gives suffix for LSX instructions. ++(define_mode_attr lsxfmt_u ++ [(V2DF "du") ++ (V4SF "wu") ++ (V2DI "du") ++ (V4SI "wu") ++ (V8HI "hu") ++ (V16QI "bu")]) ++ ++;; This attribute gives suffix for integers in VHMODE. ++(define_mode_attr hlsxfmt ++ [(V2DI "w") ++ (V4SI "h") ++ (V8HI "b")]) ++ ++;; This attribute gives suffix for integers in VHMODE. 
++(define_mode_attr hlsxfmt_u ++ [(V2DI "wu") ++ (V4SI "hu") ++ (V8HI "bu")]) ++ ++;; This attribute gives define_insn suffix for LSX instructions that need ++;; distinction between integer and floating point. ++(define_mode_attr lsxfmt_f ++ [(V2DF "d_f") ++ (V4SF "w_f") ++ (V2DI "d") ++ (V4SI "w") ++ (V8HI "h") ++ (V16QI "b")]) ++ ++(define_mode_attr flsxfmt_f ++ [(V2DF "d_f") ++ (V4SF "s_f") ++ (V2DI "d") ++ (V4SI "w") ++ (V8HI "h") ++ (V16QI "b")]) ++ ++(define_mode_attr flsxfmt ++ [(V2DF "d") ++ (V4SF "s") ++ (V2DI "d") ++ (V4SI "s")]) ++ ++(define_mode_attr flsxfrint ++ [(V2DF "d") ++ (V4SF "s")]) ++ ++(define_mode_attr ilsxfmt ++ [(V2DF "l") ++ (V4SF "w")]) ++ ++(define_mode_attr ilsxfmt_u ++ [(V2DF "lu") ++ (V4SF "wu")]) ++ ++;; This is used to form an immediate operand constraint using ++;; "const__operand". ++(define_mode_attr indeximm ++ [(V2DF "0_or_1") ++ (V4SF "0_to_3") ++ (V2DI "0_or_1") ++ (V4SI "0_to_3") ++ (V8HI "uimm3") ++ (V16QI "uimm4")]) ++ ++;; This attribute represents bitmask needed for vec_merge using ++;; "const__operand". ++(define_mode_attr bitmask ++ [(V2DF "exp_2") ++ (V4SF "exp_4") ++ (V2DI "exp_2") ++ (V4SI "exp_4") ++ (V8HI "exp_8") ++ (V16QI "exp_16")]) ++ ++;; This attribute is used to form an immediate operand constraint using ++;; "const__operand". 
++(define_mode_attr bitimm ++ [(V16QI "uimm3") ++ (V8HI "uimm4") ++ (V4SI "uimm5") ++ (V2DI "uimm6")]) ++ ++ ++(define_int_iterator FRINT_S [UNSPEC_LSX_VFRINTRP_S ++ UNSPEC_LSX_VFRINTRZ_S ++ UNSPEC_LSX_VFRINT ++ UNSPEC_LSX_VFRINTRM_S]) ++ ++(define_int_iterator FRINT_D [UNSPEC_LSX_VFRINTRP_D ++ UNSPEC_LSX_VFRINTRZ_D ++ UNSPEC_LSX_VFRINT ++ UNSPEC_LSX_VFRINTRM_D]) ++ ++(define_int_attr frint_pattern_s ++ [(UNSPEC_LSX_VFRINTRP_S "ceil") ++ (UNSPEC_LSX_VFRINTRZ_S "btrunc") ++ (UNSPEC_LSX_VFRINT "rint") ++ (UNSPEC_LSX_VFRINTRM_S "floor")]) ++ ++(define_int_attr frint_pattern_d ++ [(UNSPEC_LSX_VFRINTRP_D "ceil") ++ (UNSPEC_LSX_VFRINTRZ_D "btrunc") ++ (UNSPEC_LSX_VFRINT "rint") ++ (UNSPEC_LSX_VFRINTRM_D "floor")]) ++ ++(define_int_attr frint_suffix ++ [(UNSPEC_LSX_VFRINTRP_S "rp") ++ (UNSPEC_LSX_VFRINTRP_D "rp") ++ (UNSPEC_LSX_VFRINTRZ_S "rz") ++ (UNSPEC_LSX_VFRINTRZ_D "rz") ++ (UNSPEC_LSX_VFRINT "") ++ (UNSPEC_LSX_VFRINTRM_S "rm") ++ (UNSPEC_LSX_VFRINTRM_D "rm")]) ++ ++(define_expand "vec_init" ++ [(match_operand:LSX 0 "register_operand") ++ (match_operand:LSX 1 "")] ++ "ISA_HAS_LSX" ++{ ++ loongarch_expand_vector_init (operands[0], operands[1]); ++ DONE; ++}) ++ ++;; vpickev pattern with implicit type conversion. 
++(define_insn "vec_pack_trunc_" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (vec_concat: ++ (truncate: ++ (match_operand:ILSX_DWH 1 "register_operand" "f")) ++ (truncate: ++ (match_operand:ILSX_DWH 2 "register_operand" "f"))))] ++ "ISA_HAS_LSX" ++ "vpickev.\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "")]) ++ ++(define_expand "vec_unpacks_hi_v4sf" ++ [(set (match_operand:V2DF 0 "register_operand" "=f") ++ (float_extend:V2DF ++ (vec_select:V2SF ++ (match_operand:V4SF 1 "register_operand" "f") ++ (match_dup 2))))] ++ "ISA_HAS_LSX" ++{ ++ operands[2] = loongarch_lsx_vec_parallel_const_half (V4SFmode, ++ true/*high_p*/); ++}) ++ ++(define_expand "vec_unpacks_lo_v4sf" ++ [(set (match_operand:V2DF 0 "register_operand" "=f") ++ (float_extend:V2DF ++ (vec_select:V2SF ++ (match_operand:V4SF 1 "register_operand" "f") ++ (match_dup 2))))] ++ "ISA_HAS_LSX" ++{ ++ operands[2] = loongarch_lsx_vec_parallel_const_half (V4SFmode, ++ false/*high_p*/); ++}) ++ ++(define_expand "vec_unpacks_hi_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:ILSX_WHB 1 "register_operand")] ++ "ISA_HAS_LSX" ++{ ++ loongarch_expand_vec_unpack (operands, false/*unsigned_p*/, true/*high_p*/); ++ DONE; ++}) ++ ++(define_expand "vec_unpacks_lo_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:ILSX_WHB 1 "register_operand")] ++ "ISA_HAS_LSX" ++{ ++ loongarch_expand_vec_unpack (operands, false/*unsigned_p*/, false/*high_p*/); ++ DONE; ++}) ++ ++(define_expand "vec_unpacku_hi_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:ILSX_WHB 1 "register_operand")] ++ "ISA_HAS_LSX" ++{ ++ loongarch_expand_vec_unpack (operands, true/*unsigned_p*/, true/*high_p*/); ++ DONE; ++}) ++ ++(define_expand "vec_unpacku_lo_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:ILSX_WHB 1 "register_operand")] ++ "ISA_HAS_LSX" ++{ ++ loongarch_expand_vec_unpack (operands, true/*unsigned_p*/, false/*high_p*/); ++ DONE; ++}) ++ ++(define_expand 
"vec_extract" ++ [(match_operand: 0 "register_operand") ++ (match_operand:ILSX 1 "register_operand") ++ (match_operand 2 "const__operand")] ++ "ISA_HAS_LSX" ++{ ++ if (mode == QImode || mode == HImode) ++ { ++ rtx dest1 = gen_reg_rtx (SImode); ++ emit_insn (gen_lsx_vpickve2gr_ (dest1, operands[1], operands[2])); ++ emit_move_insn (operands[0], ++ gen_lowpart (mode, dest1)); ++ } ++ else ++ emit_insn (gen_lsx_vpickve2gr_ (operands[0], operands[1], operands[2])); ++ DONE; ++}) ++ ++(define_expand "vec_extract" ++ [(match_operand: 0 "register_operand") ++ (match_operand:FLSX 1 "register_operand") ++ (match_operand 2 "const__operand")] ++ "ISA_HAS_LSX" ++{ ++ rtx temp; ++ HOST_WIDE_INT val = INTVAL (operands[2]); ++ ++ if (val == 0) ++ temp = operands[1]; ++ else ++ { ++ rtx n = GEN_INT (val * GET_MODE_SIZE (mode)); ++ temp = gen_reg_rtx (mode); ++ emit_insn (gen_lsx_vbsrl_ (temp, operands[1], n)); ++ } ++ emit_insn (gen_lsx_vec_extract_ (operands[0], temp)); ++ DONE; ++}) ++ ++(define_insn_and_split "lsx_vec_extract_" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (vec_select: ++ (match_operand:FLSX 1 "register_operand" "f") ++ (parallel [(const_int 0)])))] ++ "ISA_HAS_LSX" ++ "#" ++ "&& reload_completed" ++ [(set (match_dup 0) (match_dup 1))] ++{ ++ operands[1] = gen_rtx_REG (mode, REGNO (operands[1])); ++} ++ [(set_attr "move_type" "fmove") ++ (set_attr "mode" "")]) ++ ++(define_expand "vec_set" ++ [(match_operand:ILSX 0 "register_operand") ++ (match_operand: 1 "reg_or_0_operand") ++ (match_operand 2 "const__operand")] ++ "ISA_HAS_LSX" ++{ ++ rtx index = GEN_INT (1 << INTVAL (operands[2])); ++ emit_insn (gen_lsx_vinsgr2vr_ (operands[0], operands[1], ++ operands[0], index)); ++ DONE; ++}) ++ ++(define_expand "vec_set" ++ [(match_operand:FLSX 0 "register_operand") ++ (match_operand: 1 "register_operand") ++ (match_operand 2 "const__operand")] ++ "ISA_HAS_LSX" ++{ ++ rtx index = GEN_INT (1 << INTVAL (operands[2])); ++ emit_insn (gen_lsx_vextrins__scalar 
(operands[0], operands[1], ++ operands[0], index)); ++ DONE; ++}) ++ ++(define_expand "vec_cmp" ++ [(set (match_operand: 0 "register_operand") ++ (match_operator 1 "" ++ [(match_operand:LSX 2 "register_operand") ++ (match_operand:LSX 3 "register_operand")]))] ++ "ISA_HAS_LSX" ++{ ++ bool ok = loongarch_expand_vec_cmp (operands); ++ gcc_assert (ok); ++ DONE; ++}) ++ ++(define_expand "vec_cmpu" ++ [(set (match_operand: 0 "register_operand") ++ (match_operator 1 "" ++ [(match_operand:ILSX 2 "register_operand") ++ (match_operand:ILSX 3 "register_operand")]))] ++ "ISA_HAS_LSX" ++{ ++ bool ok = loongarch_expand_vec_cmp (operands); ++ gcc_assert (ok); ++ DONE; ++}) ++ ++(define_expand "vcondu" ++ [(match_operand:LSX 0 "register_operand") ++ (match_operand:LSX 1 "reg_or_m1_operand") ++ (match_operand:LSX 2 "reg_or_0_operand") ++ (match_operator 3 "" ++ [(match_operand:ILSX 4 "register_operand") ++ (match_operand:ILSX 5 "register_operand")])] ++ "ISA_HAS_LSX ++ && (GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (mode))" ++{ ++ loongarch_expand_vec_cond_expr (mode, mode, operands); ++ DONE; ++}) ++ ++(define_expand "vcond" ++ [(match_operand:LSX 0 "register_operand") ++ (match_operand:LSX 1 "reg_or_m1_operand") ++ (match_operand:LSX 2 "reg_or_0_operand") ++ (match_operator 3 "" ++ [(match_operand:LSX_2 4 "register_operand") ++ (match_operand:LSX_2 5 "register_operand")])] ++ "ISA_HAS_LSX ++ && (GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (mode))" ++{ ++ loongarch_expand_vec_cond_expr (mode, mode, operands); ++ DONE; ++}) ++ ++(define_expand "vcond_mask_" ++ [(match_operand:ILSX 0 "register_operand") ++ (match_operand:ILSX 1 "reg_or_m1_operand") ++ (match_operand:ILSX 2 "reg_or_0_operand") ++ (match_operand:ILSX 3 "register_operand")] ++ "ISA_HAS_LSX" ++{ ++ loongarch_expand_vec_cond_mask_expr (mode, ++ mode, operands); ++ DONE; ++}) ++ ++(define_insn "lsx_vinsgr2vr_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (vec_merge:ILSX ++ (vec_duplicate:ILSX ++ (match_operand: 
1 "reg_or_0_operand" "rJ")) ++ (match_operand:ILSX 2 "register_operand" "0") ++ (match_operand 3 "const__operand" "")))] ++ "ISA_HAS_LSX" ++{ ++ if (!TARGET_64BIT && (mode == V2DImode || mode == V2DFmode)) ++ return "#"; ++ else ++ return "vinsgr2vr.\t%w0,%z1,%y3"; ++} ++ [(set_attr "type" "simd_insert") ++ (set_attr "mode" "")]) ++ ++(define_split ++ [(set (match_operand:LSX_D 0 "register_operand") ++ (vec_merge:LSX_D ++ (vec_duplicate:LSX_D ++ (match_operand: 1 "_operand")) ++ (match_operand:LSX_D 2 "register_operand") ++ (match_operand 3 "const__operand")))] ++ "reload_completed && ISA_HAS_LSX && !TARGET_64BIT" ++ [(const_int 0)] ++{ ++ loongarch_split_lsx_insert_d (operands[0], operands[2], operands[3], operands[1]); ++ DONE; ++}) ++ ++(define_insn "lsx_vextrins__internal" ++ [(set (match_operand:LSX 0 "register_operand" "=f") ++ (vec_merge:LSX ++ (vec_duplicate:LSX ++ (vec_select: ++ (match_operand:LSX 1 "register_operand" "f") ++ (parallel [(const_int 0)]))) ++ (match_operand:LSX 2 "register_operand" "0") ++ (match_operand 3 "const__operand" "")))] ++ "ISA_HAS_LSX" ++ "vextrins.\t%w0,%w1,%y3<<4" ++ [(set_attr "type" "simd_insert") ++ (set_attr "mode" "")]) ++ ++;; Operand 3 is a scalar. 
++(define_insn "lsx_vextrins__scalar" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (vec_merge:FLSX ++ (vec_duplicate:FLSX ++ (match_operand: 1 "register_operand" "f")) ++ (match_operand:FLSX 2 "register_operand" "0") ++ (match_operand 3 "const__operand" "")))] ++ "ISA_HAS_LSX" ++ "vextrins.\t%w0,%w1,%y3<<4" ++ [(set_attr "type" "simd_insert") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vpickve2gr_" ++ [(set (match_operand: 0 "register_operand" "=r") ++ (any_extend: ++ (vec_select: ++ (match_operand:ILSX_HB 1 "register_operand" "f") ++ (parallel [(match_operand 2 "const__operand" "")]))))] ++ "ISA_HAS_LSX" ++ "vpickve2gr.\t%0,%w1,%2" ++ [(set_attr "type" "simd_copy") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vpickve2gr_" ++ [(set (match_operand: 0 "register_operand" "=r") ++ (any_extend: ++ (vec_select: ++ (match_operand:LSX_W 1 "register_operand" "f") ++ (parallel [(match_operand 2 "const__operand" "")]))))] ++ "ISA_HAS_LSX" ++ "vpickve2gr.\t%0,%w1,%2" ++ [(set_attr "type" "simd_copy") ++ (set_attr "mode" "")]) ++ ++(define_insn_and_split "lsx_vpickve2gr_du" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (vec_select:DI ++ (match_operand:V2DI 1 "register_operand" "f") ++ (parallel [(match_operand 2 "const_0_or_1_operand" "")])))] ++ "ISA_HAS_LSX" ++{ ++ if (TARGET_64BIT) ++ return "vpickve2gr.du\t%0,%w1,%2"; ++ else ++ return "#"; ++} ++ "reload_completed && ISA_HAS_LSX && !TARGET_64BIT" ++ [(const_int 0)] ++{ ++ loongarch_split_lsx_copy_d (operands[0], operands[1], operands[2], ++ gen_lsx_vpickve2gr_wu); ++ DONE; ++} ++ [(set_attr "type" "simd_copy") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn_and_split "lsx_vpickve2gr_" ++ [(set (match_operand: 0 "register_operand" "=r") ++ (vec_select: ++ (match_operand:LSX_D 1 "register_operand" "f") ++ (parallel [(match_operand 2 "const__operand" "")])))] ++ "ISA_HAS_LSX" ++{ ++ if (TARGET_64BIT) ++ return "vpickve2gr.\t%0,%w1,%2"; ++ else ++ return "#"; ++} ++ "reload_completed && 
ISA_HAS_LSX && !TARGET_64BIT" ++ [(const_int 0)] ++{ ++ loongarch_split_lsx_copy_d (operands[0], operands[1], operands[2], ++ gen_lsx_vpickve2gr_w); ++ DONE; ++} ++ [(set_attr "type" "simd_copy") ++ (set_attr "mode" "")]) ++ ++ ++(define_expand "abs2" ++ [(match_operand:ILSX 0 "register_operand" "=f") ++ (abs:ILSX (match_operand:ILSX 1 "register_operand" "f"))] ++ "ISA_HAS_LSX" ++{ ++ if (ISA_HAS_LSX) ++ { ++ emit_insn (gen_vabs2 (operands[0], operands[1])); ++ DONE; ++ } ++ else ++ { ++ rtx reg = gen_reg_rtx (mode); ++ emit_move_insn (reg, CONST0_RTX (mode)); ++ emit_insn (gen_lsx_vadda_ (operands[0], operands[1], reg)); ++ DONE; ++ } ++}) ++ ++(define_expand "neg2" ++ [(set (match_operand:ILSX 0 "register_operand") ++ (neg:ILSX (match_operand:ILSX 1 "register_operand")))] ++ "ISA_HAS_LSX" ++{ ++ emit_insn (gen_vneg2 (operands[0], operands[1])); ++ DONE; ++}) ++ ++(define_expand "neg2" ++ [(set (match_operand:FLSX 0 "register_operand") ++ (neg:FLSX (match_operand:FLSX 1 "register_operand")))] ++ "ISA_HAS_LSX" ++{ ++ rtx reg = gen_reg_rtx (mode); ++ emit_move_insn (reg, CONST0_RTX (mode)); ++ emit_insn (gen_sub3 (operands[0], reg, operands[1])); ++ DONE; ++}) ++ ++(define_expand "lsx_vrepli" ++ [(match_operand:ILSX 0 "register_operand") ++ (match_operand 1 "const_imm10_operand")] ++ "ISA_HAS_LSX" ++{ ++ if (mode == V16QImode) ++ operands[1] = GEN_INT (trunc_int_for_mode (INTVAL (operands[1]), ++ mode)); ++ emit_move_insn (operands[0], ++ loongarch_gen_const_int_vector (mode, INTVAL (operands[1]))); ++ DONE; ++}) ++ ++(define_expand "vec_perm" ++ [(match_operand:LSX 0 "register_operand") ++ (match_operand:LSX 1 "register_operand") ++ (match_operand:LSX 2 "register_operand") ++ (match_operand:LSX 3 "register_operand")] ++ "ISA_HAS_LSX" ++{ ++ loongarch_expand_vec_perm (operands[0], operands[1], ++ operands[2], operands[3]); ++ DONE; ++}) ++ ++(define_insn "lsx_vshuf_" ++ [(set (match_operand:LSX_DWH 0 "register_operand" "=f") ++ (unspec:LSX_DWH 
[(match_operand:LSX_DWH 1 "register_operand" "0") ++ (match_operand:LSX_DWH 2 "register_operand" "f") ++ (match_operand:LSX_DWH 3 "register_operand" "f")] ++ UNSPEC_LSX_VSHUF))] ++ "ISA_HAS_LSX" ++ "vshuf.\t%w0,%w2,%w3" ++ [(set_attr "type" "simd_sld") ++ (set_attr "mode" "")]) ++ ++(define_expand "mov" ++ [(set (match_operand:LSX 0) ++ (match_operand:LSX 1))] ++ "ISA_HAS_LSX" ++{ ++ if (loongarch_legitimize_move (mode, operands[0], operands[1])) ++ DONE; ++}) ++ ++(define_expand "movmisalign" ++ [(set (match_operand:LSX 0) ++ (match_operand:LSX 1))] ++ "ISA_HAS_LSX" ++{ ++ if (loongarch_legitimize_move (mode, operands[0], operands[1])) ++ DONE; ++}) ++ ++(define_insn "mov_lsx" ++ [(set (match_operand:LSX 0 "nonimmediate_operand" "=f,f,R,*r,*f") ++ (match_operand:LSX 1 "move_operand" "fYGYI,R,f,*f,*r"))] ++ "ISA_HAS_LSX" ++{ return loongarch_output_move (operands[0], operands[1]); } ++ [(set_attr "type" "simd_move,simd_load,simd_store,simd_copy,simd_insert") ++ (set_attr "mode" "")]) ++ ++(define_split ++ [(set (match_operand:LSX 0 "nonimmediate_operand") ++ (match_operand:LSX 1 "move_operand"))] ++ "reload_completed && ISA_HAS_LSX ++ && loongarch_split_move_insn_p (operands[0], operands[1])" ++ [(const_int 0)] ++{ ++ loongarch_split_move_insn (operands[0], operands[1], curr_insn); ++ DONE; ++}) ++ ++;; Offset load ++(define_expand "lsx_ld_" ++ [(match_operand:LSX 0 "register_operand") ++ (match_operand 1 "pmode_register_operand") ++ (match_operand 2 "aq10_operand")] ++ "ISA_HAS_LSX" ++{ ++ rtx addr = plus_constant (GET_MODE (operands[1]), operands[1], ++ INTVAL (operands[2])); ++ loongarch_emit_move (operands[0], gen_rtx_MEM (mode, addr)); ++ DONE; ++}) ++ ++;; Offset store ++(define_expand "lsx_st_" ++ [(match_operand:LSX 0 "register_operand") ++ (match_operand 1 "pmode_register_operand") ++ (match_operand 2 "aq10_operand")] ++ "ISA_HAS_LSX" ++{ ++ rtx addr = plus_constant (GET_MODE (operands[1]), operands[1], ++ INTVAL (operands[2])); ++ loongarch_emit_move 
(gen_rtx_MEM (mode, addr), operands[0]); ++ DONE; ++}) ++ ++;; Integer operations ++(define_insn "add3" ++ [(set (match_operand:ILSX 0 "register_operand" "=f,f,f") ++ (plus:ILSX ++ (match_operand:ILSX 1 "register_operand" "f,f,f") ++ (match_operand:ILSX 2 "reg_or_vector_same_ximm5_operand" "f,Unv5,Uuv5")))] ++ "ISA_HAS_LSX" ++{ ++ switch (which_alternative) ++ { ++ case 0: ++ return "vadd.\t%w0,%w1,%w2"; ++ case 1: ++ { ++ HOST_WIDE_INT val = INTVAL (CONST_VECTOR_ELT (operands[2], 0)); ++ ++ operands[2] = GEN_INT (-val); ++ return "vsubi.\t%w0,%w1,%d2"; ++ } ++ case 2: ++ return "vaddi.\t%w0,%w1,%E2"; ++ default: ++ gcc_unreachable (); ++ } ++} ++ [(set_attr "alu_type" "simd_add") ++ (set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "sub3" ++ [(set (match_operand:ILSX 0 "register_operand" "=f,f") ++ (minus:ILSX ++ (match_operand:ILSX 1 "register_operand" "f,f") ++ (match_operand:ILSX 2 "reg_or_vector_same_uimm5_operand" "f,Uuv5")))] ++ "ISA_HAS_LSX" ++ "@ ++ vsub.\t%w0,%w1,%w2 ++ vsubi.\t%w0,%w1,%E2" ++ [(set_attr "alu_type" "simd_add") ++ (set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "mul3" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (mult:ILSX (match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vmul.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_mul") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vmadd_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (plus:ILSX (mult:ILSX (match_operand:ILSX 2 "register_operand" "f") ++ (match_operand:ILSX 3 "register_operand" "f")) ++ (match_operand:ILSX 1 "register_operand" "0")))] ++ "ISA_HAS_LSX" ++ "vmadd.\t%w0,%w2,%w3" ++ [(set_attr "type" "simd_mul") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vmsub_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (minus:ILSX (match_operand:ILSX 1 "register_operand" "0") ++ (mult:ILSX (match_operand:ILSX 2 
"register_operand" "f") ++ (match_operand:ILSX 3 "register_operand" "f"))))] ++ "ISA_HAS_LSX" ++ "vmsub.\t%w0,%w2,%w3" ++ [(set_attr "type" "simd_mul") ++ (set_attr "mode" "")]) ++ ++(define_insn "div3" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (div:ILSX (match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++{ return loongarch_lsx_output_division ("vdiv.\t%w0,%w1,%w2", operands); } ++ [(set_attr "type" "simd_div") ++ (set_attr "mode" "")]) ++ ++(define_insn "udiv3" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (udiv:ILSX (match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++{ return loongarch_lsx_output_division ("vdiv.\t%w0,%w1,%w2", operands); } ++ [(set_attr "type" "simd_div") ++ (set_attr "mode" "")]) ++ ++(define_insn "mod3" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (mod:ILSX (match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++{ return loongarch_lsx_output_division ("vmod.\t%w0,%w1,%w2", operands); } ++ [(set_attr "type" "simd_div") ++ (set_attr "mode" "")]) ++ ++(define_insn "umod3" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (umod:ILSX (match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++{ return loongarch_lsx_output_division ("vmod.\t%w0,%w1,%w2", operands); } ++ [(set_attr "type" "simd_div") ++ (set_attr "mode" "")]) ++ ++(define_insn "xor3" ++ [(set (match_operand:ILSX 0 "register_operand" "=f,f,f") ++ (xor:ILSX ++ (match_operand:ILSX 1 "register_operand" "f,f,f") ++ (match_operand:ILSX 2 "reg_or_vector_same_val_operand" "f,YC,Urv8")))] ++ "ISA_HAS_LSX" ++ "@ ++ vxor.v\t%w0,%w1,%w2 ++ vbitrevi.%v0\t%w0,%w1,%V2 ++ vxori.b\t%w0,%w1,%B2" ++ [(set_attr "type" "simd_logic,simd_bit,simd_logic") ++ (set_attr "mode" "")]) ++ ++(define_insn "ior3" ++ 
[(set (match_operand:LSX 0 "register_operand" "=f,f,f") ++ (ior:LSX ++ (match_operand:LSX 1 "register_operand" "f,f,f") ++ (match_operand:LSX 2 "reg_or_vector_same_val_operand" "f,YC,Urv8")))] ++ "ISA_HAS_LSX" ++ "@ ++ vor.v\t%w0,%w1,%w2 ++ vbitseti.%v0\t%w0,%w1,%V2 ++ vori.b\t%w0,%w1,%B2" ++ [(set_attr "type" "simd_logic,simd_bit,simd_logic") ++ (set_attr "mode" "")]) ++ ++(define_insn "and3" ++ [(set (match_operand:LSX 0 "register_operand" "=f,f,f") ++ (and:LSX ++ (match_operand:LSX 1 "register_operand" "f,f,f") ++ (match_operand:LSX 2 "reg_or_vector_same_val_operand" "f,YZ,Urv8")))] ++ "ISA_HAS_LSX" ++{ ++ switch (which_alternative) ++ { ++ case 0: ++ return "vand.v\t%w0,%w1,%w2"; ++ case 1: ++ { ++ rtx elt0 = CONST_VECTOR_ELT (operands[2], 0); ++ unsigned HOST_WIDE_INT val = ~UINTVAL (elt0); ++ operands[2] = loongarch_gen_const_int_vector (mode, val & (-val)); ++ return "vbitclri.%v0\t%w0,%w1,%V2"; ++ } ++ case 2: ++ return "vandi.b\t%w0,%w1,%B2"; ++ default: ++ gcc_unreachable (); ++ } ++} ++ [(set_attr "type" "simd_logic,simd_bit,simd_logic") ++ (set_attr "mode" "")]) ++ ++(define_insn "one_cmpl2" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (not:ILSX (match_operand:ILSX 1 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vnor.v\t%w0,%w1,%w1" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "TI")]) ++ ++(define_insn "vlshr3" ++ [(set (match_operand:ILSX 0 "register_operand" "=f,f") ++ (lshiftrt:ILSX ++ (match_operand:ILSX 1 "register_operand" "f,f") ++ (match_operand:ILSX 2 "reg_or_vector_same_uimm6_operand" "f,Uuv6")))] ++ "ISA_HAS_LSX" ++ "@ ++ vsrl.\t%w0,%w1,%w2 ++ vsrli.\t%w0,%w1,%E2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "vashr3" ++ [(set (match_operand:ILSX 0 "register_operand" "=f,f") ++ (ashiftrt:ILSX ++ (match_operand:ILSX 1 "register_operand" "f,f") ++ (match_operand:ILSX 2 "reg_or_vector_same_uimm6_operand" "f,Uuv6")))] ++ "ISA_HAS_LSX" ++ "@ ++ vsra.\t%w0,%w1,%w2 ++ vsrai.\t%w0,%w1,%E2" ++ 
[(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "vashl3" ++ [(set (match_operand:ILSX 0 "register_operand" "=f,f") ++ (ashift:ILSX ++ (match_operand:ILSX 1 "register_operand" "f,f") ++ (match_operand:ILSX 2 "reg_or_vector_same_uimm6_operand" "f,Uuv6")))] ++ "ISA_HAS_LSX" ++ "@ ++ vsll.\t%w0,%w1,%w2 ++ vslli.\t%w0,%w1,%E2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++;; Floating-point operations ++(define_insn "add3" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (plus:FLSX (match_operand:FLSX 1 "register_operand" "f") ++ (match_operand:FLSX 2 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vfadd.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_fadd") ++ (set_attr "mode" "")]) ++ ++(define_insn "sub3" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (minus:FLSX (match_operand:FLSX 1 "register_operand" "f") ++ (match_operand:FLSX 2 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vfsub.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_fadd") ++ (set_attr "mode" "")]) ++ ++(define_insn "mul3" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (mult:FLSX (match_operand:FLSX 1 "register_operand" "f") ++ (match_operand:FLSX 2 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vfmul.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_fmul") ++ (set_attr "mode" "")]) ++ ++(define_insn "div3" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (div:FLSX (match_operand:FLSX 1 "register_operand" "f") ++ (match_operand:FLSX 2 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vfdiv.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_fdiv") ++ (set_attr "mode" "")]) ++ ++(define_insn "fma4" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (fma:FLSX (match_operand:FLSX 1 "register_operand" "f") ++ (match_operand:FLSX 2 "register_operand" "f") ++ (match_operand:FLSX 3 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vfmadd.\t%w0,%w1,%w2,%w3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "")]) ++ ++(define_insn "fnma4" 
++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (fma:FLSX (neg:FLSX (match_operand:FLSX 1 "register_operand" "f")) ++ (match_operand:FLSX 2 "register_operand" "f") ++ (match_operand:FLSX 3 "register_operand" "0")))] ++ "ISA_HAS_LSX" ++ "vfnmsub.\t%w0,%w1,%w2,%w0" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "")]) ++ ++(define_insn "sqrt2" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (sqrt:FLSX (match_operand:FLSX 1 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vfsqrt.\t%w0,%w1" ++ [(set_attr "type" "simd_fdiv") ++ (set_attr "mode" "")]) ++ ++;; Built-in functions ++(define_insn "lsx_vadda_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (plus:ILSX (abs:ILSX (match_operand:ILSX 1 "register_operand" "f")) ++ (abs:ILSX (match_operand:ILSX 2 "register_operand" "f"))))] ++ "ISA_HAS_LSX" ++ "vadda.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "ssadd3" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (ss_plus:ILSX (match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vsadd.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "usadd3" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (us_plus:ILSX (match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vsadd.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vabsd_s_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")] ++ UNSPEC_LSX_ABSD_S))] ++ "ISA_HAS_LSX" ++ "vabsd.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vabsd_u_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX 
[(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")] ++ UNSPEC_LSX_VABSD_U))] ++ "ISA_HAS_LSX" ++ "vabsd.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vavg_s_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")] ++ UNSPEC_LSX_VAVG_S))] ++ "ISA_HAS_LSX" ++ "vavg.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vavg_u_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")] ++ UNSPEC_LSX_VAVG_U))] ++ "ISA_HAS_LSX" ++ "vavg.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vavgr_s_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")] ++ UNSPEC_LSX_VAVGR_S))] ++ "ISA_HAS_LSX" ++ "vavgr.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vavgr_u_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")] ++ UNSPEC_LSX_VAVGR_U))] ++ "ISA_HAS_LSX" ++ "vavgr.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vbitclr_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")] ++ UNSPEC_LSX_VBITCLR))] ++ "ISA_HAS_LSX" ++ "vbitclr.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vbitclri_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX 
[(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LSX_VBITCLRI))] ++ "ISA_HAS_LSX" ++ "vbitclri.\t%w0,%w1,%2" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vbitrev_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")] ++ UNSPEC_LSX_VBITREV))] ++ "ISA_HAS_LSX" ++ "vbitrev.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vbitrevi_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand 2 "const_lsx_branch_operand" "")] ++ UNSPEC_LSX_VBITREVI))] ++ "ISA_HAS_LSX" ++ "vbitrevi.\t%w0,%w1,%2" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vbitsel_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (ior:ILSX (and:ILSX (not:ILSX ++ (match_operand:ILSX 3 "register_operand" "f")) ++ (match_operand:ILSX 1 "register_operand" "f")) ++ (and:ILSX (match_dup 3) ++ (match_operand:ILSX 2 "register_operand" "f"))))] ++ "ISA_HAS_LSX" ++ "vbitsel.v\t%w0,%w1,%w2,%w3" ++ [(set_attr "type" "simd_bitmov") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vbitseli_b" ++ [(set (match_operand:V16QI 0 "register_operand" "=f") ++ (ior:V16QI (and:V16QI (not:V16QI ++ (match_operand:V16QI 1 "register_operand" "0")) ++ (match_operand:V16QI 2 "register_operand" "f")) ++ (and:V16QI (match_dup 1) ++ (match_operand:V16QI 3 "const_vector_same_val_operand" "Urv8"))))] ++ "ISA_HAS_LSX" ++ "vbitseli.b\t%w0,%w2,%B3" ++ [(set_attr "type" "simd_bitmov") ++ (set_attr "mode" "V16QI")]) ++ ++(define_insn "lsx_vbitset_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")] ++ UNSPEC_LSX_VBITSET))] ++ "ISA_HAS_LSX" ++ 
"vbitset.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vbitseti_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LSX_VBITSETI))] ++ "ISA_HAS_LSX" ++ "vbitseti.\t%w0,%w1,%2" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) ++ ++(define_code_iterator ICC [eq le leu lt ltu]) ++ ++(define_code_attr icc ++ [(eq "eq") ++ (le "le") ++ (leu "le") ++ (lt "lt") ++ (ltu "lt")]) ++ ++(define_code_attr icci ++ [(eq "eqi") ++ (le "lei") ++ (leu "lei") ++ (lt "lti") ++ (ltu "lti")]) ++ ++(define_code_attr cmpi ++ [(eq "s") ++ (le "s") ++ (leu "u") ++ (lt "s") ++ (ltu "u")]) ++ ++(define_code_attr cmpi_1 ++ [(eq "") ++ (le "") ++ (leu "u") ++ (lt "") ++ (ltu "u")]) ++ ++(define_insn "lsx_vs_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f,f") ++ (ICC:ILSX ++ (match_operand:ILSX 1 "register_operand" "f,f") ++ (match_operand:ILSX 2 "reg_or_vector_same_imm5_operand" "f,Uv5")))] ++ "ISA_HAS_LSX" ++ "@ ++ vs.\t%w0,%w1,%w2 ++ vs.\t%w0,%w1,%E2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vfclass_" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:FLSX 1 "register_operand" "f")] ++ UNSPEC_LSX_VFCLASS))] ++ "ISA_HAS_LSX" ++ "vfclass.\t%w0,%w1" ++ [(set_attr "type" "simd_fclass") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vfcmp_caf_" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:FLSX 1 "register_operand" "f") ++ (match_operand:FLSX 2 "register_operand" "f")] ++ UNSPEC_LSX_VFCMP_CAF))] ++ "ISA_HAS_LSX" ++ "vfcmp.caf.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_fcmp") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vfcmp_cune_" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:FLSX 1 "register_operand" "f") ++ (match_operand:FLSX 2 "register_operand" "f")] ++ 
UNSPEC_LSX_VFCMP_CUNE))] ++ "ISA_HAS_LSX" ++ "vfcmp.cune.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_fcmp") ++ (set_attr "mode" "")]) ++ ++(define_code_iterator vfcond [unordered ordered eq ne le lt uneq unle unlt]) ++ ++(define_code_attr fcc ++ [(unordered "cun") ++ (ordered "cor") ++ (eq "ceq") ++ (ne "cne") ++ (uneq "cueq") ++ (unle "cule") ++ (unlt "cult") ++ (le "cle") ++ (lt "clt")]) ++ ++(define_int_iterator FSC_UNS [UNSPEC_LSX_VFCMP_SAF UNSPEC_LSX_VFCMP_SUN UNSPEC_LSX_VFCMP_SOR ++ UNSPEC_LSX_VFCMP_SEQ UNSPEC_LSX_VFCMP_SNE UNSPEC_LSX_VFCMP_SUEQ ++ UNSPEC_LSX_VFCMP_SUNE UNSPEC_LSX_VFCMP_SULE UNSPEC_LSX_VFCMP_SULT ++ UNSPEC_LSX_VFCMP_SLE UNSPEC_LSX_VFCMP_SLT]) ++ ++(define_int_attr fsc ++ [(UNSPEC_LSX_VFCMP_SAF "saf") ++ (UNSPEC_LSX_VFCMP_SUN "sun") ++ (UNSPEC_LSX_VFCMP_SOR "sor") ++ (UNSPEC_LSX_VFCMP_SEQ "seq") ++ (UNSPEC_LSX_VFCMP_SNE "sne") ++ (UNSPEC_LSX_VFCMP_SUEQ "sueq") ++ (UNSPEC_LSX_VFCMP_SUNE "sune") ++ (UNSPEC_LSX_VFCMP_SULE "sule") ++ (UNSPEC_LSX_VFCMP_SULT "sult") ++ (UNSPEC_LSX_VFCMP_SLE "sle") ++ (UNSPEC_LSX_VFCMP_SLT "slt")]) ++ ++(define_insn "lsx_vfcmp__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (vfcond: (match_operand:FLSX 1 "register_operand" "f") ++ (match_operand:FLSX 2 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vfcmp..\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_fcmp") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vfcmp__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:FLSX 1 "register_operand" "f") ++ (match_operand:FLSX 2 "register_operand" "f")] ++ FSC_UNS))] ++ "ISA_HAS_LSX" ++ "vfcmp..\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_fcmp") ++ (set_attr "mode" "")]) ++ ++(define_mode_attr fint ++ [(V4SF "v4si") ++ (V2DF "v2di")]) ++ ++(define_mode_attr FINTCNV ++ [(V4SF "I2S") ++ (V2DF "I2D")]) ++ ++(define_mode_attr FINTCNV_2 ++ [(V4SF "S2I") ++ (V2DF "D2I")]) ++ ++(define_insn "float2" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (float:FLSX (match_operand: 1 
"register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vffint..\t%w0,%w1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "cnv_mode" "") ++ (set_attr "mode" "")]) ++ ++(define_insn "floatuns2" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (unsigned_float:FLSX ++ (match_operand: 1 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vffint..\t%w0,%w1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "cnv_mode" "") ++ (set_attr "mode" "")]) ++ ++(define_mode_attr FFQ ++ [(V4SF "V8HI") ++ (V2DF "V4SI")]) ++ ++(define_insn "lsx_vreplgr2vr_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f,f") ++ (vec_duplicate:ILSX ++ (match_operand: 1 "reg_or_0_operand" "r,J")))] ++ "ISA_HAS_LSX" ++{ ++ if (which_alternative == 1) ++ return "ldi.\t%w0,0"; ++ ++ if (!TARGET_64BIT && (mode == V2DImode || mode == V2DFmode)) ++ return "#"; ++ else ++ return "vreplgr2vr.\t%w0,%z1"; ++} ++ [(set_attr "type" "simd_fill") ++ (set_attr "mode" "")]) ++ ++(define_split ++ [(set (match_operand:LSX_D 0 "register_operand") ++ (vec_duplicate:LSX_D ++ (match_operand: 1 "register_operand")))] ++ "reload_completed && ISA_HAS_LSX && !TARGET_64BIT" ++ [(const_int 0)] ++{ ++ loongarch_split_lsx_fill_d (operands[0], operands[1]); ++ DONE; ++}) ++ ++(define_insn "logb2" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")] ++ UNSPEC_LSX_VFLOGB))] ++ "ISA_HAS_LSX" ++ "vflogb.\t%w0,%w1" ++ [(set_attr "type" "simd_flog2") ++ (set_attr "mode" "")]) ++ ++(define_insn "smax3" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (smax:FLSX (match_operand:FLSX 1 "register_operand" "f") ++ (match_operand:FLSX 2 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vfmax.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_fminmax") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vfmaxa_" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (if_then_else:FLSX ++ (gt (abs:FLSX (match_operand:FLSX 1 "register_operand" "f")) ++ (abs:FLSX 
(match_operand:FLSX 2 "register_operand" "f"))) ++ (match_dup 1) ++ (match_dup 2)))] ++ "ISA_HAS_LSX" ++ "vfmaxa.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_fminmax") ++ (set_attr "mode" "")]) ++ ++(define_insn "smin3" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (smin:FLSX (match_operand:FLSX 1 "register_operand" "f") ++ (match_operand:FLSX 2 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vfmin.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_fminmax") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vfmina_" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (if_then_else:FLSX ++ (lt (abs:FLSX (match_operand:FLSX 1 "register_operand" "f")) ++ (abs:FLSX (match_operand:FLSX 2 "register_operand" "f"))) ++ (match_dup 1) ++ (match_dup 2)))] ++ "ISA_HAS_LSX" ++ "vfmina.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_fminmax") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vfrecip_" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")] ++ UNSPEC_LSX_VFRECIP))] ++ "ISA_HAS_LSX" ++ "vfrecip.\t%w0,%w1" ++ [(set_attr "type" "simd_fdiv") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vfrint_" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")] ++ UNSPEC_LSX_VFRINT))] ++ "ISA_HAS_LSX" ++ "vfrint.\t%w0,%w1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vfrsqrt_" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")] ++ UNSPEC_LSX_VFRSQRT))] ++ "ISA_HAS_LSX" ++ "vfrsqrt.\t%w0,%w1" ++ [(set_attr "type" "simd_fdiv") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vftint_s__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:FLSX 1 "register_operand" "f")] ++ UNSPEC_LSX_VFTINT_S))] ++ "ISA_HAS_LSX" ++ "vftint..\t%w0,%w1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "cnv_mode" "") ++ (set_attr "mode" "")]) 
++ ++(define_insn "lsx_vftint_u__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:FLSX 1 "register_operand" "f")] ++ UNSPEC_LSX_VFTINT_U))] ++ "ISA_HAS_LSX" ++ "vftint..\t%w0,%w1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "cnv_mode" "") ++ (set_attr "mode" "")]) ++ ++(define_insn "fix_trunc2" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (fix: (match_operand:FLSX 1 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vftintrz..\t%w0,%w1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "cnv_mode" "") ++ (set_attr "mode" "")]) ++ ++(define_insn "fixuns_trunc2" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unsigned_fix: (match_operand:FLSX 1 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vftintrz..\t%w0,%w1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "cnv_mode" "") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vhw_h_b" ++ [(set (match_operand:V8HI 0 "register_operand" "=f") ++ (addsub:V8HI ++ (any_extend:V8HI ++ (vec_select:V8QI ++ (match_operand:V16QI 1 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)]))) ++ (any_extend:V8HI ++ (vec_select:V8QI ++ (match_operand:V16QI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)])))))] ++ "ISA_HAS_LSX" ++ "vhw.h.b\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V8HI")]) ++ ++(define_insn "lsx_vhw_w_h" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (addsub:V4SI ++ (any_extend:V4SI ++ (vec_select:V4HI ++ (match_operand:V8HI 1 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)]))) ++ (any_extend:V4SI ++ (vec_select:V4HI ++ (match_operand:V8HI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)])))))] ++ "ISA_HAS_LSX" ++ 
"vhw.w.h\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4SI")]) ++ ++(define_insn "lsx_vhw_d_w" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (addsub:V2DI ++ (any_extend:V2DI ++ (vec_select:V2SI ++ (match_operand:V4SI 1 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3)]))) ++ (any_extend:V2DI ++ (vec_select:V2SI ++ (match_operand:V4SI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2)])))))] ++ "ISA_HAS_LSX" ++ "vhw.d.w\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vpackev_b" ++ [(set (match_operand:V16QI 0 "register_operand" "=f") ++ (vec_select:V16QI ++ (vec_concat:V32QI ++ (match_operand:V16QI 1 "register_operand" "f") ++ (match_operand:V16QI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 16) ++ (const_int 2) (const_int 18) ++ (const_int 4) (const_int 20) ++ (const_int 6) (const_int 22) ++ (const_int 8) (const_int 24) ++ (const_int 10) (const_int 26) ++ (const_int 12) (const_int 28) ++ (const_int 14) (const_int 30)])))] ++ "ISA_HAS_LSX" ++ "vpackev.b\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V16QI")]) ++ ++(define_insn "lsx_vpackev_h" ++ [(set (match_operand:V8HI 0 "register_operand" "=f") ++ (vec_select:V8HI ++ (vec_concat:V16HI ++ (match_operand:V8HI 1 "register_operand" "f") ++ (match_operand:V8HI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 8) ++ (const_int 2) (const_int 10) ++ (const_int 4) (const_int 12) ++ (const_int 6) (const_int 14)])))] ++ "ISA_HAS_LSX" ++ "vpackev.h\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8HI")]) ++ ++(define_insn "lsx_vpackev_w" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (vec_select:V4SI ++ (vec_concat:V8SI ++ (match_operand:V4SI 1 "register_operand" "f") ++ (match_operand:V4SI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 4) ++ (const_int 2) (const_int 6)])))] ++ 
"ISA_HAS_LSX" ++ "vpackev.w\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V4SI")]) ++ ++(define_insn "lsx_vpackev_w_f" ++ [(set (match_operand:V4SF 0 "register_operand" "=f") ++ (vec_select:V4SF ++ (vec_concat:V8SF ++ (match_operand:V4SF 1 "register_operand" "f") ++ (match_operand:V4SF 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 4) ++ (const_int 2) (const_int 6)])))] ++ "ISA_HAS_LSX" ++ "vpackev.w\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vilvh_b" ++ [(set (match_operand:V16QI 0 "register_operand" "=f") ++ (vec_select:V16QI ++ (vec_concat:V32QI ++ (match_operand:V16QI 1 "register_operand" "f") ++ (match_operand:V16QI 2 "register_operand" "f")) ++ (parallel [(const_int 8) (const_int 24) ++ (const_int 9) (const_int 25) ++ (const_int 10) (const_int 26) ++ (const_int 11) (const_int 27) ++ (const_int 12) (const_int 28) ++ (const_int 13) (const_int 29) ++ (const_int 14) (const_int 30) ++ (const_int 15) (const_int 31)])))] ++ "ISA_HAS_LSX" ++ "vilvh.b\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V16QI")]) ++ ++(define_insn "lsx_vilvh_h" ++ [(set (match_operand:V8HI 0 "register_operand" "=f") ++ (vec_select:V8HI ++ (vec_concat:V16HI ++ (match_operand:V8HI 1 "register_operand" "f") ++ (match_operand:V8HI 2 "register_operand" "f")) ++ (parallel [(const_int 4) (const_int 12) ++ (const_int 5) (const_int 13) ++ (const_int 6) (const_int 14) ++ (const_int 7) (const_int 15)])))] ++ "ISA_HAS_LSX" ++ "vilvh.h\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8HI")]) ++ ++(define_mode_attr vilvh_suffix ++ [(V4SI "") (V4SF "_f") ++ (V2DI "") (V2DF "_f")]) ++ ++(define_insn "lsx_vilvh_w" ++ [(set (match_operand:LSX_W 0 "register_operand" "=f") ++ (vec_select:LSX_W ++ (vec_concat: ++ (match_operand:LSX_W 1 "register_operand" "f") ++ (match_operand:LSX_W 2 "register_operand" "f")) ++ (parallel [(const_int 2) (const_int 6) ++ 
(const_int 3) (const_int 7)])))] ++ "ISA_HAS_LSX" ++ "vilvh.w\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vilvh_d" ++ [(set (match_operand:LSX_D 0 "register_operand" "=f") ++ (vec_select:LSX_D ++ (vec_concat: ++ (match_operand:LSX_D 1 "register_operand" "f") ++ (match_operand:LSX_D 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 3)])))] ++ "ISA_HAS_LSX" ++ "vilvh.d\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vpackod_b" ++ [(set (match_operand:V16QI 0 "register_operand" "=f") ++ (vec_select:V16QI ++ (vec_concat:V32QI ++ (match_operand:V16QI 1 "register_operand" "f") ++ (match_operand:V16QI 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 17) ++ (const_int 3) (const_int 19) ++ (const_int 5) (const_int 21) ++ (const_int 7) (const_int 23) ++ (const_int 9) (const_int 25) ++ (const_int 11) (const_int 27) ++ (const_int 13) (const_int 29) ++ (const_int 15) (const_int 31)])))] ++ "ISA_HAS_LSX" ++ "vpackod.b\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V16QI")]) ++ ++(define_insn "lsx_vpackod_h" ++ [(set (match_operand:V8HI 0 "register_operand" "=f") ++ (vec_select:V8HI ++ (vec_concat:V16HI ++ (match_operand:V8HI 1 "register_operand" "f") ++ (match_operand:V8HI 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 9) ++ (const_int 3) (const_int 11) ++ (const_int 5) (const_int 13) ++ (const_int 7) (const_int 15)])))] ++ "ISA_HAS_LSX" ++ "vpackod.h\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8HI")]) ++ ++(define_insn "lsx_vpackod_w" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (vec_select:V4SI ++ (vec_concat:V8SI ++ (match_operand:V4SI 1 "register_operand" "f") ++ (match_operand:V4SI 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 5) ++ (const_int 3) (const_int 7)])))] ++ "ISA_HAS_LSX" ++ "vpackod.w\t%w0,%w2,%w1" ++ [(set_attr "type" 
"simd_permute") ++ (set_attr "mode" "V4SI")]) ++ ++(define_insn "lsx_vpackod_w_f" ++ [(set (match_operand:V4SF 0 "register_operand" "=f") ++ (vec_select:V4SF ++ (vec_concat:V8SF ++ (match_operand:V4SF 1 "register_operand" "f") ++ (match_operand:V4SF 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 5) ++ (const_int 3) (const_int 7)])))] ++ "ISA_HAS_LSX" ++ "vpackod.w\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vilvl_b" ++ [(set (match_operand:V16QI 0 "register_operand" "=f") ++ (vec_select:V16QI ++ (vec_concat:V32QI ++ (match_operand:V16QI 1 "register_operand" "f") ++ (match_operand:V16QI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 16) ++ (const_int 1) (const_int 17) ++ (const_int 2) (const_int 18) ++ (const_int 3) (const_int 19) ++ (const_int 4) (const_int 20) ++ (const_int 5) (const_int 21) ++ (const_int 6) (const_int 22) ++ (const_int 7) (const_int 23)])))] ++ "ISA_HAS_LSX" ++ "vilvl.b\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V16QI")]) ++ ++(define_insn "lsx_vilvl_h" ++ [(set (match_operand:V8HI 0 "register_operand" "=f") ++ (vec_select:V8HI ++ (vec_concat:V16HI ++ (match_operand:V8HI 1 "register_operand" "f") ++ (match_operand:V8HI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 8) ++ (const_int 1) (const_int 9) ++ (const_int 2) (const_int 10) ++ (const_int 3) (const_int 11)])))] ++ "ISA_HAS_LSX" ++ "vilvl.h\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8HI")]) ++ ++(define_insn "lsx_vilvl_w" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (vec_select:V4SI ++ (vec_concat:V8SI ++ (match_operand:V4SI 1 "register_operand" "f") ++ (match_operand:V4SI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 4) ++ (const_int 1) (const_int 5)])))] ++ "ISA_HAS_LSX" ++ "vilvl.w\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V4SI")]) ++ ++(define_insn 
"lsx_vilvl_w_f" ++ [(set (match_operand:V4SF 0 "register_operand" "=f") ++ (vec_select:V4SF ++ (vec_concat:V8SF ++ (match_operand:V4SF 1 "register_operand" "f") ++ (match_operand:V4SF 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 4) ++ (const_int 1) (const_int 5)])))] ++ "ISA_HAS_LSX" ++ "vilvl.w\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vilvl_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (vec_select:V2DI ++ (vec_concat:V4DI ++ (match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 2)])))] ++ "ISA_HAS_LSX" ++ "vilvl.d\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vilvl_d_f" ++ [(set (match_operand:V2DF 0 "register_operand" "=f") ++ (vec_select:V2DF ++ (vec_concat:V4DF ++ (match_operand:V2DF 1 "register_operand" "f") ++ (match_operand:V2DF 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 2)])))] ++ "ISA_HAS_LSX" ++ "vilvl.d\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V2DF")]) ++ ++(define_insn "smax3" ++ [(set (match_operand:ILSX 0 "register_operand" "=f,f") ++ (smax:ILSX (match_operand:ILSX 1 "register_operand" "f,f") ++ (match_operand:ILSX 2 "reg_or_vector_same_simm5_operand" "f,Usv5")))] ++ "ISA_HAS_LSX" ++ "@ ++ vmax.\t%w0,%w1,%w2 ++ vmaxi.\t%w0,%w1,%E2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "umax3" ++ [(set (match_operand:ILSX 0 "register_operand" "=f,f") ++ (umax:ILSX (match_operand:ILSX 1 "register_operand" "f,f") ++ (match_operand:ILSX 2 "reg_or_vector_same_uimm5_operand" "f,Uuv5")))] ++ "ISA_HAS_LSX" ++ "@ ++ vmax.\t%w0,%w1,%w2 ++ vmaxi.\t%w0,%w1,%B2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "smin3" ++ [(set (match_operand:ILSX 0 "register_operand" "=f,f") ++ (smin:ILSX (match_operand:ILSX 1 
"register_operand" "f,f") ++ (match_operand:ILSX 2 "reg_or_vector_same_simm5_operand" "f,Usv5")))] ++ "ISA_HAS_LSX" ++ "@ ++ vmin.\t%w0,%w1,%w2 ++ vmini.\t%w0,%w1,%E2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "umin3" ++ [(set (match_operand:ILSX 0 "register_operand" "=f,f") ++ (umin:ILSX (match_operand:ILSX 1 "register_operand" "f,f") ++ (match_operand:ILSX 2 "reg_or_vector_same_uimm5_operand" "f,Uuv5")))] ++ "ISA_HAS_LSX" ++ "@ ++ vmin.\t%w0,%w1,%w2 ++ vmini.\t%w0,%w1,%B2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vclo_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (clz:ILSX (not:ILSX (match_operand:ILSX 1 "register_operand" "f"))))] ++ "ISA_HAS_LSX" ++ "vclo.\t%w0,%w1" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) ++ ++(define_insn "clz2" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (clz:ILSX (match_operand:ILSX 1 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vclz.\t%w0,%w1" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_nor_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f,f") ++ (and:ILSX (not:ILSX (match_operand:ILSX 1 "register_operand" "f,f")) ++ (not:ILSX (match_operand:ILSX 2 "reg_or_vector_same_val_operand" "f,Urv8"))))] ++ "ISA_HAS_LSX" ++ "@ ++ vnor.v\t%w0,%w1,%w2 ++ vnori.b\t%w0,%w1,%B2" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vpickev_b" ++[(set (match_operand:V16QI 0 "register_operand" "=f") ++ (vec_select:V16QI ++ (vec_concat:V32QI ++ (match_operand:V16QI 1 "register_operand" "f") ++ (match_operand:V16QI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14) ++ (const_int 16) (const_int 18) ++ (const_int 20) (const_int 22) ++ (const_int 24) (const_int 26) ++ (const_int 28) (const_int 30)])))] ++ "ISA_HAS_LSX" ++ 
"vpickev.b\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V16QI")]) ++ ++(define_insn "lsx_vpickev_h" ++[(set (match_operand:V8HI 0 "register_operand" "=f") ++ (vec_select:V8HI ++ (vec_concat:V16HI ++ (match_operand:V8HI 1 "register_operand" "f") ++ (match_operand:V8HI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)])))] ++ "ISA_HAS_LSX" ++ "vpickev.h\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8HI")]) ++ ++(define_insn "lsx_vpickev_w" ++[(set (match_operand:V4SI 0 "register_operand" "=f") ++ (vec_select:V4SI ++ (vec_concat:V8SI ++ (match_operand:V4SI 1 "register_operand" "f") ++ (match_operand:V4SI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)])))] ++ "ISA_HAS_LSX" ++ "vpickev.w\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V4SI")]) ++ ++(define_insn "lsx_vpickev_w_f" ++[(set (match_operand:V4SF 0 "register_operand" "=f") ++ (vec_select:V4SF ++ (vec_concat:V8SF ++ (match_operand:V4SF 1 "register_operand" "f") ++ (match_operand:V4SF 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)])))] ++ "ISA_HAS_LSX" ++ "vpickev.w\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vpickod_b" ++[(set (match_operand:V16QI 0 "register_operand" "=f") ++ (vec_select:V16QI ++ (vec_concat:V32QI ++ (match_operand:V16QI 1 "register_operand" "f") ++ (match_operand:V16QI 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15) ++ (const_int 17) (const_int 19) ++ (const_int 21) (const_int 23) ++ (const_int 25) (const_int 27) ++ (const_int 29) (const_int 31)])))] ++ "ISA_HAS_LSX" ++ "vpickod.b\t%w0,%w2,%w1" ++ [(set_attr "type" 
"simd_permute") ++ (set_attr "mode" "V16QI")]) ++ ++(define_insn "lsx_vpickod_h" ++[(set (match_operand:V8HI 0 "register_operand" "=f") ++ (vec_select:V8HI ++ (vec_concat:V16HI ++ (match_operand:V8HI 1 "register_operand" "f") ++ (match_operand:V8HI 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)])))] ++ "ISA_HAS_LSX" ++ "vpickod.h\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8HI")]) ++ ++(define_insn "lsx_vpickod_w" ++[(set (match_operand:V4SI 0 "register_operand" "=f") ++ (vec_select:V4SI ++ (vec_concat:V8SI ++ (match_operand:V4SI 1 "register_operand" "f") ++ (match_operand:V4SI 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)])))] ++ "ISA_HAS_LSX" ++ "vpickod.w\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V4SI")]) ++ ++(define_insn "lsx_vpickod_w_f" ++[(set (match_operand:V4SF 0 "register_operand" "=f") ++ (vec_select:V4SF ++ (vec_concat:V8SF ++ (match_operand:V4SF 1 "register_operand" "f") ++ (match_operand:V4SF 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)])))] ++ "ISA_HAS_LSX" ++ "vpickod.w\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "popcount2" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (popcount:ILSX (match_operand:ILSX 1 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vpcnt.\t%w0,%w1" ++ [(set_attr "type" "simd_pcnt") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vsat_s_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LSX_VSAT_S))] ++ "ISA_HAS_LSX" ++ "vsat.\t%w0,%w1,%2" ++ [(set_attr "type" "simd_sat") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vsat_u_" ++ [(set (match_operand:ILSX 0 
"register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LSX_VSAT_U))] ++ "ISA_HAS_LSX" ++ "vsat.\t%w0,%w1,%2" ++ [(set_attr "type" "simd_sat") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vshuf4i_" ++ [(set (match_operand:LSX_WHB_W 0 "register_operand" "=f") ++ (vec_select:LSX_WHB_W ++ (match_operand:LSX_WHB_W 1 "register_operand" "f") ++ (match_operand 2 "par_const_vector_shf_set_operand" "")))] ++ "ISA_HAS_LSX" ++{ ++ HOST_WIDE_INT val = 0; ++ unsigned int i; ++ ++ /* We convert the selection to an immediate. */ ++ for (i = 0; i < 4; i++) ++ val |= INTVAL (XVECEXP (operands[2], 0, i)) << (2 * i); ++ ++ operands[2] = GEN_INT (val); ++ return "vshuf4i.\t%w0,%w1,%X2"; ++} ++ [(set_attr "type" "simd_shf") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vsrar_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")] ++ UNSPEC_LSX_VSRAR))] ++ "ISA_HAS_LSX" ++ "vsrar.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vsrari_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LSX_VSRARI))] ++ "ISA_HAS_LSX" ++ "vsrari.\t%w0,%w1,%2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vsrlr_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")] ++ UNSPEC_LSX_VSRLR))] ++ "ISA_HAS_LSX" ++ "vsrlr.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vsrlri_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ 
UNSPEC_LSX_VSRLRI))] ++ "ISA_HAS_LSX" ++ "vsrlri.\t%w0,%w1,%2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vssub_s_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (ss_minus:ILSX (match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vssub.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vssub_u_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (us_minus:ILSX (match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vssub.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vreplve_" ++ [(set (match_operand:LSX 0 "register_operand" "=f") ++ (vec_duplicate:LSX ++ (vec_select: ++ (match_operand:LSX 1 "register_operand" "f") ++ (parallel [(match_operand:SI 2 "register_operand" "r")]))))] ++ "ISA_HAS_LSX" ++ "vreplve.\t%w0,%w1,%z2" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vreplvei_" ++ [(set (match_operand:LSX 0 "register_operand" "=f") ++ (vec_duplicate:LSX ++ (vec_select: ++ (match_operand:LSX 1 "register_operand" "f") ++ (parallel [(match_operand 2 "const__operand" "")]))))] ++ "ISA_HAS_LSX" ++ "vreplvei.\t%w0,%w1,%2" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vreplvei__scalar" ++ [(set (match_operand:LSX 0 "register_operand" "=f") ++ (vec_duplicate:LSX ++ (match_operand: 1 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vreplvei.\t%w0,%w1,0" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vfcvt_h_s" ++ [(set (match_operand:V8HI 0 "register_operand" "=f") ++ (unspec:V8HI [(match_operand:V4SF 1 "register_operand" "f") ++ (match_operand:V4SF 2 "register_operand" "f")] ++ UNSPEC_LSX_VFCVT))] ++ "ISA_HAS_LSX" ++ "vfcvt.h.s\t%w0,%w1,%w2" ++ [(set_attr "type" 
"simd_fcvt") ++ (set_attr "mode" "V8HI")]) ++ ++(define_insn "lsx_vfcvt_s_d" ++ [(set (match_operand:V4SF 0 "register_operand" "=f") ++ (unspec:V4SF [(match_operand:V2DF 1 "register_operand" "f") ++ (match_operand:V2DF 2 "register_operand" "f")] ++ UNSPEC_LSX_VFCVT))] ++ "ISA_HAS_LSX" ++ "vfcvt.s.d\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "vec_pack_trunc_v2df" ++ [(set (match_operand:V4SF 0 "register_operand" "=f") ++ (vec_concat:V4SF ++ (float_truncate:V2SF (match_operand:V2DF 1 "register_operand" "f")) ++ (float_truncate:V2SF (match_operand:V2DF 2 "register_operand" "f"))))] ++ "ISA_HAS_LSX" ++ "vfcvt.s.d\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vfcvth_s_h" ++ [(set (match_operand:V4SF 0 "register_operand" "=f") ++ (unspec:V4SF [(match_operand:V8HI 1 "register_operand" "f")] ++ UNSPEC_LSX_VFCVTH))] ++ "ISA_HAS_LSX" ++ "vfcvth.s.h\t%w0,%w1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vfcvth_d_s" ++ [(set (match_operand:V2DF 0 "register_operand" "=f") ++ (float_extend:V2DF ++ (vec_select:V2SF ++ (match_operand:V4SF 1 "register_operand" "f") ++ (parallel [(const_int 2) (const_int 3)]))))] ++ "ISA_HAS_LSX" ++ "vfcvth.d.s\t%w0,%w1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V2DF")]) ++ ++(define_insn "lsx_vfcvtl_s_h" ++ [(set (match_operand:V4SF 0 "register_operand" "=f") ++ (unspec:V4SF [(match_operand:V8HI 1 "register_operand" "f")] ++ UNSPEC_LSX_VFCVTL))] ++ "ISA_HAS_LSX" ++ "vfcvtl.s.h\t%w0,%w1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vfcvtl_d_s" ++ [(set (match_operand:V2DF 0 "register_operand" "=f") ++ (float_extend:V2DF ++ (vec_select:V2SF ++ (match_operand:V4SF 1 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 1)]))))] ++ "ISA_HAS_LSX" ++ "vfcvtl.d.s\t%w0,%w1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V2DF")]) ++ 
++(define_code_attr lsxbr ++ [(eq "bz") ++ (ne "bnz")]) ++ ++(define_code_attr lsxeq_v ++ [(eq "eqz") ++ (ne "nez")]) ++ ++(define_code_attr lsxne_v ++ [(eq "nez") ++ (ne "eqz")]) ++ ++(define_code_attr lsxeq ++ [(eq "anyeqz") ++ (ne "allnez")]) ++ ++(define_code_attr lsxne ++ [(eq "allnez") ++ (ne "anyeqz")]) ++ ++(define_insn "lsx__" ++ [(set (pc) (if_then_else ++ (equality_op ++ (unspec:SI [(match_operand:LSX 1 "register_operand" "f")] ++ UNSPEC_LSX_BRANCH) ++ (match_operand:SI 2 "const_0_operand")) ++ (label_ref (match_operand 0)) ++ (pc))) ++ (clobber (match_scratch:FCC 3 "=z"))] ++ "ISA_HAS_LSX" ++{ ++ return loongarch_output_conditional_branch (insn, operands, ++ "vset.\t%Z3%w1\n\tbcnez\t%Z3%0", ++ "vset.\t%Z3%w1\n\tbcnez\t%Z3%0"); ++} ++ [(set_attr "type" "simd_branch") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx__v_" ++ [(set (pc) (if_then_else ++ (equality_op ++ (unspec:SI [(match_operand:LSX 1 "register_operand" "f")] ++ UNSPEC_LSX_BRANCH_V) ++ (match_operand:SI 2 "const_0_operand")) ++ (label_ref (match_operand 0)) ++ (pc))) ++ (clobber (match_scratch:FCC 3 "=z"))] ++ "ISA_HAS_LSX" ++{ ++ return loongarch_output_conditional_branch (insn, operands, ++ "vset.v\t%Z3%w1\n\tbcnez\t%Z3%0", ++ "vset.v\t%Z3%w1\n\tbcnez\t%Z3%0"); ++} ++ [(set_attr "type" "simd_branch") ++ (set_attr "mode" "TI")]) ++ ++;; vec_concate ++(define_expand "vec_concatv2di" ++ [(set (match_operand:V2DI 0 "register_operand") ++ (vec_concat:V2DI ++ (match_operand:DI 1 "register_operand") ++ (match_operand:DI 2 "register_operand")))] ++ "ISA_HAS_LSX" ++{ ++ emit_insn (gen_lsx_vinsgr2vr_d (operands[0], operands[1], ++ operands[0], GEN_INT (0))); ++ emit_insn (gen_lsx_vinsgr2vr_d (operands[0], operands[2], ++ operands[0], GEN_INT (1))); ++ DONE; ++}) ++ ++ ++(define_insn "vandn3" ++ [(set (match_operand:LSX 0 "register_operand" "=f") ++ (and:LSX (not:LSX (match_operand:LSX 1 "register_operand" "f")) ++ (match_operand:LSX 2 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ 
"vandn.v\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "")]) ++ ++(define_insn "vabs2" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (abs:ILSX (match_operand:ILSX 1 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vsigncov.\t%w0,%w1,%w1" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "")]) ++ ++(define_insn "vneg2" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (neg:ILSX (match_operand:ILSX 1 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vneg.\t%w0,%w1" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vmuh_s_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")] ++ UNSPEC_LSX_VMUH_S))] ++ "ISA_HAS_LSX" ++ "vmuh.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vmuh_u_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")] ++ UNSPEC_LSX_VMUH_U))] ++ "ISA_HAS_LSX" ++ "vmuh.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vextw_s_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V4SI 1 "register_operand" "f")] ++ UNSPEC_LSX_VEXTW_S))] ++ "ISA_HAS_LSX" ++ "vextw_s.d\t%w0,%w1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V4SI")]) ++ ++(define_insn "lsx_vextw_u_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V4SI 1 "register_operand" "f")] ++ UNSPEC_LSX_VEXTW_U))] ++ "ISA_HAS_LSX" ++ "vextw_u.d\t%w0,%w1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V4SI")]) ++ ++(define_insn "lsx_vsllwil_s__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILSX_WHB 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] 
++ UNSPEC_LSX_VSLLWIL_S))] ++ "ISA_HAS_LSX" ++ "vsllwil..\t%w0,%w1,%2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vsllwil_u__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILSX_WHB 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LSX_VSLLWIL_U))] ++ "ISA_HAS_LSX" ++ "vsllwil..\t%w0,%w1,%2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vsran__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f") ++ (match_operand:ILSX_DWH 2 "register_operand" "f")] ++ UNSPEC_LSX_VSRAN))] ++ "ISA_HAS_LSX" ++ "vsran..\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vssran_s__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f") ++ (match_operand:ILSX_DWH 2 "register_operand" "f")] ++ UNSPEC_LSX_VSSRAN_S))] ++ "ISA_HAS_LSX" ++ "vssran..\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vssran_u__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f") ++ (match_operand:ILSX_DWH 2 "register_operand" "f")] ++ UNSPEC_LSX_VSSRAN_U))] ++ "ISA_HAS_LSX" ++ "vssran..\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vsrain_" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LSX_VSRAIN))] ++ "ISA_HAS_LSX" ++ "vsrain.\t%w0,%w1,%2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++;; FIXME: bitimm ++(define_insn "lsx_vsrains_s_" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ 
UNSPEC_LSX_VSRAINS_S))] ++ "ISA_HAS_LSX" ++ "vsrains_s.\t%w0,%w1,%2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++;; FIXME: bitimm ++(define_insn "lsx_vsrains_u_" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LSX_VSRAINS_U))] ++ "ISA_HAS_LSX" ++ "vsrains_u.\t%w0,%w1,%2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vsrarn__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f") ++ (match_operand:ILSX_DWH 2 "register_operand" "f")] ++ UNSPEC_LSX_VSRARN))] ++ "ISA_HAS_LSX" ++ "vsrarn..\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vssrarn_s__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f") ++ (match_operand:ILSX_DWH 2 "register_operand" "f")] ++ UNSPEC_LSX_VSSRARN_S))] ++ "ISA_HAS_LSX" ++ "vssrarn..\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vssrarn_u__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f") ++ (match_operand:ILSX_DWH 2 "register_operand" "f")] ++ UNSPEC_LSX_VSSRARN_U))] ++ "ISA_HAS_LSX" ++ "vssrarn..\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vsrln__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f") ++ (match_operand:ILSX_DWH 2 "register_operand" "f")] ++ UNSPEC_LSX_VSRLN))] ++ "ISA_HAS_LSX" ++ "vsrln..\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vssrln_u__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f") ++ 
(match_operand:ILSX_DWH 2 "register_operand" "f")] ++ UNSPEC_LSX_VSSRLN_U))] ++ "ISA_HAS_LSX" ++ "vssrln..\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vsrlrn__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f") ++ (match_operand:ILSX_DWH 2 "register_operand" "f")] ++ UNSPEC_LSX_VSRLRN))] ++ "ISA_HAS_LSX" ++ "vsrlrn..\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vssrlrn_u__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f") ++ (match_operand:ILSX_DWH 2 "register_operand" "f")] ++ UNSPEC_LSX_VSSRLRN_U))] ++ "ISA_HAS_LSX" ++ "vssrlrn..\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vfrstpi_" ++ [(set (match_operand:ILSX_HB 0 "register_operand" "=f") ++ (unspec:ILSX_HB [(match_operand:ILSX_HB 1 "register_operand" "0") ++ (match_operand:ILSX_HB 2 "register_operand" "f") ++ (match_operand 3 "const_uimm5_operand" "")] ++ UNSPEC_LSX_VFRSTPI))] ++ "ISA_HAS_LSX" ++ "vfrstpi.\t%w0,%w2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vfrstp_" ++ [(set (match_operand:ILSX_HB 0 "register_operand" "=f") ++ (unspec:ILSX_HB [(match_operand:ILSX_HB 1 "register_operand" "0") ++ (match_operand:ILSX_HB 2 "register_operand" "f") ++ (match_operand:ILSX_HB 3 "register_operand" "f")] ++ UNSPEC_LSX_VFRSTP))] ++ "ISA_HAS_LSX" ++ "vfrstp.\t%w0,%w2,%w3" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vshuf4i_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0") ++ (match_operand:V2DI 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand")] ++ UNSPEC_LSX_VSHUF4I))] ++ "ISA_HAS_LSX" ++ "vshuf4i.d\t%w0,%w2,%3" ++ [(set_attr "type" "simd_sld") ++ 
(set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vbsrl_" ++ [(set (match_operand:LSX 0 "register_operand" "=f") ++ (unspec:LSX [(match_operand:LSX 1 "register_operand" "f") ++ (match_operand 2 "const_uimm5_operand" "")] ++ UNSPEC_LSX_VBSRL_V))] ++ "ISA_HAS_LSX" ++ "vbsrl.v\t%w0,%w1,%2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vbsll_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand 2 "const_uimm5_operand" "")] ++ UNSPEC_LSX_VBSLL_V))] ++ "ISA_HAS_LSX" ++ "vbsll.v\t%w0,%w1,%2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vextrins_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0") ++ (match_operand:ILSX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LSX_VEXTRINS))] ++ "ISA_HAS_LSX" ++ "vextrins.\t%w0,%w2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vmskltz_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f")] ++ UNSPEC_LSX_VMSKLTZ))] ++ "ISA_HAS_LSX" ++ "vmskltz.\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vsigncov_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")] ++ UNSPEC_LSX_VSIGNCOV))] ++ "ISA_HAS_LSX" ++ "vsigncov.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_expand "copysign3" ++ [(set (match_dup 4) ++ (and:FLSX ++ (not:FLSX (match_dup 3)) ++ (match_operand:FLSX 1 "register_operand"))) ++ (set (match_dup 5) ++ (and:FLSX (match_dup 3) ++ (match_operand:FLSX 2 "register_operand"))) ++ (set (match_operand:FLSX 0 "register_operand") ++ (ior:FLSX (match_dup 4) (match_dup 
5)))] ++ "ISA_HAS_LSX" ++{ ++ operands[3] = loongarch_build_signbit_mask (mode, 1, 0); ++ ++ operands[4] = gen_reg_rtx (mode); ++ operands[5] = gen_reg_rtx (mode); ++}) ++ ++(define_insn "absv2df2" ++ [(set (match_operand:V2DF 0 "register_operand" "=f") ++ (abs:V2DF (match_operand:V2DF 1 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vbitclri.d\t%w0,%w1,63" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "V2DF")]) ++ ++(define_insn "absv4sf2" ++ [(set (match_operand:V4SF 0 "register_operand" "=f") ++ (abs:V4SF (match_operand:V4SF 1 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vbitclri.w\t%w0,%w1,31" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "vfmadd4" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (fma:FLSX (match_operand:FLSX 1 "register_operand" "f") ++ (match_operand:FLSX 2 "register_operand" "f") ++ (match_operand:FLSX 3 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vfmadd.\t%w0,%w1,$w2,%w3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "")]) ++ ++(define_insn "fms4" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (fma:FLSX (match_operand:FLSX 1 "register_operand" "f") ++ (match_operand:FLSX 2 "register_operand" "f") ++ (neg:FLSX (match_operand:FLSX 3 "register_operand" "f"))))] ++ "ISA_HAS_LSX" ++ "vfmsub.\t%w0,%w1,%w2,%w3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "")]) ++ ++(define_insn "vfnmsub4_nmsub4" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (neg:FLSX ++ (fma:FLSX ++ (match_operand:FLSX 1 "register_operand" "f") ++ (match_operand:FLSX 2 "register_operand" "f") ++ (neg:FLSX (match_operand:FLSX 3 "register_operand" "f")))))] ++ "ISA_HAS_LSX" ++ "vfnmsub.\t%w0,%w1,%w2,%w3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "")]) ++ ++ ++(define_insn "vfnmadd4_nmadd4" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (neg:FLSX ++ (fma:FLSX ++ (match_operand:FLSX 1 "register_operand" "f") ++ (match_operand:FLSX 2 "register_operand" 
"f") ++ (match_operand:FLSX 3 "register_operand" "f"))))] ++ "ISA_HAS_LSX" ++ "vfnmadd.\t%w0,%w1,%w2,%w3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vftintrne_w_s" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFTINTRNE))] ++ "ISA_HAS_LSX" ++ "vftintrne.w.s\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vftintrne_l_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFTINTRNE))] ++ "ISA_HAS_LSX" ++ "vftintrne.l.d\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V2DF")]) ++ ++(define_insn "lsx_vftintrp_w_s" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFTINTRP))] ++ "ISA_HAS_LSX" ++ "vftintrp.w.s\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vftintrp_l_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFTINTRP))] ++ "ISA_HAS_LSX" ++ "vftintrp.l.d\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V2DF")]) ++ ++(define_insn "lsx_vftintrm_w_s" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFTINTRM))] ++ "ISA_HAS_LSX" ++ "vftintrm.w.s\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vftintrm_l_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFTINTRM))] ++ "ISA_HAS_LSX" ++ "vftintrm.l.d\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V2DF")]) ++ ++(define_insn "lsx_vftint_w_d" ++ [(set (match_operand:V4SI 0 
"register_operand" "=f") ++ (unspec:V4SI [(match_operand:V2DF 1 "register_operand" "f") ++ (match_operand:V2DF 2 "register_operand" "f")] ++ UNSPEC_LSX_VFTINT_W_D))] ++ "ISA_HAS_LSX" ++ "vftint.w.d\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DF")]) ++ ++(define_insn "lsx_vffint_s_l" ++ [(set (match_operand:V4SF 0 "register_operand" "=f") ++ (unspec:V4SF [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VFFINT_S_L))] ++ "ISA_HAS_LSX" ++ "vffint.s.l\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vftintrz_w_d" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (unspec:V4SI [(match_operand:V2DF 1 "register_operand" "f") ++ (match_operand:V2DF 2 "register_operand" "f")] ++ UNSPEC_LSX_VFTINTRZ_W_D))] ++ "ISA_HAS_LSX" ++ "vftintrz.w.d\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DF")]) ++ ++(define_insn "lsx_vftintrp_w_d" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (unspec:V4SI [(match_operand:V2DF 1 "register_operand" "f") ++ (match_operand:V2DF 2 "register_operand" "f")] ++ UNSPEC_LSX_VFTINTRP_W_D))] ++ "ISA_HAS_LSX" ++ "vftintrp.w.d\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DF")]) ++ ++(define_insn "lsx_vftintrm_w_d" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (unspec:V4SI [(match_operand:V2DF 1 "register_operand" "f") ++ (match_operand:V2DF 2 "register_operand" "f")] ++ UNSPEC_LSX_VFTINTRM_W_D))] ++ "ISA_HAS_LSX" ++ "vftintrm.w.d\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DF")]) ++ ++(define_insn "lsx_vftintrne_w_d" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (unspec:V4SI [(match_operand:V2DF 1 "register_operand" "f") ++ (match_operand:V2DF 2 "register_operand" "f")] ++ UNSPEC_LSX_VFTINTRNE_W_D))] ++ "ISA_HAS_LSX" ++ "vftintrne.w.d\t%w0,%w1,%w2" ++ [(set_attr "type" 
"simd_int_arith") ++ (set_attr "mode" "V2DF")]) ++ ++(define_insn "lsx_vftinth_l_s" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V4SF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFTINTH_L_H))] ++ "ISA_HAS_LSX" ++ "vftinth.l.s\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vftintl_l_s" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V4SF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFTINTL_L_S))] ++ "ISA_HAS_LSX" ++ "vftintl.l.s\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vffinth_d_w" ++ [(set (match_operand:V2DF 0 "register_operand" "=f") ++ (unspec:V2DF [(match_operand:V4SI 1 "register_operand" "f")] ++ UNSPEC_LSX_VFFINTH_D_W))] ++ "ISA_HAS_LSX" ++ "vffinth.d.w\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SI")]) ++ ++(define_insn "lsx_vffintl_d_w" ++ [(set (match_operand:V2DF 0 "register_operand" "=f") ++ (unspec:V2DF [(match_operand:V4SI 1 "register_operand" "f")] ++ UNSPEC_LSX_VFFINTL_D_W))] ++ "ISA_HAS_LSX" ++ "vffintl.d.w\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SI")]) ++ ++(define_insn "lsx_vftintrzh_l_s" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V4SF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFTINTRZH_L_S))] ++ "ISA_HAS_LSX" ++ "vftintrzh.l.s\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vftintrzl_l_s" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V4SF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFTINTRZL_L_S))] ++ "ISA_HAS_LSX" ++ "vftintrzl.l.s\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vftintrph_l_s" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V4SF 1 "register_operand" "f")] ++ 
UNSPEC_LSX_VFTINTRPH_L_S))] ++ "ISA_HAS_LSX" ++ "vftintrph.l.s\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vftintrpl_l_s" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V4SF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFTINTRPL_L_S))] ++ "ISA_HAS_LSX" ++ "vftintrpl.l.s\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vftintrmh_l_s" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V4SF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFTINTRMH_L_S))] ++ "ISA_HAS_LSX" ++ "vftintrmh.l.s\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vftintrml_l_s" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V4SF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFTINTRML_L_S))] ++ "ISA_HAS_LSX" ++ "vftintrml.l.s\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vftintrneh_l_s" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V4SF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFTINTRNEH_L_S))] ++ "ISA_HAS_LSX" ++ "vftintrneh.l.s\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vftintrnel_l_s" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V4SF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFTINTRNEL_L_S))] ++ "ISA_HAS_LSX" ++ "vftintrnel.l.s\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vfrintrne_s" ++ [(set (match_operand:V4SF 0 "register_operand" "=f") ++ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFRINTRNE_S))] ++ "ISA_HAS_LSX" ++ "vfrintrne.s\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vfrintrne_d" ++ [(set (match_operand:V2DF 0 
"register_operand" "=f") ++ (unspec:V2DF [(match_operand:V2DF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFRINTRNE_D))] ++ "ISA_HAS_LSX" ++ "vfrintrne.d\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V2DF")]) ++ ++(define_insn "lsx_vfrintrz_s" ++ [(set (match_operand:V4SF 0 "register_operand" "=f") ++ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFRINTRZ_S))] ++ "ISA_HAS_LSX" ++ "vfrintrz.s\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vfrintrz_d" ++ [(set (match_operand:V2DF 0 "register_operand" "=f") ++ (unspec:V2DF [(match_operand:V2DF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFRINTRZ_D))] ++ "ISA_HAS_LSX" ++ "vfrintrz.d\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V2DF")]) ++ ++(define_insn "lsx_vfrintrp_s" ++ [(set (match_operand:V4SF 0 "register_operand" "=f") ++ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFRINTRP_S))] ++ "ISA_HAS_LSX" ++ "vfrintrp.s\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vfrintrp_d" ++ [(set (match_operand:V2DF 0 "register_operand" "=f") ++ (unspec:V2DF [(match_operand:V2DF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFRINTRP_D))] ++ "ISA_HAS_LSX" ++ "vfrintrp.d\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V2DF")]) ++ ++(define_insn "lsx_vfrintrm_s" ++ [(set (match_operand:V4SF 0 "register_operand" "=f") ++ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFRINTRM_S))] ++ "ISA_HAS_LSX" ++ "vfrintrm.s\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vfrintrm_d" ++ [(set (match_operand:V2DF 0 "register_operand" "=f") ++ (unspec:V2DF [(match_operand:V2DF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFRINTRM_D))] ++ "ISA_HAS_LSX" ++ "vfrintrm.d\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V2DF")]) ++ ++;; Vector versions of the 
floating-point frint patterns. ++;; Expands to btrunc, ceil, floor, rint. ++(define_insn "v4sf2" ++ [(set (match_operand:V4SF 0 "register_operand" "=f") ++ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "f")] ++ FRINT_S))] ++ "ISA_HAS_LSX" ++ "vfrint.s\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "v2df2" ++ [(set (match_operand:V2DF 0 "register_operand" "=f") ++ (unspec:V2DF [(match_operand:V2DF 1 "register_operand" "f")] ++ FRINT_D))] ++ "ISA_HAS_LSX" ++ "vfrint.d\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V2DF")]) ++ ++;; Expands to round. ++(define_insn "round2" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")] ++ UNSPEC_LSX_VFRINT))] ++ "ISA_HAS_LSX" ++ "vfrint.\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++;; Offset load and broadcast ++(define_expand "lsx_vldrepl_" ++ [(match_operand:LSX 0 "register_operand") ++ (match_operand 1 "pmode_register_operand") ++ (match_operand 2 "aq12_operand")] ++ "ISA_HAS_LSX" ++{ ++ emit_insn (gen_lsx_vldrepl__insn ++ (operands[0], operands[1], operands[2])); ++ DONE; ++}) ++ ++(define_insn "lsx_vldrepl__insn" ++ [(set (match_operand:LSX 0 "register_operand" "=f") ++ (vec_duplicate:LSX ++ (mem: (plus:DI (match_operand:DI 1 "register_operand" "r") ++ (match_operand 2 "aq12_operand")))))] ++ "ISA_HAS_LSX" ++{ ++ return "vldrepl.\t%w0,%1,%2"; ++} ++ [(set_attr "type" "simd_load") ++ (set_attr "mode" "") ++ (set_attr "length" "4")]) ++ ++(define_insn "lsx_vldrepl__insn_0" ++ [(set (match_operand:LSX 0 "register_operand" "=f") ++ (vec_duplicate:LSX ++ (mem: (match_operand:DI 1 "register_operand" "r"))))] ++ "ISA_HAS_LSX" ++{ ++ return "vldrepl.\t%w0,%1,0"; ++} ++ [(set_attr "type" "simd_load") ++ (set_attr "mode" "") ++ (set_attr "length" "4")]) ++ ++;; Offset store by sel ++(define_expand "lsx_vstelm_" ++ [(match_operand:LSX 0 "register_operand") ++ 
(match_operand 3 "const__operand") ++ (match_operand 2 "aq8_operand") ++ (match_operand 1 "pmode_register_operand")] ++ "ISA_HAS_LSX" ++{ ++ emit_insn (gen_lsx_vstelm__insn ++ (operands[1], operands[2], operands[0], operands[3])); ++ DONE; ++}) ++ ++(define_insn "lsx_vstelm__insn" ++ [(set (mem: (plus:DI (match_operand:DI 0 "register_operand" "r") ++ (match_operand 1 "aq8_operand"))) ++ (vec_select: ++ (match_operand:LSX 2 "register_operand" "f") ++ (parallel [(match_operand 3 "const__operand" "")])))] ++ ++ "ISA_HAS_LSX" ++{ ++ return "vstelm.\t%w2,%0,%1,%3"; ++} ++ [(set_attr "type" "simd_store") ++ (set_attr "mode" "") ++ (set_attr "length" "4")]) ++ ++;; Offset is "0" ++(define_insn "lsx_vstelm__insn_0" ++ [(set (mem: (match_operand:DI 0 "register_operand" "r")) ++ (vec_select: ++ (match_operand:LSX 1 "register_operand" "f") ++ (parallel [(match_operand:SI 2 "const__operand")])))] ++ "ISA_HAS_LSX" ++{ ++ return "vstelm.\t%w1,%0,0,%2"; ++} ++ [(set_attr "type" "simd_store") ++ (set_attr "mode" "") ++ (set_attr "length" "4")]) ++ ++(define_expand "lsx_vld" ++ [(match_operand:V16QI 0 "register_operand") ++ (match_operand 1 "pmode_register_operand") ++ (match_operand 2 "aq12b_operand")] ++ "ISA_HAS_LSX" ++{ ++ rtx addr = plus_constant (GET_MODE (operands[1]), operands[1], ++ INTVAL (operands[2])); ++ loongarch_emit_move (operands[0], gen_rtx_MEM (V16QImode, addr)); ++ DONE; ++}) ++ ++(define_expand "lsx_vst" ++ [(match_operand:V16QI 0 "register_operand") ++ (match_operand 1 "pmode_register_operand") ++ (match_operand 2 "aq12b_operand")] ++ "ISA_HAS_LSX" ++{ ++ rtx addr = plus_constant (GET_MODE (operands[1]), operands[1], ++ INTVAL (operands[2])); ++ loongarch_emit_move (gen_rtx_MEM (V16QImode, addr), operands[0]); ++ DONE; ++}) ++ ++(define_insn "lsx_vssrln__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f") ++ (match_operand:ILSX_DWH 2 "register_operand" "f")] ++ UNSPEC_LSX_VSSRLN))] ++ 
"ISA_HAS_LSX" ++ "vssrln..\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++ ++(define_insn "lsx_vssrlrn__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f") ++ (match_operand:ILSX_DWH 2 "register_operand" "f")] ++ UNSPEC_LSX_VSSRLRN))] ++ "ISA_HAS_LSX" ++ "vssrlrn..\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "vorn3" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (ior:ILSX (not:ILSX (match_operand:ILSX 2 "register_operand" "f")) ++ (match_operand:ILSX 1 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vorn.v\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vldi" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand 1 "const_imm13_operand")] ++ UNSPEC_LSX_VLDI))] ++ "ISA_HAS_LSX" ++{ ++ HOST_WIDE_INT val = INTVAL (operands[1]); ++ if (val < 0) ++ { ++ HOST_WIDE_INT modeVal = (val & 0xf00) >> 8; ++ if (modeVal < 13) ++ return "vldi\t%w0,%1"; ++ else ++ sorry ("imm13 only support 0000 ~ 1100 in bits 9 ~ 12 when bit '13' is 1"); ++ return "#"; ++ } ++ else ++ return "vldi\t%w0,%1"; ++} ++ [(set_attr "type" "simd_load") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vshuf_b" ++ [(set (match_operand:V16QI 0 "register_operand" "=f") ++ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "f") ++ (match_operand:V16QI 2 "register_operand" "f") ++ (match_operand:V16QI 3 "register_operand" "f")] ++ UNSPEC_LSX_VSHUF_B))] ++ "ISA_HAS_LSX" ++ "vshuf.b\t%w0,%w1,%w2,%w3" ++ [(set_attr "type" "simd_shf") ++ (set_attr "mode" "V16QI")]) ++ ++(define_insn "lsx_vldx" ++ [(set (match_operand:V16QI 0 "register_operand" "=f") ++ (unspec:V16QI [(match_operand:DI 1 "register_operand" "r") ++ (match_operand:DI 2 "reg_or_0_operand" "rJ")] ++ UNSPEC_LSX_VLDX))] ++ "ISA_HAS_LSX" ++{ ++ return "vldx\t%w0,%1,%z2"; ++} ++ [(set_attr "type" 
"simd_load") ++ (set_attr "mode" "V16QI")]) ++ ++(define_insn "lsx_vstx" ++ [(set (mem:V16QI (plus:DI (match_operand:DI 1 "register_operand" "r") ++ (match_operand:DI 2 "reg_or_0_operand" "rJ"))) ++ (unspec: V16QI [(match_operand:V16QI 0 "register_operand" "f")] ++ UNSPEC_LSX_VSTX))] ++ ++ "ISA_HAS_LSX" ++{ ++ return "vstx\t%w0,%1,%z2"; ++} ++ [(set_attr "type" "simd_store") ++ (set_attr "mode" "DI")]) ++ ++(define_insn "lsx_vextl_qu_du" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")] ++ UNSPEC_LSX_VEXTL_QU_DU))] ++ "ISA_HAS_LSX" ++ "vextl.qu.du\t%w0,%w1" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vseteqz_v" ++ [(set (match_operand:FCC 0 "register_operand" "=z") ++ (eq:FCC ++ (unspec:SI [(match_operand:V16QI 1 "register_operand" "f")] ++ UNSPEC_LSX_VSETEQZ_V) ++ (match_operand:SI 2 "const_0_operand")))] ++ "ISA_HAS_LSX" ++{ ++ return "vseteqz.v\t%0,%1"; ++} ++ [(set_attr "type" "simd_fcmp") ++ (set_attr "mode" "FCC")]) ++ ++;; Vector reduction operation ++(define_expand "reduc_plus_scal_v2di" ++ [(match_operand:DI 0 "register_operand") ++ (match_operand:V2DI 1 "register_operand")] ++ "ISA_HAS_LSX" ++{ ++ rtx tmp = gen_reg_rtx (V2DImode); ++ emit_insn (gen_lsx_vhaddw_q_d (tmp, operands[1], operands[1])); ++ emit_insn (gen_vec_extractv2didi (operands[0], tmp, const0_rtx)); ++ DONE; ++}) ++ ++(define_expand "reduc_plus_scal_v4si" ++ [(match_operand:SI 0 "register_operand") ++ (match_operand:V4SI 1 "register_operand")] ++ "ISA_HAS_LSX" ++{ ++ rtx tmp = gen_reg_rtx (V2DImode); ++ rtx tmp1 = gen_reg_rtx (V2DImode); ++ emit_insn (gen_lsx_vhaddw_d_w (tmp, operands[1], operands[1])); ++ emit_insn (gen_lsx_vhaddw_q_d (tmp1, tmp, tmp)); ++ emit_insn (gen_vec_extractv4sisi (operands[0], gen_lowpart (V4SImode,tmp1), ++ const0_rtx)); ++ DONE; ++}) ++ ++(define_expand "reduc_plus_scal_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:FLSX 1 
"register_operand")] ++ "ISA_HAS_LSX" ++{ ++ rtx tmp = gen_reg_rtx (mode); ++ loongarch_expand_vector_reduc (gen_add3, tmp, operands[1]); ++ emit_insn (gen_vec_extract (operands[0], tmp, ++ const0_rtx)); ++ DONE; ++}) ++ ++(define_expand "reduc__scal_" ++ [(any_bitwise: ++ (match_operand: 0 "register_operand") ++ (match_operand:ILSX 1 "register_operand"))] ++ "ISA_HAS_LSX" ++{ ++ rtx tmp = gen_reg_rtx (mode); ++ loongarch_expand_vector_reduc (gen_3, tmp, operands[1]); ++ emit_insn (gen_vec_extract (operands[0], tmp, ++ const0_rtx)); ++ DONE; ++}) ++ ++(define_expand "reduc_smax_scal_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:LSX 1 "register_operand")] ++ "ISA_HAS_LSX" ++{ ++ rtx tmp = gen_reg_rtx (mode); ++ loongarch_expand_vector_reduc (gen_smax3, tmp, operands[1]); ++ emit_insn (gen_vec_extract (operands[0], tmp, ++ const0_rtx)); ++ DONE; ++}) ++ ++(define_expand "reduc_smin_scal_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:LSX 1 "register_operand")] ++ "ISA_HAS_LSX" ++{ ++ rtx tmp = gen_reg_rtx (mode); ++ loongarch_expand_vector_reduc (gen_smin3, tmp, operands[1]); ++ emit_insn (gen_vec_extract (operands[0], tmp, ++ const0_rtx)); ++ DONE; ++}) ++ ++(define_expand "reduc_umax_scal_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:ILSX 1 "register_operand")] ++ "ISA_HAS_LSX" ++{ ++ rtx tmp = gen_reg_rtx (mode); ++ loongarch_expand_vector_reduc (gen_umax3, tmp, operands[1]); ++ emit_insn (gen_vec_extract (operands[0], tmp, ++ const0_rtx)); ++ DONE; ++}) ++ ++(define_expand "reduc_umin_scal_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:ILSX 1 "register_operand")] ++ "ISA_HAS_LSX" ++{ ++ rtx tmp = gen_reg_rtx (mode); ++ loongarch_expand_vector_reduc (gen_umin3, tmp, operands[1]); ++ emit_insn (gen_vec_extract (operands[0], tmp, ++ const0_rtx)); ++ DONE; ++}) ++ ++(define_insn "lsx_vwev_d_w" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (addsubmul:V2DI ++ (any_extend:V2DI ++ (vec_select:V2SI 
++ (match_operand:V4SI 1 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2)]))) ++ (any_extend:V2DI ++ (vec_select:V2SI ++ (match_operand:V4SI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2)])))))] ++ "ISA_HAS_LSX" ++ "vwev.d.w\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vwev_w_h" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (addsubmul:V4SI ++ (any_extend:V4SI ++ (vec_select:V4HI ++ (match_operand:V8HI 1 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)]))) ++ (any_extend:V4SI ++ (vec_select:V4HI ++ (match_operand:V8HI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)])))))] ++ "ISA_HAS_LSX" ++ "vwev.w.h\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4SI")]) ++ ++(define_insn "lsx_vwev_h_b" ++ [(set (match_operand:V8HI 0 "register_operand" "=f") ++ (addsubmul:V8HI ++ (any_extend:V8HI ++ (vec_select:V8QI ++ (match_operand:V16QI 1 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)]))) ++ (any_extend:V8HI ++ (vec_select:V8QI ++ (match_operand:V16QI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)])))))] ++ "ISA_HAS_LSX" ++ "vwev.h.b\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V8HI")]) ++ ++(define_insn "lsx_vwod_d_w" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (addsubmul:V2DI ++ (any_extend:V2DI ++ (vec_select:V2SI ++ (match_operand:V4SI 1 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3)]))) ++ (any_extend:V2DI ++ (vec_select:V2SI ++ (match_operand:V4SI 2 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3)])))))] ++ "ISA_HAS_LSX" ++ 
"vwod.d.w\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vwod_w_h" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (addsubmul:V4SI ++ (any_extend:V4SI ++ (vec_select:V4HI ++ (match_operand:V8HI 1 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)]))) ++ (any_extend:V4SI ++ (vec_select:V4HI ++ (match_operand:V8HI 2 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)])))))] ++ "ISA_HAS_LSX" ++ "vwod.w.h\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4SI")]) ++ ++(define_insn "lsx_vwod_h_b" ++ [(set (match_operand:V8HI 0 "register_operand" "=f") ++ (addsubmul:V8HI ++ (any_extend:V8HI ++ (vec_select:V8QI ++ (match_operand:V16QI 1 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)]))) ++ (any_extend:V8HI ++ (vec_select:V8QI ++ (match_operand:V16QI 2 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)])))))] ++ "ISA_HAS_LSX" ++ "vwod.h.b\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V8HI")]) ++ ++(define_insn "lsx_vwev_d_wu_w" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (addmul:V2DI ++ (zero_extend:V2DI ++ (vec_select:V2SI ++ (match_operand:V4SI 1 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2)]))) ++ (sign_extend:V2DI ++ (vec_select:V2SI ++ (match_operand:V4SI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2)])))))] ++ "ISA_HAS_LSX" ++ "vwev.d.wu.w\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vwev_w_hu_h" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (addmul:V4SI ++ (zero_extend:V4SI ++ (vec_select:V4HI ++ 
(match_operand:V8HI 1 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)]))) ++ (sign_extend:V4SI ++ (vec_select:V4HI ++ (match_operand:V8HI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)])))))] ++ "ISA_HAS_LSX" ++ "vwev.w.hu.h\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4SI")]) ++ ++(define_insn "lsx_vwev_h_bu_b" ++ [(set (match_operand:V8HI 0 "register_operand" "=f") ++ (addmul:V8HI ++ (zero_extend:V8HI ++ (vec_select:V8QI ++ (match_operand:V16QI 1 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)]))) ++ (sign_extend:V8HI ++ (vec_select:V8QI ++ (match_operand:V16QI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)])))))] ++ "ISA_HAS_LSX" ++ "vwev.h.bu.b\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V8HI")]) ++ ++(define_insn "lsx_vwod_d_wu_w" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (addmul:V2DI ++ (zero_extend:V2DI ++ (vec_select:V2SI ++ (match_operand:V4SI 1 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3)]))) ++ (sign_extend:V2DI ++ (vec_select:V2SI ++ (match_operand:V4SI 2 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3)])))))] ++ "ISA_HAS_LSX" ++ "vwod.d.wu.w\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vwod_w_hu_h" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (addmul:V4SI ++ (zero_extend:V4SI ++ (vec_select:V4HI ++ (match_operand:V8HI 1 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)]))) ++ (sign_extend:V4SI ++ (vec_select:V4HI ++ (match_operand:V8HI 2 "register_operand" "f") ++ (parallel [(const_int 1) 
(const_int 3) ++ (const_int 5) (const_int 7)])))))] ++ "ISA_HAS_LSX" ++ "vwod.w.hu.h\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4SI")]) ++ ++(define_insn "lsx_vwod_h_bu_b" ++ [(set (match_operand:V8HI 0 "register_operand" "=f") ++ (addmul:V8HI ++ (zero_extend:V8HI ++ (vec_select:V8QI ++ (match_operand:V16QI 1 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)]))) ++ (sign_extend:V8HI ++ (vec_select:V8QI ++ (match_operand:V16QI 2 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)])))))] ++ "ISA_HAS_LSX" ++ "vwod.h.bu.b\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V8HI")]) ++ ++(define_insn "lsx_vaddwev_q_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VADDWEV))] ++ "ISA_HAS_LSX" ++ "vaddwev.q.d\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vaddwev_q_du" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VADDWEV2))] ++ "ISA_HAS_LSX" ++ "vaddwev.q.du\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vaddwod_q_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VADDWOD))] ++ "ISA_HAS_LSX" ++ "vaddwod.q.d\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vaddwod_q_du" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ 
(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VADDWOD2))] ++ "ISA_HAS_LSX" ++ "vaddwod.q.du\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vsubwev_q_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VSUBWEV))] ++ "ISA_HAS_LSX" ++ "vsubwev.q.d\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vsubwev_q_du" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VSUBWEV2))] ++ "ISA_HAS_LSX" ++ "vsubwev.q.du\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vsubwod_q_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VSUBWOD))] ++ "ISA_HAS_LSX" ++ "vsubwod.q.d\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vsubwod_q_du" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VSUBWOD2))] ++ "ISA_HAS_LSX" ++ "vsubwod.q.du\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vaddwev_q_du_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VADDWEV3))] ++ "ISA_HAS_LSX" ++ "vaddwev.q.du.d\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ 
++(define_insn "lsx_vaddwod_q_du_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VADDWOD3))] ++ "ISA_HAS_LSX" ++ "vaddwod.q.du.d\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vmulwev_q_du_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VMULWEV3))] ++ "ISA_HAS_LSX" ++ "vmulwev.q.du.d\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vmulwod_q_du_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VMULWOD3))] ++ "ISA_HAS_LSX" ++ "vmulwod.q.du.d\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vmulwev_q_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VMULWEV))] ++ "ISA_HAS_LSX" ++ "vmulwev.q.d\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vmulwev_q_du" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VMULWEV2))] ++ "ISA_HAS_LSX" ++ "vmulwev.q.du\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vmulwod_q_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VMULWOD))] ++ "ISA_HAS_LSX" 
++ "vmulwod.q.d\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vmulwod_q_du" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VMULWOD2))] ++ "ISA_HAS_LSX" ++ "vmulwod.q.du\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vhaddw_q_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VHADDW_Q_D))] ++ "ISA_HAS_LSX" ++ "vhaddw.q.d\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vhaddw_qu_du" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VHADDW_QU_DU))] ++ "ISA_HAS_LSX" ++ "vhaddw.qu.du\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vhsubw_q_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VHSUBW_Q_D))] ++ "ISA_HAS_LSX" ++ "vhsubw.q.d\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vhsubw_qu_du" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VHSUBW_QU_DU))] ++ "ISA_HAS_LSX" ++ "vhsubw.qu.du\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vmaddwev_d_w" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (plus:V2DI ++ (match_operand:V2DI 1 "register_operand" 
"0") ++ (mult:V2DI ++ (any_extend:V2DI ++ (vec_select:V2SI ++ (match_operand:V4SI 2 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2)]))) ++ (any_extend:V2DI ++ (vec_select:V2SI ++ (match_operand:V4SI 3 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2)]))))))] ++ "ISA_HAS_LSX" ++ "vmaddwev.d.w\t%w0,%w2,%w3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vmaddwev_w_h" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (plus:V4SI ++ (match_operand:V4SI 1 "register_operand" "0") ++ (mult:V4SI ++ (any_extend:V4SI ++ (vec_select:V4HI ++ (match_operand:V8HI 2 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)]))) ++ (any_extend:V4SI ++ (vec_select:V4HI ++ (match_operand:V8HI 3 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)]))))))] ++ "ISA_HAS_LSX" ++ "vmaddwev.w.h\t%w0,%w2,%w3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V4SI")]) ++ ++(define_insn "lsx_vmaddwev_h_b" ++ [(set (match_operand:V8HI 0 "register_operand" "=f") ++ (plus:V8HI ++ (match_operand:V8HI 1 "register_operand" "0") ++ (mult:V8HI ++ (any_extend:V8HI ++ (vec_select:V8QI ++ (match_operand:V16QI 2 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)]))) ++ (any_extend:V8HI ++ (vec_select:V8QI ++ (match_operand:V16QI 3 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)]))))))] ++ "ISA_HAS_LSX" ++ "vmaddwev.h.b\t%w0,%w2,%w3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V8HI")]) ++ ++(define_insn "lsx_vmaddwod_d_w" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (plus:V2DI ++ (match_operand:V2DI 1 "register_operand" "0") ++ (mult:V2DI ++ (any_extend:V2DI ++ (vec_select:V2SI ++ 
(match_operand:V4SI 2 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3)]))) ++ (any_extend:V2DI ++ (vec_select:V2SI ++ (match_operand:V4SI 3 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3)]))))))] ++ "ISA_HAS_LSX" ++ "vmaddwod.d.w\t%w0,%w2,%w3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vmaddwod_w_h" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (plus:V4SI ++ (match_operand:V4SI 1 "register_operand" "0") ++ (mult:V4SI ++ (any_extend:V4SI ++ (vec_select:V4HI ++ (match_operand:V8HI 2 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)]))) ++ (any_extend:V4SI ++ (vec_select:V4HI ++ (match_operand:V8HI 3 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)]))))))] ++ "ISA_HAS_LSX" ++ "vmaddwod.w.h\t%w0,%w2,%w3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V4SI")]) ++ ++(define_insn "lsx_vmaddwod_h_b" ++ [(set (match_operand:V8HI 0 "register_operand" "=f") ++ (plus:V8HI ++ (match_operand:V8HI 1 "register_operand" "0") ++ (mult:V8HI ++ (any_extend:V8HI ++ (vec_select:V8QI ++ (match_operand:V16QI 2 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)]))) ++ (any_extend:V8HI ++ (vec_select:V8QI ++ (match_operand:V16QI 3 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)]))))))] ++ "ISA_HAS_LSX" ++ "vmaddwod.h.b\t%w0,%w2,%w3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V8HI")]) ++ ++(define_insn "lsx_vmaddwev_d_wu_w" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (plus:V2DI ++ (match_operand:V2DI 1 "register_operand" "0") ++ (mult:V2DI ++ (zero_extend:V2DI ++ (vec_select:V2SI ++ (match_operand:V4SI 2 "register_operand" "%f") ++ (parallel 
[(const_int 0) (const_int 2)]))) ++ (sign_extend:V2DI ++ (vec_select:V2SI ++ (match_operand:V4SI 3 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2)]))))))] ++ "ISA_HAS_LSX" ++ "vmaddwev.d.wu.w\t%w0,%w2,%w3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vmaddwev_w_hu_h" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (plus:V4SI ++ (match_operand:V4SI 1 "register_operand" "0") ++ (mult:V4SI ++ (zero_extend:V4SI ++ (vec_select:V4HI ++ (match_operand:V8HI 2 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)]))) ++ (sign_extend:V4SI ++ (vec_select:V4HI ++ (match_operand:V8HI 3 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)]))))))] ++ "ISA_HAS_LSX" ++ "vmaddwev.w.hu.h\t%w0,%w2,%w3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V4SI")]) ++ ++(define_insn "lsx_vmaddwev_h_bu_b" ++ [(set (match_operand:V8HI 0 "register_operand" "=f") ++ (plus:V8HI ++ (match_operand:V8HI 1 "register_operand" "0") ++ (mult:V8HI ++ (zero_extend:V8HI ++ (vec_select:V8QI ++ (match_operand:V16QI 2 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)]))) ++ (sign_extend:V8HI ++ (vec_select:V8QI ++ (match_operand:V16QI 3 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)]))))))] ++ "ISA_HAS_LSX" ++ "vmaddwev.h.bu.b\t%w0,%w2,%w3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V8HI")]) ++ ++(define_insn "lsx_vmaddwod_d_wu_w" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (plus:V2DI ++ (match_operand:V2DI 1 "register_operand" "0") ++ (mult:V2DI ++ (zero_extend:V2DI ++ (vec_select:V2SI ++ (match_operand:V4SI 2 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3)]))) ++ 
(sign_extend:V2DI ++ (vec_select:V2SI ++ (match_operand:V4SI 3 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3)]))))))] ++ "ISA_HAS_LSX" ++ "vmaddwod.d.wu.w\t%w0,%w2,%w3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vmaddwod_w_hu_h" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (plus:V4SI ++ (match_operand:V4SI 1 "register_operand" "0") ++ (mult:V4SI ++ (zero_extend:V4SI ++ (vec_select:V4HI ++ (match_operand:V8HI 2 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)]))) ++ (sign_extend:V4SI ++ (vec_select:V4HI ++ (match_operand:V8HI 3 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)]))))))] ++ "ISA_HAS_LSX" ++ "vmaddwod.w.hu.h\t%w0,%w2,%w3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V4SI")]) ++ ++(define_insn "lsx_vmaddwod_h_bu_b" ++ [(set (match_operand:V8HI 0 "register_operand" "=f") ++ (plus:V8HI ++ (match_operand:V8HI 1 "register_operand" "0") ++ (mult:V8HI ++ (zero_extend:V8HI ++ (vec_select:V8QI ++ (match_operand:V16QI 2 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)]))) ++ (sign_extend:V8HI ++ (vec_select:V8QI ++ (match_operand:V16QI 3 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)]))))))] ++ "ISA_HAS_LSX" ++ "vmaddwod.h.bu.b\t%w0,%w2,%w3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V8HI")]) ++ ++(define_insn "lsx_vmaddwev_q_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0") ++ (match_operand:V2DI 2 "register_operand" "f") ++ (match_operand:V2DI 3 "register_operand" "f")] ++ UNSPEC_LSX_VMADDWEV))] ++ "ISA_HAS_LSX" ++ "vmaddwev.q.d\t%w0,%w2,%w3" ++ [(set_attr "type" 
"simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vmaddwod_q_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0") ++ (match_operand:V2DI 2 "register_operand" "f") ++ (match_operand:V2DI 3 "register_operand" "f")] ++ UNSPEC_LSX_VMADDWOD))] ++ "ISA_HAS_LSX" ++ "vmaddwod.q.d\t%w0,%w2,%w3" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vmaddwev_q_du" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0") ++ (match_operand:V2DI 2 "register_operand" "f") ++ (match_operand:V2DI 3 "register_operand" "f")] ++ UNSPEC_LSX_VMADDWEV2))] ++ "ISA_HAS_LSX" ++ "vmaddwev.q.du\t%w0,%w2,%w3" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vmaddwod_q_du" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0") ++ (match_operand:V2DI 2 "register_operand" "f") ++ (match_operand:V2DI 3 "register_operand" "f")] ++ UNSPEC_LSX_VMADDWOD2))] ++ "ISA_HAS_LSX" ++ "vmaddwod.q.du\t%w0,%w2,%w3" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vmaddwev_q_du_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0") ++ (match_operand:V2DI 2 "register_operand" "f") ++ (match_operand:V2DI 3 "register_operand" "f")] ++ UNSPEC_LSX_VMADDWEV3))] ++ "ISA_HAS_LSX" ++ "vmaddwev.q.du.d\t%w0,%w2,%w3" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vmaddwod_q_du_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0") ++ (match_operand:V2DI 2 "register_operand" "f") ++ (match_operand:V2DI 3 "register_operand" "f")] ++ UNSPEC_LSX_VMADDWOD3))] ++ "ISA_HAS_LSX" ++ "vmaddwod.q.du.d\t%w0,%w2,%w3" ++ 
[(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vrotr_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")] ++ UNSPEC_LSX_VROTR))] ++ "ISA_HAS_LSX" ++ "vrotr.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vadd_q" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VADD_Q))] ++ "ISA_HAS_LSX" ++ "vadd.q\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vsub_q" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VSUB_Q))] ++ "ISA_HAS_LSX" ++ "vsub.q\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vmskgez_b" ++ [(set (match_operand:V16QI 0 "register_operand" "=f") ++ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "f")] ++ UNSPEC_LSX_VMSKGEZ))] ++ "ISA_HAS_LSX" ++ "vmskgez.b\t%w0,%w1" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "V16QI")]) ++ ++(define_insn "lsx_vmsknz_b" ++ [(set (match_operand:V16QI 0 "register_operand" "=f") ++ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "f")] ++ UNSPEC_LSX_VMSKNZ))] ++ "ISA_HAS_LSX" ++ "vmsknz.b\t%w0,%w1" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "V16QI")]) ++ ++(define_insn "lsx_vexth_h_b" ++ [(set (match_operand:V8HI 0 "register_operand" "=f") ++ (any_extend:V8HI ++ (vec_select:V8QI ++ (match_operand:V16QI 1 "register_operand" "f") ++ (parallel [(const_int 8) (const_int 9) ++ (const_int 10) (const_int 11) ++ (const_int 12) (const_int 13) ++ (const_int 14) (const_int 15)]))))] ++ "ISA_HAS_LSX" ++ 
"vexth.h.b\t%w0,%w1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V8HI")]) ++ ++(define_insn "lsx_vexth_w_h" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (any_extend:V4SI ++ (vec_select:V4HI ++ (match_operand:V8HI 1 "register_operand" "f") ++ (parallel [(const_int 4) (const_int 5) ++ (const_int 6) (const_int 7)]))))] ++ "ISA_HAS_LSX" ++ "vexth.w.h\t%w0,%w1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V4SI")]) ++ ++(define_insn "lsx_vexth_d_w" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (any_extend:V2DI ++ (vec_select:V2SI ++ (match_operand:V4SI 1 "register_operand" "f") ++ (parallel [(const_int 2) (const_int 3)]))))] ++ "ISA_HAS_LSX" ++ "vexth.d.w\t%w0,%w1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vexth_q_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")] ++ UNSPEC_LSX_VEXTH_Q_D))] ++ "ISA_HAS_LSX" ++ "vexth.q.d\t%w0,%w1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vexth_qu_du" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")] ++ UNSPEC_LSX_VEXTH_QU_DU))] ++ "ISA_HAS_LSX" ++ "vexth.qu.du\t%w0,%w1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vrotri_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (rotatert:ILSX (match_operand:ILSX 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")))] ++ "ISA_HAS_LSX" ++ "vrotri.\t%w0,%w1,%2" ++ [(set_attr "type" "simd_shf") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vextl_q_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")] ++ UNSPEC_LSX_VEXTL_Q_D))] ++ "ISA_HAS_LSX" ++ "vextl.q.d\t%w0,%w1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vsrlni__" ++ [(set 
(match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0") ++ (match_operand:ILSX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LSX_VSRLNI))] ++ "ISA_HAS_LSX" ++ "vsrlni..\t%w0,%w2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vsrlrni__" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0") ++ (match_operand:ILSX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LSX_VSRLRNI))] ++ "ISA_HAS_LSX" ++ "vsrlrni..\t%w0,%w2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vssrlni__" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0") ++ (match_operand:ILSX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LSX_VSSRLNI))] ++ "ISA_HAS_LSX" ++ "vssrlni..\t%w0,%w2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vssrlni__" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0") ++ (match_operand:ILSX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LSX_VSSRLNI2))] ++ "ISA_HAS_LSX" ++ "vssrlni..\t%w0,%w2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vssrlrni__" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0") ++ (match_operand:ILSX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LSX_VSSRLRNI))] ++ "ISA_HAS_LSX" ++ "vssrlrni..\t%w0,%w2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vssrlrni__" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 
"register_operand" "0") ++ (match_operand:ILSX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LSX_VSSRLRNI2))] ++ "ISA_HAS_LSX" ++ "vssrlrni..\t%w0,%w2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vsrani__" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0") ++ (match_operand:ILSX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LSX_VSRANI))] ++ "ISA_HAS_LSX" ++ "vsrani..\t%w0,%w2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vsrarni__" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0") ++ (match_operand:ILSX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LSX_VSRARNI))] ++ "ISA_HAS_LSX" ++ "vsrarni..\t%w0,%w2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vssrani__" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0") ++ (match_operand:ILSX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LSX_VSSRANI))] ++ "ISA_HAS_LSX" ++ "vssrani..\t%w0,%w2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vssrani__" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0") ++ (match_operand:ILSX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LSX_VSSRANI2))] ++ "ISA_HAS_LSX" ++ "vssrani..\t%w0,%w2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vssrarni__" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0") ++ (match_operand:ILSX 2 "register_operand" "f") ++ (match_operand 3 
"const_uimm8_operand" "")] ++ UNSPEC_LSX_VSSRARNI))] ++ "ISA_HAS_LSX" ++ "vssrarni..\t%w0,%w2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vssrarni__" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0") ++ (match_operand:ILSX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LSX_VSSRARNI2))] ++ "ISA_HAS_LSX" ++ "vssrarni..\t%w0,%w2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vpermi_w" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "0") ++ (match_operand:V4SI 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LSX_VPERMI))] ++ "ISA_HAS_LSX" ++ "vpermi.w\t%w0,%w2,%3" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "V4SI")]) +diff --git a/gcc/config/loongarch/predicates.md b/gcc/config/loongarch/predicates.md +index 4966d5569..cf9361b73 100644 +--- a/gcc/config/loongarch/predicates.md ++++ b/gcc/config/loongarch/predicates.md +@@ -87,10 +87,42 @@ + (and (match_code "const_int") + (match_test "IN_RANGE (INTVAL (op), 1, 4)"))) + ++(define_predicate "const_lsx_branch_operand" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (INTVAL (op), -1024, 1023)"))) ++ ++(define_predicate "const_uimm3_operand" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (INTVAL (op), 0, 7)"))) ++ ++(define_predicate "const_8_to_11_operand" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (INTVAL (op), 8, 11)"))) ++ ++(define_predicate "const_12_to_15_operand" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (INTVAL (op), 12, 15)"))) ++ ++(define_predicate "const_uimm4_operand" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (INTVAL (op), 0, 15)"))) ++ + (define_predicate "const_uimm5_operand" + (and (match_code "const_int") + (match_test "IN_RANGE (INTVAL (op), 0, 
31)"))) + ++(define_predicate "const_uimm6_operand" ++ (and (match_code "const_int") ++ (match_test "UIMM6_OPERAND (INTVAL (op))"))) ++ ++(define_predicate "const_uimm7_operand" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (INTVAL (op), 0, 127)"))) ++ ++(define_predicate "const_uimm8_operand" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (INTVAL (op), 0, 255)"))) ++ + (define_predicate "const_uimm14_operand" + (and (match_code "const_int") + (match_test "IN_RANGE (INTVAL (op), 0, 16383)"))) +@@ -99,10 +131,74 @@ + (and (match_code "const_int") + (match_test "IN_RANGE (INTVAL (op), 0, 32767)"))) + ++(define_predicate "const_imm5_operand" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (INTVAL (op), -16, 15)"))) ++ ++(define_predicate "const_imm10_operand" ++ (and (match_code "const_int") ++ (match_test "IMM10_OPERAND (INTVAL (op))"))) ++ + (define_predicate "const_imm12_operand" + (and (match_code "const_int") + (match_test "IMM12_OPERAND (INTVAL (op))"))) + ++(define_predicate "const_imm13_operand" ++ (and (match_code "const_int") ++ (match_test "IMM13_OPERAND (INTVAL (op))"))) ++ ++(define_predicate "reg_imm10_operand" ++ (ior (match_operand 0 "const_imm10_operand") ++ (match_operand 0 "register_operand"))) ++ ++(define_predicate "aq8b_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_signed_immediate_p (INTVAL (op), 8, 0)"))) ++ ++(define_predicate "aq8h_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_signed_immediate_p (INTVAL (op), 8, 1)"))) ++ ++(define_predicate "aq8w_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_signed_immediate_p (INTVAL (op), 8, 2)"))) ++ ++(define_predicate "aq8d_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_signed_immediate_p (INTVAL (op), 8, 3)"))) ++ ++(define_predicate "aq10b_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_signed_immediate_p (INTVAL (op), 10, 0)"))) ++ ++(define_predicate 
"aq10h_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_signed_immediate_p (INTVAL (op), 10, 1)"))) ++ ++(define_predicate "aq10w_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_signed_immediate_p (INTVAL (op), 10, 2)"))) ++ ++(define_predicate "aq10d_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_signed_immediate_p (INTVAL (op), 10, 3)"))) ++ ++(define_predicate "aq12b_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_signed_immediate_p (INTVAL (op), 12, 0)"))) ++ ++(define_predicate "aq12h_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_signed_immediate_p (INTVAL (op), 11, 1)"))) ++ ++(define_predicate "aq12w_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_signed_immediate_p (INTVAL (op), 10, 2)"))) ++ ++(define_predicate "aq12d_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_signed_immediate_p (INTVAL (op), 9, 3)"))) ++ + (define_predicate "sle_operand" + (and (match_code "const_int") + (match_test "IMM12_OPERAND (INTVAL (op) + 1)"))) +@@ -112,29 +208,206 @@ + (match_test "INTVAL (op) + 1 != 0"))) + + (define_predicate "const_0_operand" +- (and (match_code "const_int,const_double,const_vector") ++ (and (match_code "const_int,const_wide_int,const_double,const_vector") + (match_test "op == CONST0_RTX (GET_MODE (op))"))) + ++(define_predicate "const_m1_operand" ++ (and (match_code "const_int,const_wide_int,const_double,const_vector") ++ (match_test "op == CONSTM1_RTX (GET_MODE (op))"))) ++ ++(define_predicate "reg_or_m1_operand" ++ (ior (match_operand 0 "const_m1_operand") ++ (match_operand 0 "register_operand"))) ++ + (define_predicate "reg_or_0_operand" + (ior (match_operand 0 "const_0_operand") + (match_operand 0 "register_operand"))) + + (define_predicate "const_1_operand" +- (and (match_code "const_int,const_double,const_vector") ++ (and (match_code "const_int,const_wide_int,const_double,const_vector") + (match_test "op == 
CONST1_RTX (GET_MODE (op))"))) + + (define_predicate "reg_or_1_operand" + (ior (match_operand 0 "const_1_operand") + (match_operand 0 "register_operand"))) + ++;; These are used in vec_merge, hence accept bitmask as const_int. ++(define_predicate "const_exp_2_operand" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (exact_log2 (INTVAL (op)), 0, 1)"))) ++ ++(define_predicate "const_exp_4_operand" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (exact_log2 (INTVAL (op)), 0, 3)"))) ++ ++(define_predicate "const_exp_8_operand" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (exact_log2 (INTVAL (op)), 0, 7)"))) ++ ++(define_predicate "const_exp_16_operand" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (exact_log2 (INTVAL (op)), 0, 15)"))) ++ ++(define_predicate "const_exp_32_operand" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (exact_log2 (INTVAL (op)), 0, 31)"))) ++ ++;; This is used for indexing into vectors, and hence only accepts const_int. 
++(define_predicate "const_0_or_1_operand" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (INTVAL (op), 0, 1)"))) ++ ++(define_predicate "const_0_to_3_operand" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (INTVAL (op), 0, 3)"))) ++ ++(define_predicate "const_0_to_7_operand" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (INTVAL (op), 0, 7)"))) ++ ++(define_predicate "const_2_or_3_operand" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (INTVAL (op), 2, 3)"))) ++ ++(define_predicate "const_4_to_7_operand" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (INTVAL (op), 4, 7)"))) ++ ++(define_predicate "const_8_to_15_operand" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (INTVAL (op), 0, 7)"))) ++ ++(define_predicate "const_16_to_31_operand" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (INTVAL (op), 0, 7)"))) ++ ++(define_predicate "qi_mask_operand" ++ (and (match_code "const_int") ++ (match_test "UINTVAL (op) == 0xff"))) ++ ++(define_predicate "hi_mask_operand" ++ (and (match_code "const_int") ++ (match_test "UINTVAL (op) == 0xffff"))) ++ + (define_predicate "lu52i_mask_operand" + (and (match_code "const_int") + (match_test "UINTVAL (op) == 0xfffffffffffff"))) + ++(define_predicate "si_mask_operand" ++ (and (match_code "const_int") ++ (match_test "UINTVAL (op) == 0xffffffff"))) ++ + (define_predicate "low_bitmask_operand" + (and (match_code "const_int") + (match_test "low_bitmask_len (mode, INTVAL (op)) > 12"))) + ++(define_predicate "d_operand" ++ (and (match_code "reg") ++ (match_test "GP_REG_P (REGNO (op))"))) ++ ++(define_predicate "db4_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_unsigned_immediate_p (INTVAL (op) + 1, 4, 0)"))) ++ ++(define_predicate "db7_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_unsigned_immediate_p (INTVAL (op) + 1, 7, 0)"))) ++ ++(define_predicate "db8_operand" ++ (and (match_code "const_int") ++ (match_test 
"loongarch_unsigned_immediate_p (INTVAL (op) + 1, 8, 0)"))) ++ ++(define_predicate "ib3_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_unsigned_immediate_p (INTVAL (op) - 1, 3, 0)"))) ++ ++(define_predicate "sb4_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_signed_immediate_p (INTVAL (op), 4, 0)"))) ++ ++(define_predicate "sb5_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_signed_immediate_p (INTVAL (op), 5, 0)"))) ++ ++(define_predicate "sb8_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_signed_immediate_p (INTVAL (op), 8, 0)"))) ++ ++(define_predicate "sd8_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_signed_immediate_p (INTVAL (op), 8, 3)"))) ++ ++(define_predicate "ub4_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_unsigned_immediate_p (INTVAL (op), 4, 0)"))) ++ ++(define_predicate "ub8_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_unsigned_immediate_p (INTVAL (op), 8, 0)"))) ++ ++(define_predicate "uh4_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_unsigned_immediate_p (INTVAL (op), 4, 1)"))) ++ ++(define_predicate "uw4_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_unsigned_immediate_p (INTVAL (op), 4, 2)"))) ++ ++(define_predicate "uw5_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_unsigned_immediate_p (INTVAL (op), 5, 2)"))) ++ ++(define_predicate "uw6_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_unsigned_immediate_p (INTVAL (op), 6, 2)"))) ++ ++(define_predicate "uw8_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_unsigned_immediate_p (INTVAL (op), 8, 2)"))) ++ ++(define_predicate "addiur2_operand" ++ (and (match_code "const_int") ++ (ior (match_test "INTVAL (op) == -1") ++ (match_test "INTVAL (op) == 1") ++ (match_test "INTVAL (op) == 4") ++ (match_test "INTVAL (op) == 8") ++ (match_test "INTVAL (op) == 
12") ++ (match_test "INTVAL (op) == 16") ++ (match_test "INTVAL (op) == 20") ++ (match_test "INTVAL (op) == 24")))) ++ ++(define_predicate "addiusp_operand" ++ (and (match_code "const_int") ++ (ior (match_test "(IN_RANGE (INTVAL (op), 2, 257))") ++ (match_test "(IN_RANGE (INTVAL (op), -258, -3))")))) ++ ++(define_predicate "andi16_operand" ++ (and (match_code "const_int") ++ (ior (match_test "IN_RANGE (INTVAL (op), 1, 4)") ++ (match_test "IN_RANGE (INTVAL (op), 7, 8)") ++ (match_test "IN_RANGE (INTVAL (op), 15, 16)") ++ (match_test "IN_RANGE (INTVAL (op), 31, 32)") ++ (match_test "IN_RANGE (INTVAL (op), 63, 64)") ++ (match_test "INTVAL (op) == 255") ++ (match_test "INTVAL (op) == 32768") ++ (match_test "INTVAL (op) == 65535")))) ++ ++(define_predicate "movep_src_register" ++ (and (match_code "reg") ++ (ior (match_test ("IN_RANGE (REGNO (op), 2, 3)")) ++ (match_test ("IN_RANGE (REGNO (op), 16, 20)"))))) ++ ++(define_predicate "movep_src_operand" ++ (ior (match_operand 0 "const_0_operand") ++ (match_operand 0 "movep_src_register"))) ++ ++(define_predicate "fcc_reload_operand" ++ (and (match_code "reg,subreg") ++ (match_test "FCC_REG_P (true_regnum (op))"))) ++ ++(define_predicate "muldiv_target_operand" ++ (match_operand 0 "register_operand")) ++ + (define_predicate "const_call_insn_operand" + (match_code "const,symbol_ref,label_ref") + { +@@ -303,3 +576,59 @@ + (define_predicate "non_volatile_mem_operand" + (and (match_operand 0 "memory_operand") + (not (match_test "MEM_VOLATILE_P (op)")))) ++ ++(define_predicate "const_vector_same_val_operand" ++ (match_code "const_vector") ++{ ++ return loongarch_const_vector_same_val_p (op, mode); ++}) ++ ++(define_predicate "const_vector_same_simm5_operand" ++ (match_code "const_vector") ++{ ++ return loongarch_const_vector_same_int_p (op, mode, -16, 15); ++}) ++ ++(define_predicate "const_vector_same_uimm5_operand" ++ (match_code "const_vector") ++{ ++ return loongarch_const_vector_same_int_p (op, mode, 0, 31); ++}) ++ 
++(define_predicate "const_vector_same_ximm5_operand" ++ (match_code "const_vector") ++{ ++ return loongarch_const_vector_same_int_p (op, mode, -31, 31); ++}) ++ ++(define_predicate "const_vector_same_uimm6_operand" ++ (match_code "const_vector") ++{ ++ return loongarch_const_vector_same_int_p (op, mode, 0, 63); ++}) ++ ++(define_predicate "par_const_vector_shf_set_operand" ++ (match_code "parallel") ++{ ++ return loongarch_const_vector_shuffle_set_p (op, mode); ++}) ++ ++(define_predicate "reg_or_vector_same_val_operand" ++ (ior (match_operand 0 "register_operand") ++ (match_operand 0 "const_vector_same_val_operand"))) ++ ++(define_predicate "reg_or_vector_same_simm5_operand" ++ (ior (match_operand 0 "register_operand") ++ (match_operand 0 "const_vector_same_simm5_operand"))) ++ ++(define_predicate "reg_or_vector_same_uimm5_operand" ++ (ior (match_operand 0 "register_operand") ++ (match_operand 0 "const_vector_same_uimm5_operand"))) ++ ++(define_predicate "reg_or_vector_same_ximm5_operand" ++ (ior (match_operand 0 "register_operand") ++ (match_operand 0 "const_vector_same_ximm5_operand"))) ++ ++(define_predicate "reg_or_vector_same_uimm6_operand" ++ (ior (match_operand 0 "register_operand") ++ (match_operand 0 "const_vector_same_uimm6_operand"))) +diff --git a/gcc/doc/md.texi b/gcc/doc/md.texi +index 3b544358b..b58da0787 100644 +--- a/gcc/doc/md.texi ++++ b/gcc/doc/md.texi +@@ -2764,6 +2764,17 @@ as @code{st.w} and @code{ld.w}. + A signed 12-bit constant (for arithmetic instructions). + @item K + An unsigned 12-bit constant (for logic instructions). ++@item M ++A constant that cannot be loaded using @code{lui}, @code{addiu} ++or @code{ori}. ++@item N ++A constant in the range -65535 to -1 (inclusive). ++@item O ++A signed 15-bit constant. ++@item P ++A constant in the range 1 to 65535 (inclusive). ++@item R ++An address that can be used in a non-macro load or store. + @item ZB + An address that is held in a general-purpose register. + The offset is zero. 
+-- +2.33.0 + diff --git a/LoongArch-Add-Loongson-SX-directive-builtin-function.patch b/LoongArch-Add-Loongson-SX-directive-builtin-function.patch new file mode 100644 index 0000000000000000000000000000000000000000..43b5ad219d8b88a2c739c92f52eb5b2119533afe --- /dev/null +++ b/LoongArch-Add-Loongson-SX-directive-builtin-function.patch @@ -0,0 +1,7549 @@ +From aafa5ab8c53dd2919d417b2f47e0c0e63ca7e10d Mon Sep 17 00:00:00 2001 +From: Lulu Cheng +Date: Thu, 16 Mar 2023 16:31:04 +0800 +Subject: [PATCH 064/124] LoongArch: Add Loongson SX directive builtin function + support. + +gcc/ChangeLog: + + * config.gcc: Export the header file lsxintrin.h. + * config/loongarch/loongarch-builtins.cc (LARCH_FTYPE_NAME4): Add builtin function support. + (enum loongarch_builtin_type): Ditto. + (AVAIL_ALL): Ditto. + (LARCH_BUILTIN): Ditto. + (LSX_BUILTIN): Ditto. + (LSX_BUILTIN_TEST_BRANCH): Ditto. + (LSX_NO_TARGET_BUILTIN): Ditto. + (CODE_FOR_lsx_vsadd_b): Ditto. + (CODE_FOR_lsx_vsadd_h): Ditto. + (CODE_FOR_lsx_vsadd_w): Ditto. + (CODE_FOR_lsx_vsadd_d): Ditto. + (CODE_FOR_lsx_vsadd_bu): Ditto. + (CODE_FOR_lsx_vsadd_hu): Ditto. + (CODE_FOR_lsx_vsadd_wu): Ditto. + (CODE_FOR_lsx_vsadd_du): Ditto. + (CODE_FOR_lsx_vadd_b): Ditto. + (CODE_FOR_lsx_vadd_h): Ditto. + (CODE_FOR_lsx_vadd_w): Ditto. + (CODE_FOR_lsx_vadd_d): Ditto. + (CODE_FOR_lsx_vaddi_bu): Ditto. + (CODE_FOR_lsx_vaddi_hu): Ditto. + (CODE_FOR_lsx_vaddi_wu): Ditto. + (CODE_FOR_lsx_vaddi_du): Ditto. + (CODE_FOR_lsx_vand_v): Ditto. + (CODE_FOR_lsx_vandi_b): Ditto. + (CODE_FOR_lsx_bnz_v): Ditto. + (CODE_FOR_lsx_bz_v): Ditto. + (CODE_FOR_lsx_vbitsel_v): Ditto. + (CODE_FOR_lsx_vseqi_b): Ditto. + (CODE_FOR_lsx_vseqi_h): Ditto. + (CODE_FOR_lsx_vseqi_w): Ditto. + (CODE_FOR_lsx_vseqi_d): Ditto. + (CODE_FOR_lsx_vslti_b): Ditto. + (CODE_FOR_lsx_vslti_h): Ditto. + (CODE_FOR_lsx_vslti_w): Ditto. + (CODE_FOR_lsx_vslti_d): Ditto. + (CODE_FOR_lsx_vslti_bu): Ditto. + (CODE_FOR_lsx_vslti_hu): Ditto. + (CODE_FOR_lsx_vslti_wu): Ditto. 
+ (CODE_FOR_lsx_vslti_du): Ditto. + (CODE_FOR_lsx_vslei_b): Ditto. + (CODE_FOR_lsx_vslei_h): Ditto. + (CODE_FOR_lsx_vslei_w): Ditto. + (CODE_FOR_lsx_vslei_d): Ditto. + (CODE_FOR_lsx_vslei_bu): Ditto. + (CODE_FOR_lsx_vslei_hu): Ditto. + (CODE_FOR_lsx_vslei_wu): Ditto. + (CODE_FOR_lsx_vslei_du): Ditto. + (CODE_FOR_lsx_vdiv_b): Ditto. + (CODE_FOR_lsx_vdiv_h): Ditto. + (CODE_FOR_lsx_vdiv_w): Ditto. + (CODE_FOR_lsx_vdiv_d): Ditto. + (CODE_FOR_lsx_vdiv_bu): Ditto. + (CODE_FOR_lsx_vdiv_hu): Ditto. + (CODE_FOR_lsx_vdiv_wu): Ditto. + (CODE_FOR_lsx_vdiv_du): Ditto. + (CODE_FOR_lsx_vfadd_s): Ditto. + (CODE_FOR_lsx_vfadd_d): Ditto. + (CODE_FOR_lsx_vftintrz_w_s): Ditto. + (CODE_FOR_lsx_vftintrz_l_d): Ditto. + (CODE_FOR_lsx_vftintrz_wu_s): Ditto. + (CODE_FOR_lsx_vftintrz_lu_d): Ditto. + (CODE_FOR_lsx_vffint_s_w): Ditto. + (CODE_FOR_lsx_vffint_d_l): Ditto. + (CODE_FOR_lsx_vffint_s_wu): Ditto. + (CODE_FOR_lsx_vffint_d_lu): Ditto. + (CODE_FOR_lsx_vfsub_s): Ditto. + (CODE_FOR_lsx_vfsub_d): Ditto. + (CODE_FOR_lsx_vfmul_s): Ditto. + (CODE_FOR_lsx_vfmul_d): Ditto. + (CODE_FOR_lsx_vfdiv_s): Ditto. + (CODE_FOR_lsx_vfdiv_d): Ditto. + (CODE_FOR_lsx_vfmax_s): Ditto. + (CODE_FOR_lsx_vfmax_d): Ditto. + (CODE_FOR_lsx_vfmin_s): Ditto. + (CODE_FOR_lsx_vfmin_d): Ditto. + (CODE_FOR_lsx_vfsqrt_s): Ditto. + (CODE_FOR_lsx_vfsqrt_d): Ditto. + (CODE_FOR_lsx_vflogb_s): Ditto. + (CODE_FOR_lsx_vflogb_d): Ditto. + (CODE_FOR_lsx_vmax_b): Ditto. + (CODE_FOR_lsx_vmax_h): Ditto. + (CODE_FOR_lsx_vmax_w): Ditto. + (CODE_FOR_lsx_vmax_d): Ditto. + (CODE_FOR_lsx_vmaxi_b): Ditto. + (CODE_FOR_lsx_vmaxi_h): Ditto. + (CODE_FOR_lsx_vmaxi_w): Ditto. + (CODE_FOR_lsx_vmaxi_d): Ditto. + (CODE_FOR_lsx_vmax_bu): Ditto. + (CODE_FOR_lsx_vmax_hu): Ditto. + (CODE_FOR_lsx_vmax_wu): Ditto. + (CODE_FOR_lsx_vmax_du): Ditto. + (CODE_FOR_lsx_vmaxi_bu): Ditto. + (CODE_FOR_lsx_vmaxi_hu): Ditto. + (CODE_FOR_lsx_vmaxi_wu): Ditto. + (CODE_FOR_lsx_vmaxi_du): Ditto. + (CODE_FOR_lsx_vmin_b): Ditto. + (CODE_FOR_lsx_vmin_h): Ditto. 
+ (CODE_FOR_lsx_vmin_w): Ditto. + (CODE_FOR_lsx_vmin_d): Ditto. + (CODE_FOR_lsx_vmini_b): Ditto. + (CODE_FOR_lsx_vmini_h): Ditto. + (CODE_FOR_lsx_vmini_w): Ditto. + (CODE_FOR_lsx_vmini_d): Ditto. + (CODE_FOR_lsx_vmin_bu): Ditto. + (CODE_FOR_lsx_vmin_hu): Ditto. + (CODE_FOR_lsx_vmin_wu): Ditto. + (CODE_FOR_lsx_vmin_du): Ditto. + (CODE_FOR_lsx_vmini_bu): Ditto. + (CODE_FOR_lsx_vmini_hu): Ditto. + (CODE_FOR_lsx_vmini_wu): Ditto. + (CODE_FOR_lsx_vmini_du): Ditto. + (CODE_FOR_lsx_vmod_b): Ditto. + (CODE_FOR_lsx_vmod_h): Ditto. + (CODE_FOR_lsx_vmod_w): Ditto. + (CODE_FOR_lsx_vmod_d): Ditto. + (CODE_FOR_lsx_vmod_bu): Ditto. + (CODE_FOR_lsx_vmod_hu): Ditto. + (CODE_FOR_lsx_vmod_wu): Ditto. + (CODE_FOR_lsx_vmod_du): Ditto. + (CODE_FOR_lsx_vmul_b): Ditto. + (CODE_FOR_lsx_vmul_h): Ditto. + (CODE_FOR_lsx_vmul_w): Ditto. + (CODE_FOR_lsx_vmul_d): Ditto. + (CODE_FOR_lsx_vclz_b): Ditto. + (CODE_FOR_lsx_vclz_h): Ditto. + (CODE_FOR_lsx_vclz_w): Ditto. + (CODE_FOR_lsx_vclz_d): Ditto. + (CODE_FOR_lsx_vnor_v): Ditto. + (CODE_FOR_lsx_vor_v): Ditto. + (CODE_FOR_lsx_vori_b): Ditto. + (CODE_FOR_lsx_vnori_b): Ditto. + (CODE_FOR_lsx_vpcnt_b): Ditto. + (CODE_FOR_lsx_vpcnt_h): Ditto. + (CODE_FOR_lsx_vpcnt_w): Ditto. + (CODE_FOR_lsx_vpcnt_d): Ditto. + (CODE_FOR_lsx_vxor_v): Ditto. + (CODE_FOR_lsx_vxori_b): Ditto. + (CODE_FOR_lsx_vsll_b): Ditto. + (CODE_FOR_lsx_vsll_h): Ditto. + (CODE_FOR_lsx_vsll_w): Ditto. + (CODE_FOR_lsx_vsll_d): Ditto. + (CODE_FOR_lsx_vslli_b): Ditto. + (CODE_FOR_lsx_vslli_h): Ditto. + (CODE_FOR_lsx_vslli_w): Ditto. + (CODE_FOR_lsx_vslli_d): Ditto. + (CODE_FOR_lsx_vsra_b): Ditto. + (CODE_FOR_lsx_vsra_h): Ditto. + (CODE_FOR_lsx_vsra_w): Ditto. + (CODE_FOR_lsx_vsra_d): Ditto. + (CODE_FOR_lsx_vsrai_b): Ditto. + (CODE_FOR_lsx_vsrai_h): Ditto. + (CODE_FOR_lsx_vsrai_w): Ditto. + (CODE_FOR_lsx_vsrai_d): Ditto. + (CODE_FOR_lsx_vsrl_b): Ditto. + (CODE_FOR_lsx_vsrl_h): Ditto. + (CODE_FOR_lsx_vsrl_w): Ditto. + (CODE_FOR_lsx_vsrl_d): Ditto. + (CODE_FOR_lsx_vsrli_b): Ditto. 
+ (CODE_FOR_lsx_vsrli_h): Ditto. + (CODE_FOR_lsx_vsrli_w): Ditto. + (CODE_FOR_lsx_vsrli_d): Ditto. + (CODE_FOR_lsx_vsub_b): Ditto. + (CODE_FOR_lsx_vsub_h): Ditto. + (CODE_FOR_lsx_vsub_w): Ditto. + (CODE_FOR_lsx_vsub_d): Ditto. + (CODE_FOR_lsx_vsubi_bu): Ditto. + (CODE_FOR_lsx_vsubi_hu): Ditto. + (CODE_FOR_lsx_vsubi_wu): Ditto. + (CODE_FOR_lsx_vsubi_du): Ditto. + (CODE_FOR_lsx_vpackod_d): Ditto. + (CODE_FOR_lsx_vpackev_d): Ditto. + (CODE_FOR_lsx_vpickod_d): Ditto. + (CODE_FOR_lsx_vpickev_d): Ditto. + (CODE_FOR_lsx_vrepli_b): Ditto. + (CODE_FOR_lsx_vrepli_h): Ditto. + (CODE_FOR_lsx_vrepli_w): Ditto. + (CODE_FOR_lsx_vrepli_d): Ditto. + (CODE_FOR_lsx_vsat_b): Ditto. + (CODE_FOR_lsx_vsat_h): Ditto. + (CODE_FOR_lsx_vsat_w): Ditto. + (CODE_FOR_lsx_vsat_d): Ditto. + (CODE_FOR_lsx_vsat_bu): Ditto. + (CODE_FOR_lsx_vsat_hu): Ditto. + (CODE_FOR_lsx_vsat_wu): Ditto. + (CODE_FOR_lsx_vsat_du): Ditto. + (CODE_FOR_lsx_vavg_b): Ditto. + (CODE_FOR_lsx_vavg_h): Ditto. + (CODE_FOR_lsx_vavg_w): Ditto. + (CODE_FOR_lsx_vavg_d): Ditto. + (CODE_FOR_lsx_vavg_bu): Ditto. + (CODE_FOR_lsx_vavg_hu): Ditto. + (CODE_FOR_lsx_vavg_wu): Ditto. + (CODE_FOR_lsx_vavg_du): Ditto. + (CODE_FOR_lsx_vavgr_b): Ditto. + (CODE_FOR_lsx_vavgr_h): Ditto. + (CODE_FOR_lsx_vavgr_w): Ditto. + (CODE_FOR_lsx_vavgr_d): Ditto. + (CODE_FOR_lsx_vavgr_bu): Ditto. + (CODE_FOR_lsx_vavgr_hu): Ditto. + (CODE_FOR_lsx_vavgr_wu): Ditto. + (CODE_FOR_lsx_vavgr_du): Ditto. + (CODE_FOR_lsx_vssub_b): Ditto. + (CODE_FOR_lsx_vssub_h): Ditto. + (CODE_FOR_lsx_vssub_w): Ditto. + (CODE_FOR_lsx_vssub_d): Ditto. + (CODE_FOR_lsx_vssub_bu): Ditto. + (CODE_FOR_lsx_vssub_hu): Ditto. + (CODE_FOR_lsx_vssub_wu): Ditto. + (CODE_FOR_lsx_vssub_du): Ditto. + (CODE_FOR_lsx_vabsd_b): Ditto. + (CODE_FOR_lsx_vabsd_h): Ditto. + (CODE_FOR_lsx_vabsd_w): Ditto. + (CODE_FOR_lsx_vabsd_d): Ditto. + (CODE_FOR_lsx_vabsd_bu): Ditto. + (CODE_FOR_lsx_vabsd_hu): Ditto. + (CODE_FOR_lsx_vabsd_wu): Ditto. + (CODE_FOR_lsx_vabsd_du): Ditto. + (CODE_FOR_lsx_vftint_w_s): Ditto. 
+ (CODE_FOR_lsx_vftint_l_d): Ditto. + (CODE_FOR_lsx_vftint_wu_s): Ditto. + (CODE_FOR_lsx_vftint_lu_d): Ditto. + (CODE_FOR_lsx_vandn_v): Ditto. + (CODE_FOR_lsx_vorn_v): Ditto. + (CODE_FOR_lsx_vneg_b): Ditto. + (CODE_FOR_lsx_vneg_h): Ditto. + (CODE_FOR_lsx_vneg_w): Ditto. + (CODE_FOR_lsx_vneg_d): Ditto. + (CODE_FOR_lsx_vshuf4i_d): Ditto. + (CODE_FOR_lsx_vbsrl_v): Ditto. + (CODE_FOR_lsx_vbsll_v): Ditto. + (CODE_FOR_lsx_vfmadd_s): Ditto. + (CODE_FOR_lsx_vfmadd_d): Ditto. + (CODE_FOR_lsx_vfmsub_s): Ditto. + (CODE_FOR_lsx_vfmsub_d): Ditto. + (CODE_FOR_lsx_vfnmadd_s): Ditto. + (CODE_FOR_lsx_vfnmadd_d): Ditto. + (CODE_FOR_lsx_vfnmsub_s): Ditto. + (CODE_FOR_lsx_vfnmsub_d): Ditto. + (CODE_FOR_lsx_vmuh_b): Ditto. + (CODE_FOR_lsx_vmuh_h): Ditto. + (CODE_FOR_lsx_vmuh_w): Ditto. + (CODE_FOR_lsx_vmuh_d): Ditto. + (CODE_FOR_lsx_vmuh_bu): Ditto. + (CODE_FOR_lsx_vmuh_hu): Ditto. + (CODE_FOR_lsx_vmuh_wu): Ditto. + (CODE_FOR_lsx_vmuh_du): Ditto. + (CODE_FOR_lsx_vsllwil_h_b): Ditto. + (CODE_FOR_lsx_vsllwil_w_h): Ditto. + (CODE_FOR_lsx_vsllwil_d_w): Ditto. + (CODE_FOR_lsx_vsllwil_hu_bu): Ditto. + (CODE_FOR_lsx_vsllwil_wu_hu): Ditto. + (CODE_FOR_lsx_vsllwil_du_wu): Ditto. + (CODE_FOR_lsx_vssran_b_h): Ditto. + (CODE_FOR_lsx_vssran_h_w): Ditto. + (CODE_FOR_lsx_vssran_w_d): Ditto. + (CODE_FOR_lsx_vssran_bu_h): Ditto. + (CODE_FOR_lsx_vssran_hu_w): Ditto. + (CODE_FOR_lsx_vssran_wu_d): Ditto. + (CODE_FOR_lsx_vssrarn_b_h): Ditto. + (CODE_FOR_lsx_vssrarn_h_w): Ditto. + (CODE_FOR_lsx_vssrarn_w_d): Ditto. + (CODE_FOR_lsx_vssrarn_bu_h): Ditto. + (CODE_FOR_lsx_vssrarn_hu_w): Ditto. + (CODE_FOR_lsx_vssrarn_wu_d): Ditto. + (CODE_FOR_lsx_vssrln_bu_h): Ditto. + (CODE_FOR_lsx_vssrln_hu_w): Ditto. + (CODE_FOR_lsx_vssrln_wu_d): Ditto. + (CODE_FOR_lsx_vssrlrn_bu_h): Ditto. + (CODE_FOR_lsx_vssrlrn_hu_w): Ditto. + (CODE_FOR_lsx_vssrlrn_wu_d): Ditto. + (loongarch_builtin_vector_type): Ditto. + (loongarch_build_cvpointer_type): Ditto. + (LARCH_ATYPE_CVPOINTER): Ditto. + (LARCH_ATYPE_BOOLEAN): Ditto. 
+ (LARCH_ATYPE_V2SF): Ditto. + (LARCH_ATYPE_V2HI): Ditto. + (LARCH_ATYPE_V2SI): Ditto. + (LARCH_ATYPE_V4QI): Ditto. + (LARCH_ATYPE_V4HI): Ditto. + (LARCH_ATYPE_V8QI): Ditto. + (LARCH_ATYPE_V2DI): Ditto. + (LARCH_ATYPE_V4SI): Ditto. + (LARCH_ATYPE_V8HI): Ditto. + (LARCH_ATYPE_V16QI): Ditto. + (LARCH_ATYPE_V2DF): Ditto. + (LARCH_ATYPE_V4SF): Ditto. + (LARCH_ATYPE_V4DI): Ditto. + (LARCH_ATYPE_V8SI): Ditto. + (LARCH_ATYPE_V16HI): Ditto. + (LARCH_ATYPE_V32QI): Ditto. + (LARCH_ATYPE_V4DF): Ditto. + (LARCH_ATYPE_V8SF): Ditto. + (LARCH_ATYPE_UV2DI): Ditto. + (LARCH_ATYPE_UV4SI): Ditto. + (LARCH_ATYPE_UV8HI): Ditto. + (LARCH_ATYPE_UV16QI): Ditto. + (LARCH_ATYPE_UV4DI): Ditto. + (LARCH_ATYPE_UV8SI): Ditto. + (LARCH_ATYPE_UV16HI): Ditto. + (LARCH_ATYPE_UV32QI): Ditto. + (LARCH_ATYPE_UV2SI): Ditto. + (LARCH_ATYPE_UV4HI): Ditto. + (LARCH_ATYPE_UV8QI): Ditto. + (loongarch_builtin_vectorized_function): Ditto. + (LARCH_GET_BUILTIN): Ditto. + (loongarch_expand_builtin_insn): Ditto. + (loongarch_expand_builtin_lsx_test_branch): Ditto. + (loongarch_expand_builtin): Ditto. + * config/loongarch/loongarch-ftypes.def (1): Ditto. + (2): Ditto. + (3): Ditto. + (4): Ditto. + * config/loongarch/lsxintrin.h: New file. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config.gcc | 2 +- + gcc/config/loongarch/loongarch-builtins.cc | 1498 +++++- + gcc/config/loongarch/loongarch-ftypes.def | 395 +- + gcc/config/loongarch/lsxintrin.h | 5181 ++++++++++++++++++++ + 4 files changed, 7070 insertions(+), 6 deletions(-) + create mode 100644 gcc/config/loongarch/lsxintrin.h + +diff --git a/gcc/config.gcc b/gcc/config.gcc +index 61d81d8d8..4e149e0ef 100644 +--- a/gcc/config.gcc ++++ b/gcc/config.gcc +@@ -456,7 +456,7 @@ mips*-*-*) + ;; + loongarch*-*-*) + cpu_type=loongarch +- extra_headers="larchintrin.h" ++ extra_headers="larchintrin.h lsxintrin.h" + extra_objs="loongarch-c.o loongarch-builtins.o loongarch-cpu.o loongarch-opts.o loongarch-def.o" + extra_gcc_objs="loongarch-driver.o loongarch-cpu.o loongarch-opts.o loongarch-def.o" + extra_options="${extra_options} g.opt fused-madd.opt" +diff --git a/gcc/config/loongarch/loongarch-builtins.cc b/gcc/config/loongarch/loongarch-builtins.cc +index c8548a07f..de6428ac6 100644 +--- a/gcc/config/loongarch/loongarch-builtins.cc ++++ b/gcc/config/loongarch/loongarch-builtins.cc +@@ -34,14 +34,18 @@ along with GCC; see the file COPYING3. If not see + #include "recog.h" + #include "diagnostic.h" + #include "fold-const.h" ++#include "explow.h" + #include "expr.h" + #include "langhooks.h" + #include "emit-rtl.h" ++#include "case-cfn-macros.h" + + /* Macros to create an enumeration identifier for a function prototype. */ + #define LARCH_FTYPE_NAME1(A, B) LARCH_##A##_FTYPE_##B + #define LARCH_FTYPE_NAME2(A, B, C) LARCH_##A##_FTYPE_##B##_##C + #define LARCH_FTYPE_NAME3(A, B, C, D) LARCH_##A##_FTYPE_##B##_##C##_##D ++#define LARCH_FTYPE_NAME4(A, B, C, D, E) \ ++ LARCH_##A##_FTYPE_##B##_##C##_##D##_##E + + /* Classifies the prototype of a built-in function. */ + enum loongarch_function_type +@@ -64,6 +68,12 @@ enum loongarch_builtin_type + value and the arguments are mapped to operands 0 and above. 
*/ + LARCH_BUILTIN_DIRECT_NO_TARGET, + ++ /* For generating LoongArch LSX. */ ++ LARCH_BUILTIN_LSX, ++ ++ /* The function corresponds to an LSX conditional branch instruction ++ combined with a compare instruction. */ ++ LARCH_BUILTIN_LSX_TEST_BRANCH, + }; + + /* Declare an availability predicate for built-in functions that require +@@ -101,6 +111,7 @@ struct loongarch_builtin_description + }; + + AVAIL_ALL (hard_float, TARGET_HARD_FLOAT_ABI) ++AVAIL_ALL (lsx, ISA_HAS_LSX) + + /* Construct a loongarch_builtin_description from the given arguments. + +@@ -120,8 +131,8 @@ AVAIL_ALL (hard_float, TARGET_HARD_FLOAT_ABI) + #define LARCH_BUILTIN(INSN, NAME, BUILTIN_TYPE, FUNCTION_TYPE, AVAIL) \ + { \ + CODE_FOR_loongarch_##INSN, "__builtin_loongarch_" NAME, \ +- BUILTIN_TYPE, FUNCTION_TYPE, \ +- loongarch_builtin_avail_##AVAIL \ ++ BUILTIN_TYPE, FUNCTION_TYPE, \ ++ loongarch_builtin_avail_##AVAIL \ + } + + /* Define __builtin_loongarch_, which is a LARCH_BUILTIN_DIRECT function +@@ -137,6 +148,300 @@ AVAIL_ALL (hard_float, TARGET_HARD_FLOAT_ABI) + LARCH_BUILTIN (INSN, #INSN, LARCH_BUILTIN_DIRECT_NO_TARGET, \ + FUNCTION_TYPE, AVAIL) + ++/* Define an LSX LARCH_BUILTIN_DIRECT function __builtin_lsx_ ++ for instruction CODE_FOR_lsx_. FUNCTION_TYPE is a builtin_description ++ field. */ ++#define LSX_BUILTIN(INSN, FUNCTION_TYPE) \ ++ { CODE_FOR_lsx_ ## INSN, \ ++ "__builtin_lsx_" #INSN, LARCH_BUILTIN_DIRECT, \ ++ FUNCTION_TYPE, loongarch_builtin_avail_lsx } ++ ++ ++/* Define an LSX LARCH_BUILTIN_LSX_TEST_BRANCH function __builtin_lsx_ ++ for instruction CODE_FOR_lsx_. FUNCTION_TYPE is a builtin_description ++ field. */ ++#define LSX_BUILTIN_TEST_BRANCH(INSN, FUNCTION_TYPE) \ ++ { CODE_FOR_lsx_ ## INSN, \ ++ "__builtin_lsx_" #INSN, LARCH_BUILTIN_LSX_TEST_BRANCH, \ ++ FUNCTION_TYPE, loongarch_builtin_avail_lsx } ++ ++/* Define an LSX LARCH_BUILTIN_DIRECT_NO_TARGET function __builtin_lsx_ ++ for instruction CODE_FOR_lsx_. FUNCTION_TYPE is a builtin_description ++ field. 
*/ ++#define LSX_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE) \ ++ { CODE_FOR_lsx_ ## INSN, \ ++ "__builtin_lsx_" #INSN, LARCH_BUILTIN_DIRECT_NO_TARGET, \ ++ FUNCTION_TYPE, loongarch_builtin_avail_lsx } ++ ++/* LoongArch SX define CODE_FOR_lsx_xxx */ ++#define CODE_FOR_lsx_vsadd_b CODE_FOR_ssaddv16qi3 ++#define CODE_FOR_lsx_vsadd_h CODE_FOR_ssaddv8hi3 ++#define CODE_FOR_lsx_vsadd_w CODE_FOR_ssaddv4si3 ++#define CODE_FOR_lsx_vsadd_d CODE_FOR_ssaddv2di3 ++#define CODE_FOR_lsx_vsadd_bu CODE_FOR_usaddv16qi3 ++#define CODE_FOR_lsx_vsadd_hu CODE_FOR_usaddv8hi3 ++#define CODE_FOR_lsx_vsadd_wu CODE_FOR_usaddv4si3 ++#define CODE_FOR_lsx_vsadd_du CODE_FOR_usaddv2di3 ++#define CODE_FOR_lsx_vadd_b CODE_FOR_addv16qi3 ++#define CODE_FOR_lsx_vadd_h CODE_FOR_addv8hi3 ++#define CODE_FOR_lsx_vadd_w CODE_FOR_addv4si3 ++#define CODE_FOR_lsx_vadd_d CODE_FOR_addv2di3 ++#define CODE_FOR_lsx_vaddi_bu CODE_FOR_addv16qi3 ++#define CODE_FOR_lsx_vaddi_hu CODE_FOR_addv8hi3 ++#define CODE_FOR_lsx_vaddi_wu CODE_FOR_addv4si3 ++#define CODE_FOR_lsx_vaddi_du CODE_FOR_addv2di3 ++#define CODE_FOR_lsx_vand_v CODE_FOR_andv16qi3 ++#define CODE_FOR_lsx_vandi_b CODE_FOR_andv16qi3 ++#define CODE_FOR_lsx_bnz_v CODE_FOR_lsx_bnz_v_b ++#define CODE_FOR_lsx_bz_v CODE_FOR_lsx_bz_v_b ++#define CODE_FOR_lsx_vbitsel_v CODE_FOR_lsx_vbitsel_b ++#define CODE_FOR_lsx_vseqi_b CODE_FOR_lsx_vseq_b ++#define CODE_FOR_lsx_vseqi_h CODE_FOR_lsx_vseq_h ++#define CODE_FOR_lsx_vseqi_w CODE_FOR_lsx_vseq_w ++#define CODE_FOR_lsx_vseqi_d CODE_FOR_lsx_vseq_d ++#define CODE_FOR_lsx_vslti_b CODE_FOR_lsx_vslt_b ++#define CODE_FOR_lsx_vslti_h CODE_FOR_lsx_vslt_h ++#define CODE_FOR_lsx_vslti_w CODE_FOR_lsx_vslt_w ++#define CODE_FOR_lsx_vslti_d CODE_FOR_lsx_vslt_d ++#define CODE_FOR_lsx_vslti_bu CODE_FOR_lsx_vslt_bu ++#define CODE_FOR_lsx_vslti_hu CODE_FOR_lsx_vslt_hu ++#define CODE_FOR_lsx_vslti_wu CODE_FOR_lsx_vslt_wu ++#define CODE_FOR_lsx_vslti_du CODE_FOR_lsx_vslt_du ++#define CODE_FOR_lsx_vslei_b CODE_FOR_lsx_vsle_b ++#define 
CODE_FOR_lsx_vslei_h CODE_FOR_lsx_vsle_h ++#define CODE_FOR_lsx_vslei_w CODE_FOR_lsx_vsle_w ++#define CODE_FOR_lsx_vslei_d CODE_FOR_lsx_vsle_d ++#define CODE_FOR_lsx_vslei_bu CODE_FOR_lsx_vsle_bu ++#define CODE_FOR_lsx_vslei_hu CODE_FOR_lsx_vsle_hu ++#define CODE_FOR_lsx_vslei_wu CODE_FOR_lsx_vsle_wu ++#define CODE_FOR_lsx_vslei_du CODE_FOR_lsx_vsle_du ++#define CODE_FOR_lsx_vdiv_b CODE_FOR_divv16qi3 ++#define CODE_FOR_lsx_vdiv_h CODE_FOR_divv8hi3 ++#define CODE_FOR_lsx_vdiv_w CODE_FOR_divv4si3 ++#define CODE_FOR_lsx_vdiv_d CODE_FOR_divv2di3 ++#define CODE_FOR_lsx_vdiv_bu CODE_FOR_udivv16qi3 ++#define CODE_FOR_lsx_vdiv_hu CODE_FOR_udivv8hi3 ++#define CODE_FOR_lsx_vdiv_wu CODE_FOR_udivv4si3 ++#define CODE_FOR_lsx_vdiv_du CODE_FOR_udivv2di3 ++#define CODE_FOR_lsx_vfadd_s CODE_FOR_addv4sf3 ++#define CODE_FOR_lsx_vfadd_d CODE_FOR_addv2df3 ++#define CODE_FOR_lsx_vftintrz_w_s CODE_FOR_fix_truncv4sfv4si2 ++#define CODE_FOR_lsx_vftintrz_l_d CODE_FOR_fix_truncv2dfv2di2 ++#define CODE_FOR_lsx_vftintrz_wu_s CODE_FOR_fixuns_truncv4sfv4si2 ++#define CODE_FOR_lsx_vftintrz_lu_d CODE_FOR_fixuns_truncv2dfv2di2 ++#define CODE_FOR_lsx_vffint_s_w CODE_FOR_floatv4siv4sf2 ++#define CODE_FOR_lsx_vffint_d_l CODE_FOR_floatv2div2df2 ++#define CODE_FOR_lsx_vffint_s_wu CODE_FOR_floatunsv4siv4sf2 ++#define CODE_FOR_lsx_vffint_d_lu CODE_FOR_floatunsv2div2df2 ++#define CODE_FOR_lsx_vfsub_s CODE_FOR_subv4sf3 ++#define CODE_FOR_lsx_vfsub_d CODE_FOR_subv2df3 ++#define CODE_FOR_lsx_vfmul_s CODE_FOR_mulv4sf3 ++#define CODE_FOR_lsx_vfmul_d CODE_FOR_mulv2df3 ++#define CODE_FOR_lsx_vfdiv_s CODE_FOR_divv4sf3 ++#define CODE_FOR_lsx_vfdiv_d CODE_FOR_divv2df3 ++#define CODE_FOR_lsx_vfmax_s CODE_FOR_smaxv4sf3 ++#define CODE_FOR_lsx_vfmax_d CODE_FOR_smaxv2df3 ++#define CODE_FOR_lsx_vfmin_s CODE_FOR_sminv4sf3 ++#define CODE_FOR_lsx_vfmin_d CODE_FOR_sminv2df3 ++#define CODE_FOR_lsx_vfsqrt_s CODE_FOR_sqrtv4sf2 ++#define CODE_FOR_lsx_vfsqrt_d CODE_FOR_sqrtv2df2 ++#define CODE_FOR_lsx_vflogb_s CODE_FOR_logbv4sf2 
++#define CODE_FOR_lsx_vflogb_d CODE_FOR_logbv2df2 ++#define CODE_FOR_lsx_vmax_b CODE_FOR_smaxv16qi3 ++#define CODE_FOR_lsx_vmax_h CODE_FOR_smaxv8hi3 ++#define CODE_FOR_lsx_vmax_w CODE_FOR_smaxv4si3 ++#define CODE_FOR_lsx_vmax_d CODE_FOR_smaxv2di3 ++#define CODE_FOR_lsx_vmaxi_b CODE_FOR_smaxv16qi3 ++#define CODE_FOR_lsx_vmaxi_h CODE_FOR_smaxv8hi3 ++#define CODE_FOR_lsx_vmaxi_w CODE_FOR_smaxv4si3 ++#define CODE_FOR_lsx_vmaxi_d CODE_FOR_smaxv2di3 ++#define CODE_FOR_lsx_vmax_bu CODE_FOR_umaxv16qi3 ++#define CODE_FOR_lsx_vmax_hu CODE_FOR_umaxv8hi3 ++#define CODE_FOR_lsx_vmax_wu CODE_FOR_umaxv4si3 ++#define CODE_FOR_lsx_vmax_du CODE_FOR_umaxv2di3 ++#define CODE_FOR_lsx_vmaxi_bu CODE_FOR_umaxv16qi3 ++#define CODE_FOR_lsx_vmaxi_hu CODE_FOR_umaxv8hi3 ++#define CODE_FOR_lsx_vmaxi_wu CODE_FOR_umaxv4si3 ++#define CODE_FOR_lsx_vmaxi_du CODE_FOR_umaxv2di3 ++#define CODE_FOR_lsx_vmin_b CODE_FOR_sminv16qi3 ++#define CODE_FOR_lsx_vmin_h CODE_FOR_sminv8hi3 ++#define CODE_FOR_lsx_vmin_w CODE_FOR_sminv4si3 ++#define CODE_FOR_lsx_vmin_d CODE_FOR_sminv2di3 ++#define CODE_FOR_lsx_vmini_b CODE_FOR_sminv16qi3 ++#define CODE_FOR_lsx_vmini_h CODE_FOR_sminv8hi3 ++#define CODE_FOR_lsx_vmini_w CODE_FOR_sminv4si3 ++#define CODE_FOR_lsx_vmini_d CODE_FOR_sminv2di3 ++#define CODE_FOR_lsx_vmin_bu CODE_FOR_uminv16qi3 ++#define CODE_FOR_lsx_vmin_hu CODE_FOR_uminv8hi3 ++#define CODE_FOR_lsx_vmin_wu CODE_FOR_uminv4si3 ++#define CODE_FOR_lsx_vmin_du CODE_FOR_uminv2di3 ++#define CODE_FOR_lsx_vmini_bu CODE_FOR_uminv16qi3 ++#define CODE_FOR_lsx_vmini_hu CODE_FOR_uminv8hi3 ++#define CODE_FOR_lsx_vmini_wu CODE_FOR_uminv4si3 ++#define CODE_FOR_lsx_vmini_du CODE_FOR_uminv2di3 ++#define CODE_FOR_lsx_vmod_b CODE_FOR_modv16qi3 ++#define CODE_FOR_lsx_vmod_h CODE_FOR_modv8hi3 ++#define CODE_FOR_lsx_vmod_w CODE_FOR_modv4si3 ++#define CODE_FOR_lsx_vmod_d CODE_FOR_modv2di3 ++#define CODE_FOR_lsx_vmod_bu CODE_FOR_umodv16qi3 ++#define CODE_FOR_lsx_vmod_hu CODE_FOR_umodv8hi3 ++#define CODE_FOR_lsx_vmod_wu 
CODE_FOR_umodv4si3 ++#define CODE_FOR_lsx_vmod_du CODE_FOR_umodv2di3 ++#define CODE_FOR_lsx_vmul_b CODE_FOR_mulv16qi3 ++#define CODE_FOR_lsx_vmul_h CODE_FOR_mulv8hi3 ++#define CODE_FOR_lsx_vmul_w CODE_FOR_mulv4si3 ++#define CODE_FOR_lsx_vmul_d CODE_FOR_mulv2di3 ++#define CODE_FOR_lsx_vclz_b CODE_FOR_clzv16qi2 ++#define CODE_FOR_lsx_vclz_h CODE_FOR_clzv8hi2 ++#define CODE_FOR_lsx_vclz_w CODE_FOR_clzv4si2 ++#define CODE_FOR_lsx_vclz_d CODE_FOR_clzv2di2 ++#define CODE_FOR_lsx_vnor_v CODE_FOR_lsx_nor_b ++#define CODE_FOR_lsx_vor_v CODE_FOR_iorv16qi3 ++#define CODE_FOR_lsx_vori_b CODE_FOR_iorv16qi3 ++#define CODE_FOR_lsx_vnori_b CODE_FOR_lsx_nor_b ++#define CODE_FOR_lsx_vpcnt_b CODE_FOR_popcountv16qi2 ++#define CODE_FOR_lsx_vpcnt_h CODE_FOR_popcountv8hi2 ++#define CODE_FOR_lsx_vpcnt_w CODE_FOR_popcountv4si2 ++#define CODE_FOR_lsx_vpcnt_d CODE_FOR_popcountv2di2 ++#define CODE_FOR_lsx_vxor_v CODE_FOR_xorv16qi3 ++#define CODE_FOR_lsx_vxori_b CODE_FOR_xorv16qi3 ++#define CODE_FOR_lsx_vsll_b CODE_FOR_vashlv16qi3 ++#define CODE_FOR_lsx_vsll_h CODE_FOR_vashlv8hi3 ++#define CODE_FOR_lsx_vsll_w CODE_FOR_vashlv4si3 ++#define CODE_FOR_lsx_vsll_d CODE_FOR_vashlv2di3 ++#define CODE_FOR_lsx_vslli_b CODE_FOR_vashlv16qi3 ++#define CODE_FOR_lsx_vslli_h CODE_FOR_vashlv8hi3 ++#define CODE_FOR_lsx_vslli_w CODE_FOR_vashlv4si3 ++#define CODE_FOR_lsx_vslli_d CODE_FOR_vashlv2di3 ++#define CODE_FOR_lsx_vsra_b CODE_FOR_vashrv16qi3 ++#define CODE_FOR_lsx_vsra_h CODE_FOR_vashrv8hi3 ++#define CODE_FOR_lsx_vsra_w CODE_FOR_vashrv4si3 ++#define CODE_FOR_lsx_vsra_d CODE_FOR_vashrv2di3 ++#define CODE_FOR_lsx_vsrai_b CODE_FOR_vashrv16qi3 ++#define CODE_FOR_lsx_vsrai_h CODE_FOR_vashrv8hi3 ++#define CODE_FOR_lsx_vsrai_w CODE_FOR_vashrv4si3 ++#define CODE_FOR_lsx_vsrai_d CODE_FOR_vashrv2di3 ++#define CODE_FOR_lsx_vsrl_b CODE_FOR_vlshrv16qi3 ++#define CODE_FOR_lsx_vsrl_h CODE_FOR_vlshrv8hi3 ++#define CODE_FOR_lsx_vsrl_w CODE_FOR_vlshrv4si3 ++#define CODE_FOR_lsx_vsrl_d CODE_FOR_vlshrv2di3 ++#define 
CODE_FOR_lsx_vsrli_b CODE_FOR_vlshrv16qi3 ++#define CODE_FOR_lsx_vsrli_h CODE_FOR_vlshrv8hi3 ++#define CODE_FOR_lsx_vsrli_w CODE_FOR_vlshrv4si3 ++#define CODE_FOR_lsx_vsrli_d CODE_FOR_vlshrv2di3 ++#define CODE_FOR_lsx_vsub_b CODE_FOR_subv16qi3 ++#define CODE_FOR_lsx_vsub_h CODE_FOR_subv8hi3 ++#define CODE_FOR_lsx_vsub_w CODE_FOR_subv4si3 ++#define CODE_FOR_lsx_vsub_d CODE_FOR_subv2di3 ++#define CODE_FOR_lsx_vsubi_bu CODE_FOR_subv16qi3 ++#define CODE_FOR_lsx_vsubi_hu CODE_FOR_subv8hi3 ++#define CODE_FOR_lsx_vsubi_wu CODE_FOR_subv4si3 ++#define CODE_FOR_lsx_vsubi_du CODE_FOR_subv2di3 ++ ++#define CODE_FOR_lsx_vpackod_d CODE_FOR_lsx_vilvh_d ++#define CODE_FOR_lsx_vpackev_d CODE_FOR_lsx_vilvl_d ++#define CODE_FOR_lsx_vpickod_d CODE_FOR_lsx_vilvh_d ++#define CODE_FOR_lsx_vpickev_d CODE_FOR_lsx_vilvl_d ++ ++#define CODE_FOR_lsx_vrepli_b CODE_FOR_lsx_vrepliv16qi ++#define CODE_FOR_lsx_vrepli_h CODE_FOR_lsx_vrepliv8hi ++#define CODE_FOR_lsx_vrepli_w CODE_FOR_lsx_vrepliv4si ++#define CODE_FOR_lsx_vrepli_d CODE_FOR_lsx_vrepliv2di ++#define CODE_FOR_lsx_vsat_b CODE_FOR_lsx_vsat_s_b ++#define CODE_FOR_lsx_vsat_h CODE_FOR_lsx_vsat_s_h ++#define CODE_FOR_lsx_vsat_w CODE_FOR_lsx_vsat_s_w ++#define CODE_FOR_lsx_vsat_d CODE_FOR_lsx_vsat_s_d ++#define CODE_FOR_lsx_vsat_bu CODE_FOR_lsx_vsat_u_bu ++#define CODE_FOR_lsx_vsat_hu CODE_FOR_lsx_vsat_u_hu ++#define CODE_FOR_lsx_vsat_wu CODE_FOR_lsx_vsat_u_wu ++#define CODE_FOR_lsx_vsat_du CODE_FOR_lsx_vsat_u_du ++#define CODE_FOR_lsx_vavg_b CODE_FOR_lsx_vavg_s_b ++#define CODE_FOR_lsx_vavg_h CODE_FOR_lsx_vavg_s_h ++#define CODE_FOR_lsx_vavg_w CODE_FOR_lsx_vavg_s_w ++#define CODE_FOR_lsx_vavg_d CODE_FOR_lsx_vavg_s_d ++#define CODE_FOR_lsx_vavg_bu CODE_FOR_lsx_vavg_u_bu ++#define CODE_FOR_lsx_vavg_hu CODE_FOR_lsx_vavg_u_hu ++#define CODE_FOR_lsx_vavg_wu CODE_FOR_lsx_vavg_u_wu ++#define CODE_FOR_lsx_vavg_du CODE_FOR_lsx_vavg_u_du ++#define CODE_FOR_lsx_vavgr_b CODE_FOR_lsx_vavgr_s_b ++#define CODE_FOR_lsx_vavgr_h CODE_FOR_lsx_vavgr_s_h 
++#define CODE_FOR_lsx_vavgr_w CODE_FOR_lsx_vavgr_s_w ++#define CODE_FOR_lsx_vavgr_d CODE_FOR_lsx_vavgr_s_d ++#define CODE_FOR_lsx_vavgr_bu CODE_FOR_lsx_vavgr_u_bu ++#define CODE_FOR_lsx_vavgr_hu CODE_FOR_lsx_vavgr_u_hu ++#define CODE_FOR_lsx_vavgr_wu CODE_FOR_lsx_vavgr_u_wu ++#define CODE_FOR_lsx_vavgr_du CODE_FOR_lsx_vavgr_u_du ++#define CODE_FOR_lsx_vssub_b CODE_FOR_lsx_vssub_s_b ++#define CODE_FOR_lsx_vssub_h CODE_FOR_lsx_vssub_s_h ++#define CODE_FOR_lsx_vssub_w CODE_FOR_lsx_vssub_s_w ++#define CODE_FOR_lsx_vssub_d CODE_FOR_lsx_vssub_s_d ++#define CODE_FOR_lsx_vssub_bu CODE_FOR_lsx_vssub_u_bu ++#define CODE_FOR_lsx_vssub_hu CODE_FOR_lsx_vssub_u_hu ++#define CODE_FOR_lsx_vssub_wu CODE_FOR_lsx_vssub_u_wu ++#define CODE_FOR_lsx_vssub_du CODE_FOR_lsx_vssub_u_du ++#define CODE_FOR_lsx_vabsd_b CODE_FOR_lsx_vabsd_s_b ++#define CODE_FOR_lsx_vabsd_h CODE_FOR_lsx_vabsd_s_h ++#define CODE_FOR_lsx_vabsd_w CODE_FOR_lsx_vabsd_s_w ++#define CODE_FOR_lsx_vabsd_d CODE_FOR_lsx_vabsd_s_d ++#define CODE_FOR_lsx_vabsd_bu CODE_FOR_lsx_vabsd_u_bu ++#define CODE_FOR_lsx_vabsd_hu CODE_FOR_lsx_vabsd_u_hu ++#define CODE_FOR_lsx_vabsd_wu CODE_FOR_lsx_vabsd_u_wu ++#define CODE_FOR_lsx_vabsd_du CODE_FOR_lsx_vabsd_u_du ++#define CODE_FOR_lsx_vftint_w_s CODE_FOR_lsx_vftint_s_w_s ++#define CODE_FOR_lsx_vftint_l_d CODE_FOR_lsx_vftint_s_l_d ++#define CODE_FOR_lsx_vftint_wu_s CODE_FOR_lsx_vftint_u_wu_s ++#define CODE_FOR_lsx_vftint_lu_d CODE_FOR_lsx_vftint_u_lu_d ++#define CODE_FOR_lsx_vandn_v CODE_FOR_vandnv16qi3 ++#define CODE_FOR_lsx_vorn_v CODE_FOR_vornv16qi3 ++#define CODE_FOR_lsx_vneg_b CODE_FOR_vnegv16qi2 ++#define CODE_FOR_lsx_vneg_h CODE_FOR_vnegv8hi2 ++#define CODE_FOR_lsx_vneg_w CODE_FOR_vnegv4si2 ++#define CODE_FOR_lsx_vneg_d CODE_FOR_vnegv2di2 ++#define CODE_FOR_lsx_vshuf4i_d CODE_FOR_lsx_vshuf4i_d ++#define CODE_FOR_lsx_vbsrl_v CODE_FOR_lsx_vbsrl_b ++#define CODE_FOR_lsx_vbsll_v CODE_FOR_lsx_vbsll_b ++#define CODE_FOR_lsx_vfmadd_s CODE_FOR_fmav4sf4 ++#define CODE_FOR_lsx_vfmadd_d 
CODE_FOR_fmav2df4 ++#define CODE_FOR_lsx_vfmsub_s CODE_FOR_fmsv4sf4 ++#define CODE_FOR_lsx_vfmsub_d CODE_FOR_fmsv2df4 ++#define CODE_FOR_lsx_vfnmadd_s CODE_FOR_vfnmaddv4sf4_nmadd4 ++#define CODE_FOR_lsx_vfnmadd_d CODE_FOR_vfnmaddv2df4_nmadd4 ++#define CODE_FOR_lsx_vfnmsub_s CODE_FOR_vfnmsubv4sf4_nmsub4 ++#define CODE_FOR_lsx_vfnmsub_d CODE_FOR_vfnmsubv2df4_nmsub4 ++ ++#define CODE_FOR_lsx_vmuh_b CODE_FOR_lsx_vmuh_s_b ++#define CODE_FOR_lsx_vmuh_h CODE_FOR_lsx_vmuh_s_h ++#define CODE_FOR_lsx_vmuh_w CODE_FOR_lsx_vmuh_s_w ++#define CODE_FOR_lsx_vmuh_d CODE_FOR_lsx_vmuh_s_d ++#define CODE_FOR_lsx_vmuh_bu CODE_FOR_lsx_vmuh_u_bu ++#define CODE_FOR_lsx_vmuh_hu CODE_FOR_lsx_vmuh_u_hu ++#define CODE_FOR_lsx_vmuh_wu CODE_FOR_lsx_vmuh_u_wu ++#define CODE_FOR_lsx_vmuh_du CODE_FOR_lsx_vmuh_u_du ++#define CODE_FOR_lsx_vsllwil_h_b CODE_FOR_lsx_vsllwil_s_h_b ++#define CODE_FOR_lsx_vsllwil_w_h CODE_FOR_lsx_vsllwil_s_w_h ++#define CODE_FOR_lsx_vsllwil_d_w CODE_FOR_lsx_vsllwil_s_d_w ++#define CODE_FOR_lsx_vsllwil_hu_bu CODE_FOR_lsx_vsllwil_u_hu_bu ++#define CODE_FOR_lsx_vsllwil_wu_hu CODE_FOR_lsx_vsllwil_u_wu_hu ++#define CODE_FOR_lsx_vsllwil_du_wu CODE_FOR_lsx_vsllwil_u_du_wu ++#define CODE_FOR_lsx_vssran_b_h CODE_FOR_lsx_vssran_s_b_h ++#define CODE_FOR_lsx_vssran_h_w CODE_FOR_lsx_vssran_s_h_w ++#define CODE_FOR_lsx_vssran_w_d CODE_FOR_lsx_vssran_s_w_d ++#define CODE_FOR_lsx_vssran_bu_h CODE_FOR_lsx_vssran_u_bu_h ++#define CODE_FOR_lsx_vssran_hu_w CODE_FOR_lsx_vssran_u_hu_w ++#define CODE_FOR_lsx_vssran_wu_d CODE_FOR_lsx_vssran_u_wu_d ++#define CODE_FOR_lsx_vssrarn_b_h CODE_FOR_lsx_vssrarn_s_b_h ++#define CODE_FOR_lsx_vssrarn_h_w CODE_FOR_lsx_vssrarn_s_h_w ++#define CODE_FOR_lsx_vssrarn_w_d CODE_FOR_lsx_vssrarn_s_w_d ++#define CODE_FOR_lsx_vssrarn_bu_h CODE_FOR_lsx_vssrarn_u_bu_h ++#define CODE_FOR_lsx_vssrarn_hu_w CODE_FOR_lsx_vssrarn_u_hu_w ++#define CODE_FOR_lsx_vssrarn_wu_d CODE_FOR_lsx_vssrarn_u_wu_d ++#define CODE_FOR_lsx_vssrln_bu_h CODE_FOR_lsx_vssrln_u_bu_h ++#define 
CODE_FOR_lsx_vssrln_hu_w CODE_FOR_lsx_vssrln_u_hu_w ++#define CODE_FOR_lsx_vssrln_wu_d CODE_FOR_lsx_vssrln_u_wu_d ++#define CODE_FOR_lsx_vssrlrn_bu_h CODE_FOR_lsx_vssrlrn_u_bu_h ++#define CODE_FOR_lsx_vssrlrn_hu_w CODE_FOR_lsx_vssrlrn_u_hu_w ++#define CODE_FOR_lsx_vssrlrn_wu_d CODE_FOR_lsx_vssrlrn_u_wu_d ++ + static const struct loongarch_builtin_description loongarch_builtins[] = { + #define LARCH_MOVFCSR2GR 0 + DIRECT_BUILTIN (movfcsr2gr, LARCH_USI_FTYPE_UQI, hard_float), +@@ -184,6 +489,727 @@ static const struct loongarch_builtin_description loongarch_builtins[] = { + DIRECT_NO_TARGET_BUILTIN (asrtgt_d, LARCH_VOID_FTYPE_DI_DI, default), + DIRECT_NO_TARGET_BUILTIN (syscall, LARCH_VOID_FTYPE_USI, default), + DIRECT_NO_TARGET_BUILTIN (break, LARCH_VOID_FTYPE_USI, default), ++ ++ /* Built-in functions for LSX. */ ++ LSX_BUILTIN (vsll_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vsll_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsll_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsll_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vslli_b, LARCH_V16QI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vslli_h, LARCH_V8HI_FTYPE_V8HI_UQI), ++ LSX_BUILTIN (vslli_w, LARCH_V4SI_FTYPE_V4SI_UQI), ++ LSX_BUILTIN (vslli_d, LARCH_V2DI_FTYPE_V2DI_UQI), ++ LSX_BUILTIN (vsra_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vsra_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsra_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsra_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vsrai_b, LARCH_V16QI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vsrai_h, LARCH_V8HI_FTYPE_V8HI_UQI), ++ LSX_BUILTIN (vsrai_w, LARCH_V4SI_FTYPE_V4SI_UQI), ++ LSX_BUILTIN (vsrai_d, LARCH_V2DI_FTYPE_V2DI_UQI), ++ LSX_BUILTIN (vsrar_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vsrar_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsrar_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsrar_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vsrari_b, LARCH_V16QI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vsrari_h, 
LARCH_V8HI_FTYPE_V8HI_UQI), ++ LSX_BUILTIN (vsrari_w, LARCH_V4SI_FTYPE_V4SI_UQI), ++ LSX_BUILTIN (vsrari_d, LARCH_V2DI_FTYPE_V2DI_UQI), ++ LSX_BUILTIN (vsrl_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vsrl_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsrl_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsrl_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vsrli_b, LARCH_V16QI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vsrli_h, LARCH_V8HI_FTYPE_V8HI_UQI), ++ LSX_BUILTIN (vsrli_w, LARCH_V4SI_FTYPE_V4SI_UQI), ++ LSX_BUILTIN (vsrli_d, LARCH_V2DI_FTYPE_V2DI_UQI), ++ LSX_BUILTIN (vsrlr_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vsrlr_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsrlr_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsrlr_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vsrlri_b, LARCH_V16QI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vsrlri_h, LARCH_V8HI_FTYPE_V8HI_UQI), ++ LSX_BUILTIN (vsrlri_w, LARCH_V4SI_FTYPE_V4SI_UQI), ++ LSX_BUILTIN (vsrlri_d, LARCH_V2DI_FTYPE_V2DI_UQI), ++ LSX_BUILTIN (vbitclr_b, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vbitclr_h, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vbitclr_w, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vbitclr_d, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vbitclri_b, LARCH_UV16QI_FTYPE_UV16QI_UQI), ++ LSX_BUILTIN (vbitclri_h, LARCH_UV8HI_FTYPE_UV8HI_UQI), ++ LSX_BUILTIN (vbitclri_w, LARCH_UV4SI_FTYPE_UV4SI_UQI), ++ LSX_BUILTIN (vbitclri_d, LARCH_UV2DI_FTYPE_UV2DI_UQI), ++ LSX_BUILTIN (vbitset_b, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vbitset_h, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vbitset_w, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vbitset_d, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vbitseti_b, LARCH_UV16QI_FTYPE_UV16QI_UQI), ++ LSX_BUILTIN (vbitseti_h, LARCH_UV8HI_FTYPE_UV8HI_UQI), ++ LSX_BUILTIN (vbitseti_w, LARCH_UV4SI_FTYPE_UV4SI_UQI), ++ LSX_BUILTIN (vbitseti_d, LARCH_UV2DI_FTYPE_UV2DI_UQI), ++ LSX_BUILTIN (vbitrev_b, 
LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vbitrev_h, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vbitrev_w, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vbitrev_d, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vbitrevi_b, LARCH_UV16QI_FTYPE_UV16QI_UQI), ++ LSX_BUILTIN (vbitrevi_h, LARCH_UV8HI_FTYPE_UV8HI_UQI), ++ LSX_BUILTIN (vbitrevi_w, LARCH_UV4SI_FTYPE_UV4SI_UQI), ++ LSX_BUILTIN (vbitrevi_d, LARCH_UV2DI_FTYPE_UV2DI_UQI), ++ LSX_BUILTIN (vadd_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vadd_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vadd_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vadd_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vaddi_bu, LARCH_V16QI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vaddi_hu, LARCH_V8HI_FTYPE_V8HI_UQI), ++ LSX_BUILTIN (vaddi_wu, LARCH_V4SI_FTYPE_V4SI_UQI), ++ LSX_BUILTIN (vaddi_du, LARCH_V2DI_FTYPE_V2DI_UQI), ++ LSX_BUILTIN (vsub_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vsub_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsub_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsub_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vsubi_bu, LARCH_V16QI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vsubi_hu, LARCH_V8HI_FTYPE_V8HI_UQI), ++ LSX_BUILTIN (vsubi_wu, LARCH_V4SI_FTYPE_V4SI_UQI), ++ LSX_BUILTIN (vsubi_du, LARCH_V2DI_FTYPE_V2DI_UQI), ++ LSX_BUILTIN (vmax_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vmax_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vmax_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vmax_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vmaxi_b, LARCH_V16QI_FTYPE_V16QI_QI), ++ LSX_BUILTIN (vmaxi_h, LARCH_V8HI_FTYPE_V8HI_QI), ++ LSX_BUILTIN (vmaxi_w, LARCH_V4SI_FTYPE_V4SI_QI), ++ LSX_BUILTIN (vmaxi_d, LARCH_V2DI_FTYPE_V2DI_QI), ++ LSX_BUILTIN (vmax_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vmax_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vmax_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vmax_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vmaxi_bu, 
LARCH_UV16QI_FTYPE_UV16QI_UQI), ++ LSX_BUILTIN (vmaxi_hu, LARCH_UV8HI_FTYPE_UV8HI_UQI), ++ LSX_BUILTIN (vmaxi_wu, LARCH_UV4SI_FTYPE_UV4SI_UQI), ++ LSX_BUILTIN (vmaxi_du, LARCH_UV2DI_FTYPE_UV2DI_UQI), ++ LSX_BUILTIN (vmin_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vmin_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vmin_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vmin_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vmini_b, LARCH_V16QI_FTYPE_V16QI_QI), ++ LSX_BUILTIN (vmini_h, LARCH_V8HI_FTYPE_V8HI_QI), ++ LSX_BUILTIN (vmini_w, LARCH_V4SI_FTYPE_V4SI_QI), ++ LSX_BUILTIN (vmini_d, LARCH_V2DI_FTYPE_V2DI_QI), ++ LSX_BUILTIN (vmin_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vmin_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vmin_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vmin_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vmini_bu, LARCH_UV16QI_FTYPE_UV16QI_UQI), ++ LSX_BUILTIN (vmini_hu, LARCH_UV8HI_FTYPE_UV8HI_UQI), ++ LSX_BUILTIN (vmini_wu, LARCH_UV4SI_FTYPE_UV4SI_UQI), ++ LSX_BUILTIN (vmini_du, LARCH_UV2DI_FTYPE_UV2DI_UQI), ++ LSX_BUILTIN (vseq_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vseq_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vseq_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vseq_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vseqi_b, LARCH_V16QI_FTYPE_V16QI_QI), ++ LSX_BUILTIN (vseqi_h, LARCH_V8HI_FTYPE_V8HI_QI), ++ LSX_BUILTIN (vseqi_w, LARCH_V4SI_FTYPE_V4SI_QI), ++ LSX_BUILTIN (vseqi_d, LARCH_V2DI_FTYPE_V2DI_QI), ++ LSX_BUILTIN (vslti_b, LARCH_V16QI_FTYPE_V16QI_QI), ++ LSX_BUILTIN (vslt_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vslt_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vslt_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vslt_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vslti_h, LARCH_V8HI_FTYPE_V8HI_QI), ++ LSX_BUILTIN (vslti_w, LARCH_V4SI_FTYPE_V4SI_QI), ++ LSX_BUILTIN (vslti_d, LARCH_V2DI_FTYPE_V2DI_QI), ++ LSX_BUILTIN (vslt_bu, LARCH_V16QI_FTYPE_UV16QI_UV16QI), ++ 
LSX_BUILTIN (vslt_hu, LARCH_V8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vslt_wu, LARCH_V4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vslt_du, LARCH_V2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vslti_bu, LARCH_V16QI_FTYPE_UV16QI_UQI), ++ LSX_BUILTIN (vslti_hu, LARCH_V8HI_FTYPE_UV8HI_UQI), ++ LSX_BUILTIN (vslti_wu, LARCH_V4SI_FTYPE_UV4SI_UQI), ++ LSX_BUILTIN (vslti_du, LARCH_V2DI_FTYPE_UV2DI_UQI), ++ LSX_BUILTIN (vsle_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vsle_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsle_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsle_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vslei_b, LARCH_V16QI_FTYPE_V16QI_QI), ++ LSX_BUILTIN (vslei_h, LARCH_V8HI_FTYPE_V8HI_QI), ++ LSX_BUILTIN (vslei_w, LARCH_V4SI_FTYPE_V4SI_QI), ++ LSX_BUILTIN (vslei_d, LARCH_V2DI_FTYPE_V2DI_QI), ++ LSX_BUILTIN (vsle_bu, LARCH_V16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vsle_hu, LARCH_V8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vsle_wu, LARCH_V4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vsle_du, LARCH_V2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vslei_bu, LARCH_V16QI_FTYPE_UV16QI_UQI), ++ LSX_BUILTIN (vslei_hu, LARCH_V8HI_FTYPE_UV8HI_UQI), ++ LSX_BUILTIN (vslei_wu, LARCH_V4SI_FTYPE_UV4SI_UQI), ++ LSX_BUILTIN (vslei_du, LARCH_V2DI_FTYPE_UV2DI_UQI), ++ LSX_BUILTIN (vsat_b, LARCH_V16QI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vsat_h, LARCH_V8HI_FTYPE_V8HI_UQI), ++ LSX_BUILTIN (vsat_w, LARCH_V4SI_FTYPE_V4SI_UQI), ++ LSX_BUILTIN (vsat_d, LARCH_V2DI_FTYPE_V2DI_UQI), ++ LSX_BUILTIN (vsat_bu, LARCH_UV16QI_FTYPE_UV16QI_UQI), ++ LSX_BUILTIN (vsat_hu, LARCH_UV8HI_FTYPE_UV8HI_UQI), ++ LSX_BUILTIN (vsat_wu, LARCH_UV4SI_FTYPE_UV4SI_UQI), ++ LSX_BUILTIN (vsat_du, LARCH_UV2DI_FTYPE_UV2DI_UQI), ++ LSX_BUILTIN (vadda_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vadda_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vadda_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vadda_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vsadd_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vsadd_h, 
LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsadd_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsadd_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vsadd_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vsadd_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vsadd_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vsadd_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vavg_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vavg_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vavg_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vavg_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vavg_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vavg_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vavg_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vavg_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vavgr_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vavgr_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vavgr_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vavgr_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vavgr_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vavgr_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vavgr_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vavgr_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vssub_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vssub_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vssub_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vssub_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vssub_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vssub_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vssub_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vssub_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vabsd_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vabsd_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vabsd_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vabsd_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vabsd_bu, 
LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vabsd_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vabsd_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vabsd_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vmul_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vmul_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vmul_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vmul_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vmadd_b, LARCH_V16QI_FTYPE_V16QI_V16QI_V16QI), ++ LSX_BUILTIN (vmadd_h, LARCH_V8HI_FTYPE_V8HI_V8HI_V8HI), ++ LSX_BUILTIN (vmadd_w, LARCH_V4SI_FTYPE_V4SI_V4SI_V4SI), ++ LSX_BUILTIN (vmadd_d, LARCH_V2DI_FTYPE_V2DI_V2DI_V2DI), ++ LSX_BUILTIN (vmsub_b, LARCH_V16QI_FTYPE_V16QI_V16QI_V16QI), ++ LSX_BUILTIN (vmsub_h, LARCH_V8HI_FTYPE_V8HI_V8HI_V8HI), ++ LSX_BUILTIN (vmsub_w, LARCH_V4SI_FTYPE_V4SI_V4SI_V4SI), ++ LSX_BUILTIN (vmsub_d, LARCH_V2DI_FTYPE_V2DI_V2DI_V2DI), ++ LSX_BUILTIN (vdiv_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vdiv_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vdiv_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vdiv_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vdiv_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vdiv_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vdiv_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vdiv_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vhaddw_h_b, LARCH_V8HI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vhaddw_w_h, LARCH_V4SI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vhaddw_d_w, LARCH_V2DI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vhaddw_hu_bu, LARCH_UV8HI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vhaddw_wu_hu, LARCH_UV4SI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vhaddw_du_wu, LARCH_UV2DI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vhsubw_h_b, LARCH_V8HI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vhsubw_w_h, LARCH_V4SI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vhsubw_d_w, LARCH_V2DI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vhsubw_hu_bu, LARCH_V8HI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vhsubw_wu_hu, 
LARCH_V4SI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vhsubw_du_wu, LARCH_V2DI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vmod_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vmod_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vmod_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vmod_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vmod_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vmod_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vmod_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vmod_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vreplve_b, LARCH_V16QI_FTYPE_V16QI_SI), ++ LSX_BUILTIN (vreplve_h, LARCH_V8HI_FTYPE_V8HI_SI), ++ LSX_BUILTIN (vreplve_w, LARCH_V4SI_FTYPE_V4SI_SI), ++ LSX_BUILTIN (vreplve_d, LARCH_V2DI_FTYPE_V2DI_SI), ++ LSX_BUILTIN (vreplvei_b, LARCH_V16QI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vreplvei_h, LARCH_V8HI_FTYPE_V8HI_UQI), ++ LSX_BUILTIN (vreplvei_w, LARCH_V4SI_FTYPE_V4SI_UQI), ++ LSX_BUILTIN (vreplvei_d, LARCH_V2DI_FTYPE_V2DI_UQI), ++ LSX_BUILTIN (vpickev_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vpickev_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vpickev_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vpickev_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vpickod_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vpickod_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vpickod_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vpickod_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vilvh_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vilvh_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vilvh_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vilvh_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vilvl_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vilvl_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vilvl_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vilvl_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vpackev_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vpackev_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ 
LSX_BUILTIN (vpackev_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vpackev_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vpackod_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vpackod_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vpackod_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vpackod_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vshuf_h, LARCH_V8HI_FTYPE_V8HI_V8HI_V8HI), ++ LSX_BUILTIN (vshuf_w, LARCH_V4SI_FTYPE_V4SI_V4SI_V4SI), ++ LSX_BUILTIN (vshuf_d, LARCH_V2DI_FTYPE_V2DI_V2DI_V2DI), ++ LSX_BUILTIN (vand_v, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vandi_b, LARCH_UV16QI_FTYPE_UV16QI_UQI), ++ LSX_BUILTIN (vor_v, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vori_b, LARCH_UV16QI_FTYPE_UV16QI_UQI), ++ LSX_BUILTIN (vnor_v, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vnori_b, LARCH_UV16QI_FTYPE_UV16QI_UQI), ++ LSX_BUILTIN (vxor_v, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vxori_b, LARCH_UV16QI_FTYPE_UV16QI_UQI), ++ LSX_BUILTIN (vbitsel_v, LARCH_UV16QI_FTYPE_UV16QI_UV16QI_UV16QI), ++ LSX_BUILTIN (vbitseli_b, LARCH_UV16QI_FTYPE_UV16QI_UV16QI_USI), ++ LSX_BUILTIN (vshuf4i_b, LARCH_V16QI_FTYPE_V16QI_USI), ++ LSX_BUILTIN (vshuf4i_h, LARCH_V8HI_FTYPE_V8HI_USI), ++ LSX_BUILTIN (vshuf4i_w, LARCH_V4SI_FTYPE_V4SI_USI), ++ LSX_BUILTIN (vreplgr2vr_b, LARCH_V16QI_FTYPE_SI), ++ LSX_BUILTIN (vreplgr2vr_h, LARCH_V8HI_FTYPE_SI), ++ LSX_BUILTIN (vreplgr2vr_w, LARCH_V4SI_FTYPE_SI), ++ LSX_BUILTIN (vreplgr2vr_d, LARCH_V2DI_FTYPE_DI), ++ LSX_BUILTIN (vpcnt_b, LARCH_V16QI_FTYPE_V16QI), ++ LSX_BUILTIN (vpcnt_h, LARCH_V8HI_FTYPE_V8HI), ++ LSX_BUILTIN (vpcnt_w, LARCH_V4SI_FTYPE_V4SI), ++ LSX_BUILTIN (vpcnt_d, LARCH_V2DI_FTYPE_V2DI), ++ LSX_BUILTIN (vclo_b, LARCH_V16QI_FTYPE_V16QI), ++ LSX_BUILTIN (vclo_h, LARCH_V8HI_FTYPE_V8HI), ++ LSX_BUILTIN (vclo_w, LARCH_V4SI_FTYPE_V4SI), ++ LSX_BUILTIN (vclo_d, LARCH_V2DI_FTYPE_V2DI), ++ LSX_BUILTIN (vclz_b, LARCH_V16QI_FTYPE_V16QI), ++ LSX_BUILTIN (vclz_h, LARCH_V8HI_FTYPE_V8HI), ++ LSX_BUILTIN 
(vclz_w, LARCH_V4SI_FTYPE_V4SI), ++ LSX_BUILTIN (vclz_d, LARCH_V2DI_FTYPE_V2DI), ++ LSX_BUILTIN (vpickve2gr_b, LARCH_SI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vpickve2gr_h, LARCH_SI_FTYPE_V8HI_UQI), ++ LSX_BUILTIN (vpickve2gr_w, LARCH_SI_FTYPE_V4SI_UQI), ++ LSX_BUILTIN (vpickve2gr_d, LARCH_DI_FTYPE_V2DI_UQI), ++ LSX_BUILTIN (vpickve2gr_bu, LARCH_USI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vpickve2gr_hu, LARCH_USI_FTYPE_V8HI_UQI), ++ LSX_BUILTIN (vpickve2gr_wu, LARCH_USI_FTYPE_V4SI_UQI), ++ LSX_BUILTIN (vpickve2gr_du, LARCH_UDI_FTYPE_V2DI_UQI), ++ LSX_BUILTIN (vinsgr2vr_b, LARCH_V16QI_FTYPE_V16QI_SI_UQI), ++ LSX_BUILTIN (vinsgr2vr_h, LARCH_V8HI_FTYPE_V8HI_SI_UQI), ++ LSX_BUILTIN (vinsgr2vr_w, LARCH_V4SI_FTYPE_V4SI_SI_UQI), ++ LSX_BUILTIN (vinsgr2vr_d, LARCH_V2DI_FTYPE_V2DI_DI_UQI), ++ LSX_BUILTIN_TEST_BRANCH (bnz_b, LARCH_SI_FTYPE_UV16QI), ++ LSX_BUILTIN_TEST_BRANCH (bnz_h, LARCH_SI_FTYPE_UV8HI), ++ LSX_BUILTIN_TEST_BRANCH (bnz_w, LARCH_SI_FTYPE_UV4SI), ++ LSX_BUILTIN_TEST_BRANCH (bnz_d, LARCH_SI_FTYPE_UV2DI), ++ LSX_BUILTIN_TEST_BRANCH (bz_b, LARCH_SI_FTYPE_UV16QI), ++ LSX_BUILTIN_TEST_BRANCH (bz_h, LARCH_SI_FTYPE_UV8HI), ++ LSX_BUILTIN_TEST_BRANCH (bz_w, LARCH_SI_FTYPE_UV4SI), ++ LSX_BUILTIN_TEST_BRANCH (bz_d, LARCH_SI_FTYPE_UV2DI), ++ LSX_BUILTIN_TEST_BRANCH (bz_v, LARCH_SI_FTYPE_UV16QI), ++ LSX_BUILTIN_TEST_BRANCH (bnz_v, LARCH_SI_FTYPE_UV16QI), ++ LSX_BUILTIN (vrepli_b, LARCH_V16QI_FTYPE_HI), ++ LSX_BUILTIN (vrepli_h, LARCH_V8HI_FTYPE_HI), ++ LSX_BUILTIN (vrepli_w, LARCH_V4SI_FTYPE_HI), ++ LSX_BUILTIN (vrepli_d, LARCH_V2DI_FTYPE_HI), ++ LSX_BUILTIN (vfcmp_caf_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_caf_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_cor_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_cor_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_cun_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_cun_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_cune_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_cune_d, 
LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_cueq_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_cueq_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_ceq_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_ceq_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_cne_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_cne_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_clt_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_clt_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_cult_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_cult_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_cle_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_cle_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_cule_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_cule_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_saf_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_saf_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_sor_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_sor_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_sun_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_sun_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_sune_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_sune_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_sueq_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_sueq_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_seq_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_seq_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_sne_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_sne_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_slt_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_slt_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_sult_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_sult_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_sle_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_sle_d, 
LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_sule_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_sule_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfadd_s, LARCH_V4SF_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfadd_d, LARCH_V2DF_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfsub_s, LARCH_V4SF_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfsub_d, LARCH_V2DF_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfmul_s, LARCH_V4SF_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfmul_d, LARCH_V2DF_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfdiv_s, LARCH_V4SF_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfdiv_d, LARCH_V2DF_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcvt_h_s, LARCH_V8HI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcvt_s_d, LARCH_V4SF_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfmin_s, LARCH_V4SF_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfmin_d, LARCH_V2DF_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfmina_s, LARCH_V4SF_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfmina_d, LARCH_V2DF_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfmax_s, LARCH_V4SF_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfmax_d, LARCH_V2DF_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfmaxa_s, LARCH_V4SF_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfmaxa_d, LARCH_V2DF_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfclass_s, LARCH_V4SI_FTYPE_V4SF), ++ LSX_BUILTIN (vfclass_d, LARCH_V2DI_FTYPE_V2DF), ++ LSX_BUILTIN (vfsqrt_s, LARCH_V4SF_FTYPE_V4SF), ++ LSX_BUILTIN (vfsqrt_d, LARCH_V2DF_FTYPE_V2DF), ++ LSX_BUILTIN (vfrecip_s, LARCH_V4SF_FTYPE_V4SF), ++ LSX_BUILTIN (vfrecip_d, LARCH_V2DF_FTYPE_V2DF), ++ LSX_BUILTIN (vfrint_s, LARCH_V4SF_FTYPE_V4SF), ++ LSX_BUILTIN (vfrint_d, LARCH_V2DF_FTYPE_V2DF), ++ LSX_BUILTIN (vfrsqrt_s, LARCH_V4SF_FTYPE_V4SF), ++ LSX_BUILTIN (vfrsqrt_d, LARCH_V2DF_FTYPE_V2DF), ++ LSX_BUILTIN (vflogb_s, LARCH_V4SF_FTYPE_V4SF), ++ LSX_BUILTIN (vflogb_d, LARCH_V2DF_FTYPE_V2DF), ++ LSX_BUILTIN (vfcvth_s_h, LARCH_V4SF_FTYPE_V8HI), ++ LSX_BUILTIN (vfcvth_d_s, LARCH_V2DF_FTYPE_V4SF), ++ LSX_BUILTIN (vfcvtl_s_h, LARCH_V4SF_FTYPE_V8HI), ++ LSX_BUILTIN (vfcvtl_d_s, LARCH_V2DF_FTYPE_V4SF), ++ LSX_BUILTIN (vftint_w_s, LARCH_V4SI_FTYPE_V4SF), ++ 
LSX_BUILTIN (vftint_l_d, LARCH_V2DI_FTYPE_V2DF), ++ LSX_BUILTIN (vftint_wu_s, LARCH_UV4SI_FTYPE_V4SF), ++ LSX_BUILTIN (vftint_lu_d, LARCH_UV2DI_FTYPE_V2DF), ++ LSX_BUILTIN (vftintrz_w_s, LARCH_V4SI_FTYPE_V4SF), ++ LSX_BUILTIN (vftintrz_l_d, LARCH_V2DI_FTYPE_V2DF), ++ LSX_BUILTIN (vftintrz_wu_s, LARCH_UV4SI_FTYPE_V4SF), ++ LSX_BUILTIN (vftintrz_lu_d, LARCH_UV2DI_FTYPE_V2DF), ++ LSX_BUILTIN (vffint_s_w, LARCH_V4SF_FTYPE_V4SI), ++ LSX_BUILTIN (vffint_d_l, LARCH_V2DF_FTYPE_V2DI), ++ LSX_BUILTIN (vffint_s_wu, LARCH_V4SF_FTYPE_UV4SI), ++ LSX_BUILTIN (vffint_d_lu, LARCH_V2DF_FTYPE_UV2DI), ++ ++ LSX_BUILTIN (vandn_v, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vneg_b, LARCH_V16QI_FTYPE_V16QI), ++ LSX_BUILTIN (vneg_h, LARCH_V8HI_FTYPE_V8HI), ++ LSX_BUILTIN (vneg_w, LARCH_V4SI_FTYPE_V4SI), ++ LSX_BUILTIN (vneg_d, LARCH_V2DI_FTYPE_V2DI), ++ LSX_BUILTIN (vmuh_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vmuh_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vmuh_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vmuh_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vmuh_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vmuh_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vmuh_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vmuh_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vsllwil_h_b, LARCH_V8HI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vsllwil_w_h, LARCH_V4SI_FTYPE_V8HI_UQI), ++ LSX_BUILTIN (vsllwil_d_w, LARCH_V2DI_FTYPE_V4SI_UQI), ++ LSX_BUILTIN (vsllwil_hu_bu, LARCH_UV8HI_FTYPE_UV16QI_UQI), ++ LSX_BUILTIN (vsllwil_wu_hu, LARCH_UV4SI_FTYPE_UV8HI_UQI), ++ LSX_BUILTIN (vsllwil_du_wu, LARCH_UV2DI_FTYPE_UV4SI_UQI), ++ LSX_BUILTIN (vsran_b_h, LARCH_V16QI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsran_h_w, LARCH_V8HI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsran_w_d, LARCH_V4SI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vssran_b_h, LARCH_V16QI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vssran_h_w, LARCH_V8HI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vssran_w_d, LARCH_V4SI_FTYPE_V2DI_V2DI), ++ 
LSX_BUILTIN (vssran_bu_h, LARCH_UV16QI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vssran_hu_w, LARCH_UV8HI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vssran_wu_d, LARCH_UV4SI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vsrarn_b_h, LARCH_V16QI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsrarn_h_w, LARCH_V8HI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsrarn_w_d, LARCH_V4SI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vssrarn_b_h, LARCH_V16QI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vssrarn_h_w, LARCH_V8HI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vssrarn_w_d, LARCH_V4SI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vssrarn_bu_h, LARCH_UV16QI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vssrarn_hu_w, LARCH_UV8HI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vssrarn_wu_d, LARCH_UV4SI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vsrln_b_h, LARCH_V16QI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsrln_h_w, LARCH_V8HI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsrln_w_d, LARCH_V4SI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vssrln_bu_h, LARCH_UV16QI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vssrln_hu_w, LARCH_UV8HI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vssrln_wu_d, LARCH_UV4SI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vsrlrn_b_h, LARCH_V16QI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsrlrn_h_w, LARCH_V8HI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsrlrn_w_d, LARCH_V4SI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vssrlrn_bu_h, LARCH_UV16QI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vssrlrn_hu_w, LARCH_UV8HI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vssrlrn_wu_d, LARCH_UV4SI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vfrstpi_b, LARCH_V16QI_FTYPE_V16QI_V16QI_UQI), ++ LSX_BUILTIN (vfrstpi_h, LARCH_V8HI_FTYPE_V8HI_V8HI_UQI), ++ LSX_BUILTIN (vfrstp_b, LARCH_V16QI_FTYPE_V16QI_V16QI_V16QI), ++ LSX_BUILTIN (vfrstp_h, LARCH_V8HI_FTYPE_V8HI_V8HI_V8HI), ++ LSX_BUILTIN (vshuf4i_d, LARCH_V2DI_FTYPE_V2DI_V2DI_USI), ++ LSX_BUILTIN (vbsrl_v, LARCH_V16QI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vbsll_v, LARCH_V16QI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vextrins_b, LARCH_V16QI_FTYPE_V16QI_V16QI_USI), ++ LSX_BUILTIN (vextrins_h, LARCH_V8HI_FTYPE_V8HI_V8HI_USI), ++ LSX_BUILTIN (vextrins_w, 
LARCH_V4SI_FTYPE_V4SI_V4SI_USI), ++ LSX_BUILTIN (vextrins_d, LARCH_V2DI_FTYPE_V2DI_V2DI_USI), ++ LSX_BUILTIN (vmskltz_b, LARCH_V16QI_FTYPE_V16QI), ++ LSX_BUILTIN (vmskltz_h, LARCH_V8HI_FTYPE_V8HI), ++ LSX_BUILTIN (vmskltz_w, LARCH_V4SI_FTYPE_V4SI), ++ LSX_BUILTIN (vmskltz_d, LARCH_V2DI_FTYPE_V2DI), ++ LSX_BUILTIN (vsigncov_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vsigncov_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsigncov_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsigncov_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vfmadd_s, LARCH_V4SF_FTYPE_V4SF_V4SF_V4SF), ++ LSX_BUILTIN (vfmadd_d, LARCH_V2DF_FTYPE_V2DF_V2DF_V2DF), ++ LSX_BUILTIN (vfmsub_s, LARCH_V4SF_FTYPE_V4SF_V4SF_V4SF), ++ LSX_BUILTIN (vfmsub_d, LARCH_V2DF_FTYPE_V2DF_V2DF_V2DF), ++ LSX_BUILTIN (vfnmadd_s, LARCH_V4SF_FTYPE_V4SF_V4SF_V4SF), ++ LSX_BUILTIN (vfnmadd_d, LARCH_V2DF_FTYPE_V2DF_V2DF_V2DF), ++ LSX_BUILTIN (vfnmsub_s, LARCH_V4SF_FTYPE_V4SF_V4SF_V4SF), ++ LSX_BUILTIN (vfnmsub_d, LARCH_V2DF_FTYPE_V2DF_V2DF_V2DF), ++ LSX_BUILTIN (vftintrne_w_s, LARCH_V4SI_FTYPE_V4SF), ++ LSX_BUILTIN (vftintrne_l_d, LARCH_V2DI_FTYPE_V2DF), ++ LSX_BUILTIN (vftintrp_w_s, LARCH_V4SI_FTYPE_V4SF), ++ LSX_BUILTIN (vftintrp_l_d, LARCH_V2DI_FTYPE_V2DF), ++ LSX_BUILTIN (vftintrm_w_s, LARCH_V4SI_FTYPE_V4SF), ++ LSX_BUILTIN (vftintrm_l_d, LARCH_V2DI_FTYPE_V2DF), ++ LSX_BUILTIN (vftint_w_d, LARCH_V4SI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vffint_s_l, LARCH_V4SF_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vftintrz_w_d, LARCH_V4SI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vftintrp_w_d, LARCH_V4SI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vftintrm_w_d, LARCH_V4SI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vftintrne_w_d, LARCH_V4SI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vftintl_l_s, LARCH_V2DI_FTYPE_V4SF), ++ LSX_BUILTIN (vftinth_l_s, LARCH_V2DI_FTYPE_V4SF), ++ LSX_BUILTIN (vffinth_d_w, LARCH_V2DF_FTYPE_V4SI), ++ LSX_BUILTIN (vffintl_d_w, LARCH_V2DF_FTYPE_V4SI), ++ LSX_BUILTIN (vftintrzl_l_s, LARCH_V2DI_FTYPE_V4SF), ++ LSX_BUILTIN (vftintrzh_l_s, 
LARCH_V2DI_FTYPE_V4SF), ++ LSX_BUILTIN (vftintrpl_l_s, LARCH_V2DI_FTYPE_V4SF), ++ LSX_BUILTIN (vftintrph_l_s, LARCH_V2DI_FTYPE_V4SF), ++ LSX_BUILTIN (vftintrml_l_s, LARCH_V2DI_FTYPE_V4SF), ++ LSX_BUILTIN (vftintrmh_l_s, LARCH_V2DI_FTYPE_V4SF), ++ LSX_BUILTIN (vftintrnel_l_s, LARCH_V2DI_FTYPE_V4SF), ++ LSX_BUILTIN (vftintrneh_l_s, LARCH_V2DI_FTYPE_V4SF), ++ LSX_BUILTIN (vfrintrne_s, LARCH_V4SF_FTYPE_V4SF), ++ LSX_BUILTIN (vfrintrne_d, LARCH_V2DF_FTYPE_V2DF), ++ LSX_BUILTIN (vfrintrz_s, LARCH_V4SF_FTYPE_V4SF), ++ LSX_BUILTIN (vfrintrz_d, LARCH_V2DF_FTYPE_V2DF), ++ LSX_BUILTIN (vfrintrp_s, LARCH_V4SF_FTYPE_V4SF), ++ LSX_BUILTIN (vfrintrp_d, LARCH_V2DF_FTYPE_V2DF), ++ LSX_BUILTIN (vfrintrm_s, LARCH_V4SF_FTYPE_V4SF), ++ LSX_BUILTIN (vfrintrm_d, LARCH_V2DF_FTYPE_V2DF), ++ LSX_NO_TARGET_BUILTIN (vstelm_b, LARCH_VOID_FTYPE_V16QI_CVPOINTER_SI_UQI), ++ LSX_NO_TARGET_BUILTIN (vstelm_h, LARCH_VOID_FTYPE_V8HI_CVPOINTER_SI_UQI), ++ LSX_NO_TARGET_BUILTIN (vstelm_w, LARCH_VOID_FTYPE_V4SI_CVPOINTER_SI_UQI), ++ LSX_NO_TARGET_BUILTIN (vstelm_d, LARCH_VOID_FTYPE_V2DI_CVPOINTER_SI_UQI), ++ LSX_BUILTIN (vaddwev_d_w, LARCH_V2DI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vaddwev_w_h, LARCH_V4SI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vaddwev_h_b, LARCH_V8HI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vaddwod_d_w, LARCH_V2DI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vaddwod_w_h, LARCH_V4SI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vaddwod_h_b, LARCH_V8HI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vaddwev_d_wu, LARCH_V2DI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vaddwev_w_hu, LARCH_V4SI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vaddwev_h_bu, LARCH_V8HI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vaddwod_d_wu, LARCH_V2DI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vaddwod_w_hu, LARCH_V4SI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vaddwod_h_bu, LARCH_V8HI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vaddwev_d_wu_w, LARCH_V2DI_FTYPE_UV4SI_V4SI), ++ LSX_BUILTIN (vaddwev_w_hu_h, LARCH_V4SI_FTYPE_UV8HI_V8HI), ++ LSX_BUILTIN (vaddwev_h_bu_b, LARCH_V8HI_FTYPE_UV16QI_V16QI), ++ 
LSX_BUILTIN (vaddwod_d_wu_w, LARCH_V2DI_FTYPE_UV4SI_V4SI), ++ LSX_BUILTIN (vaddwod_w_hu_h, LARCH_V4SI_FTYPE_UV8HI_V8HI), ++ LSX_BUILTIN (vaddwod_h_bu_b, LARCH_V8HI_FTYPE_UV16QI_V16QI), ++ LSX_BUILTIN (vsubwev_d_w, LARCH_V2DI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsubwev_w_h, LARCH_V4SI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsubwev_h_b, LARCH_V8HI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vsubwod_d_w, LARCH_V2DI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsubwod_w_h, LARCH_V4SI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsubwod_h_b, LARCH_V8HI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vsubwev_d_wu, LARCH_V2DI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vsubwev_w_hu, LARCH_V4SI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vsubwev_h_bu, LARCH_V8HI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vsubwod_d_wu, LARCH_V2DI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vsubwod_w_hu, LARCH_V4SI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vsubwod_h_bu, LARCH_V8HI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vaddwev_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vaddwod_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vaddwev_q_du, LARCH_V2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vaddwod_q_du, LARCH_V2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vsubwev_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vsubwod_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vsubwev_q_du, LARCH_V2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vsubwod_q_du, LARCH_V2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vaddwev_q_du_d, LARCH_V2DI_FTYPE_UV2DI_V2DI), ++ LSX_BUILTIN (vaddwod_q_du_d, LARCH_V2DI_FTYPE_UV2DI_V2DI), ++ ++ LSX_BUILTIN (vmulwev_d_w, LARCH_V2DI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vmulwev_w_h, LARCH_V4SI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vmulwev_h_b, LARCH_V8HI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vmulwod_d_w, LARCH_V2DI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vmulwod_w_h, LARCH_V4SI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vmulwod_h_b, LARCH_V8HI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vmulwev_d_wu, LARCH_V2DI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vmulwev_w_hu, LARCH_V4SI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN 
(vmulwev_h_bu, LARCH_V8HI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vmulwod_d_wu, LARCH_V2DI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vmulwod_w_hu, LARCH_V4SI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vmulwod_h_bu, LARCH_V8HI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vmulwev_d_wu_w, LARCH_V2DI_FTYPE_UV4SI_V4SI), ++ LSX_BUILTIN (vmulwev_w_hu_h, LARCH_V4SI_FTYPE_UV8HI_V8HI), ++ LSX_BUILTIN (vmulwev_h_bu_b, LARCH_V8HI_FTYPE_UV16QI_V16QI), ++ LSX_BUILTIN (vmulwod_d_wu_w, LARCH_V2DI_FTYPE_UV4SI_V4SI), ++ LSX_BUILTIN (vmulwod_w_hu_h, LARCH_V4SI_FTYPE_UV8HI_V8HI), ++ LSX_BUILTIN (vmulwod_h_bu_b, LARCH_V8HI_FTYPE_UV16QI_V16QI), ++ LSX_BUILTIN (vmulwev_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vmulwod_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vmulwev_q_du, LARCH_V2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vmulwod_q_du, LARCH_V2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vmulwev_q_du_d, LARCH_V2DI_FTYPE_UV2DI_V2DI), ++ LSX_BUILTIN (vmulwod_q_du_d, LARCH_V2DI_FTYPE_UV2DI_V2DI), ++ LSX_BUILTIN (vhaddw_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vhaddw_qu_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vhsubw_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vhsubw_qu_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vmaddwev_d_w, LARCH_V2DI_FTYPE_V2DI_V4SI_V4SI), ++ LSX_BUILTIN (vmaddwev_w_h, LARCH_V4SI_FTYPE_V4SI_V8HI_V8HI), ++ LSX_BUILTIN (vmaddwev_h_b, LARCH_V8HI_FTYPE_V8HI_V16QI_V16QI), ++ LSX_BUILTIN (vmaddwev_d_wu, LARCH_UV2DI_FTYPE_UV2DI_UV4SI_UV4SI), ++ LSX_BUILTIN (vmaddwev_w_hu, LARCH_UV4SI_FTYPE_UV4SI_UV8HI_UV8HI), ++ LSX_BUILTIN (vmaddwev_h_bu, LARCH_UV8HI_FTYPE_UV8HI_UV16QI_UV16QI), ++ LSX_BUILTIN (vmaddwod_d_w, LARCH_V2DI_FTYPE_V2DI_V4SI_V4SI), ++ LSX_BUILTIN (vmaddwod_w_h, LARCH_V4SI_FTYPE_V4SI_V8HI_V8HI), ++ LSX_BUILTIN (vmaddwod_h_b, LARCH_V8HI_FTYPE_V8HI_V16QI_V16QI), ++ LSX_BUILTIN (vmaddwod_d_wu, LARCH_UV2DI_FTYPE_UV2DI_UV4SI_UV4SI), ++ LSX_BUILTIN (vmaddwod_w_hu, LARCH_UV4SI_FTYPE_UV4SI_UV8HI_UV8HI), ++ LSX_BUILTIN (vmaddwod_h_bu, 
LARCH_UV8HI_FTYPE_UV8HI_UV16QI_UV16QI), ++ LSX_BUILTIN (vmaddwev_d_wu_w, LARCH_V2DI_FTYPE_V2DI_UV4SI_V4SI), ++ LSX_BUILTIN (vmaddwev_w_hu_h, LARCH_V4SI_FTYPE_V4SI_UV8HI_V8HI), ++ LSX_BUILTIN (vmaddwev_h_bu_b, LARCH_V8HI_FTYPE_V8HI_UV16QI_V16QI), ++ LSX_BUILTIN (vmaddwod_d_wu_w, LARCH_V2DI_FTYPE_V2DI_UV4SI_V4SI), ++ LSX_BUILTIN (vmaddwod_w_hu_h, LARCH_V4SI_FTYPE_V4SI_UV8HI_V8HI), ++ LSX_BUILTIN (vmaddwod_h_bu_b, LARCH_V8HI_FTYPE_V8HI_UV16QI_V16QI), ++ LSX_BUILTIN (vmaddwev_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI_V2DI), ++ LSX_BUILTIN (vmaddwod_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI_V2DI), ++ LSX_BUILTIN (vmaddwev_q_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI_UV2DI), ++ LSX_BUILTIN (vmaddwod_q_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI_UV2DI), ++ LSX_BUILTIN (vmaddwev_q_du_d, LARCH_V2DI_FTYPE_V2DI_UV2DI_V2DI), ++ LSX_BUILTIN (vmaddwod_q_du_d, LARCH_V2DI_FTYPE_V2DI_UV2DI_V2DI), ++ LSX_BUILTIN (vrotr_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vrotr_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vrotr_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vrotr_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vadd_q, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vsub_q, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vldrepl_b, LARCH_V16QI_FTYPE_CVPOINTER_SI), ++ LSX_BUILTIN (vldrepl_h, LARCH_V8HI_FTYPE_CVPOINTER_SI), ++ LSX_BUILTIN (vldrepl_w, LARCH_V4SI_FTYPE_CVPOINTER_SI), ++ LSX_BUILTIN (vldrepl_d, LARCH_V2DI_FTYPE_CVPOINTER_SI), ++ ++ LSX_BUILTIN (vmskgez_b, LARCH_V16QI_FTYPE_V16QI), ++ LSX_BUILTIN (vmsknz_b, LARCH_V16QI_FTYPE_V16QI), ++ LSX_BUILTIN (vexth_h_b, LARCH_V8HI_FTYPE_V16QI), ++ LSX_BUILTIN (vexth_w_h, LARCH_V4SI_FTYPE_V8HI), ++ LSX_BUILTIN (vexth_d_w, LARCH_V2DI_FTYPE_V4SI), ++ LSX_BUILTIN (vexth_q_d, LARCH_V2DI_FTYPE_V2DI), ++ LSX_BUILTIN (vexth_hu_bu, LARCH_UV8HI_FTYPE_UV16QI), ++ LSX_BUILTIN (vexth_wu_hu, LARCH_UV4SI_FTYPE_UV8HI), ++ LSX_BUILTIN (vexth_du_wu, LARCH_UV2DI_FTYPE_UV4SI), ++ LSX_BUILTIN (vexth_qu_du, LARCH_UV2DI_FTYPE_UV2DI), ++ LSX_BUILTIN (vrotri_b, 
LARCH_V16QI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vrotri_h, LARCH_V8HI_FTYPE_V8HI_UQI), ++ LSX_BUILTIN (vrotri_w, LARCH_V4SI_FTYPE_V4SI_UQI), ++ LSX_BUILTIN (vrotri_d, LARCH_V2DI_FTYPE_V2DI_UQI), ++ LSX_BUILTIN (vextl_q_d, LARCH_V2DI_FTYPE_V2DI), ++ LSX_BUILTIN (vsrlni_b_h, LARCH_V16QI_FTYPE_V16QI_V16QI_USI), ++ LSX_BUILTIN (vsrlni_h_w, LARCH_V8HI_FTYPE_V8HI_V8HI_USI), ++ LSX_BUILTIN (vsrlni_w_d, LARCH_V4SI_FTYPE_V4SI_V4SI_USI), ++ LSX_BUILTIN (vsrlni_d_q, LARCH_V2DI_FTYPE_V2DI_V2DI_USI), ++ LSX_BUILTIN (vsrlrni_b_h, LARCH_V16QI_FTYPE_V16QI_V16QI_USI), ++ LSX_BUILTIN (vsrlrni_h_w, LARCH_V8HI_FTYPE_V8HI_V8HI_USI), ++ LSX_BUILTIN (vsrlrni_w_d, LARCH_V4SI_FTYPE_V4SI_V4SI_USI), ++ LSX_BUILTIN (vsrlrni_d_q, LARCH_V2DI_FTYPE_V2DI_V2DI_USI), ++ LSX_BUILTIN (vssrlni_b_h, LARCH_V16QI_FTYPE_V16QI_V16QI_USI), ++ LSX_BUILTIN (vssrlni_h_w, LARCH_V8HI_FTYPE_V8HI_V8HI_USI), ++ LSX_BUILTIN (vssrlni_w_d, LARCH_V4SI_FTYPE_V4SI_V4SI_USI), ++ LSX_BUILTIN (vssrlni_d_q, LARCH_V2DI_FTYPE_V2DI_V2DI_USI), ++ LSX_BUILTIN (vssrlni_bu_h, LARCH_UV16QI_FTYPE_UV16QI_V16QI_USI), ++ LSX_BUILTIN (vssrlni_hu_w, LARCH_UV8HI_FTYPE_UV8HI_V8HI_USI), ++ LSX_BUILTIN (vssrlni_wu_d, LARCH_UV4SI_FTYPE_UV4SI_V4SI_USI), ++ LSX_BUILTIN (vssrlni_du_q, LARCH_UV2DI_FTYPE_UV2DI_V2DI_USI), ++ LSX_BUILTIN (vssrlrni_b_h, LARCH_V16QI_FTYPE_V16QI_V16QI_USI), ++ LSX_BUILTIN (vssrlrni_h_w, LARCH_V8HI_FTYPE_V8HI_V8HI_USI), ++ LSX_BUILTIN (vssrlrni_w_d, LARCH_V4SI_FTYPE_V4SI_V4SI_USI), ++ LSX_BUILTIN (vssrlrni_d_q, LARCH_V2DI_FTYPE_V2DI_V2DI_USI), ++ LSX_BUILTIN (vssrlrni_bu_h, LARCH_UV16QI_FTYPE_UV16QI_V16QI_USI), ++ LSX_BUILTIN (vssrlrni_hu_w, LARCH_UV8HI_FTYPE_UV8HI_V8HI_USI), ++ LSX_BUILTIN (vssrlrni_wu_d, LARCH_UV4SI_FTYPE_UV4SI_V4SI_USI), ++ LSX_BUILTIN (vssrlrni_du_q, LARCH_UV2DI_FTYPE_UV2DI_V2DI_USI), ++ LSX_BUILTIN (vsrani_b_h, LARCH_V16QI_FTYPE_V16QI_V16QI_USI), ++ LSX_BUILTIN (vsrani_h_w, LARCH_V8HI_FTYPE_V8HI_V8HI_USI), ++ LSX_BUILTIN (vsrani_w_d, LARCH_V4SI_FTYPE_V4SI_V4SI_USI), ++ LSX_BUILTIN (vsrani_d_q, 
LARCH_V2DI_FTYPE_V2DI_V2DI_USI), ++ LSX_BUILTIN (vsrarni_b_h, LARCH_V16QI_FTYPE_V16QI_V16QI_USI), ++ LSX_BUILTIN (vsrarni_h_w, LARCH_V8HI_FTYPE_V8HI_V8HI_USI), ++ LSX_BUILTIN (vsrarni_w_d, LARCH_V4SI_FTYPE_V4SI_V4SI_USI), ++ LSX_BUILTIN (vsrarni_d_q, LARCH_V2DI_FTYPE_V2DI_V2DI_USI), ++ LSX_BUILTIN (vssrani_b_h, LARCH_V16QI_FTYPE_V16QI_V16QI_USI), ++ LSX_BUILTIN (vssrani_h_w, LARCH_V8HI_FTYPE_V8HI_V8HI_USI), ++ LSX_BUILTIN (vssrani_w_d, LARCH_V4SI_FTYPE_V4SI_V4SI_USI), ++ LSX_BUILTIN (vssrani_d_q, LARCH_V2DI_FTYPE_V2DI_V2DI_USI), ++ LSX_BUILTIN (vssrani_bu_h, LARCH_UV16QI_FTYPE_UV16QI_V16QI_USI), ++ LSX_BUILTIN (vssrani_hu_w, LARCH_UV8HI_FTYPE_UV8HI_V8HI_USI), ++ LSX_BUILTIN (vssrani_wu_d, LARCH_UV4SI_FTYPE_UV4SI_V4SI_USI), ++ LSX_BUILTIN (vssrani_du_q, LARCH_UV2DI_FTYPE_UV2DI_V2DI_USI), ++ LSX_BUILTIN (vssrarni_b_h, LARCH_V16QI_FTYPE_V16QI_V16QI_USI), ++ LSX_BUILTIN (vssrarni_h_w, LARCH_V8HI_FTYPE_V8HI_V8HI_USI), ++ LSX_BUILTIN (vssrarni_w_d, LARCH_V4SI_FTYPE_V4SI_V4SI_USI), ++ LSX_BUILTIN (vssrarni_d_q, LARCH_V2DI_FTYPE_V2DI_V2DI_USI), ++ LSX_BUILTIN (vssrarni_bu_h, LARCH_UV16QI_FTYPE_UV16QI_V16QI_USI), ++ LSX_BUILTIN (vssrarni_hu_w, LARCH_UV8HI_FTYPE_UV8HI_V8HI_USI), ++ LSX_BUILTIN (vssrarni_wu_d, LARCH_UV4SI_FTYPE_UV4SI_V4SI_USI), ++ LSX_BUILTIN (vssrarni_du_q, LARCH_UV2DI_FTYPE_UV2DI_V2DI_USI), ++ LSX_BUILTIN (vpermi_w, LARCH_V4SI_FTYPE_V4SI_V4SI_USI), ++ LSX_BUILTIN (vld, LARCH_V16QI_FTYPE_CVPOINTER_SI), ++ LSX_NO_TARGET_BUILTIN (vst, LARCH_VOID_FTYPE_V16QI_CVPOINTER_SI), ++ LSX_BUILTIN (vssrlrn_b_h, LARCH_V16QI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vssrlrn_h_w, LARCH_V8HI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vssrlrn_w_d, LARCH_V4SI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vssrln_b_h, LARCH_V16QI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vssrln_h_w, LARCH_V8HI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vssrln_w_d, LARCH_V4SI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vorn_v, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vldi, LARCH_V2DI_FTYPE_HI), ++ LSX_BUILTIN (vshuf_b, 
LARCH_V16QI_FTYPE_V16QI_V16QI_V16QI), ++ LSX_BUILTIN (vldx, LARCH_V16QI_FTYPE_CVPOINTER_DI), ++ LSX_NO_TARGET_BUILTIN (vstx, LARCH_VOID_FTYPE_V16QI_CVPOINTER_DI), ++ LSX_BUILTIN (vextl_qu_du, LARCH_UV2DI_FTYPE_UV2DI) + }; + + /* Index I is the function declaration for loongarch_builtins[I], or null if +@@ -193,11 +1219,46 @@ static GTY (()) tree loongarch_builtin_decls[ARRAY_SIZE (loongarch_builtins)]; + using the instruction code or return null if not defined for the target. */ + static GTY (()) int loongarch_get_builtin_decl_index[NUM_INSN_CODES]; + ++ ++/* MODE is a vector mode whose elements have type TYPE. Return the type ++ of the vector itself. */ ++ ++static tree ++loongarch_builtin_vector_type (tree type, machine_mode mode) ++{ ++ static tree types[2 * (int) MAX_MACHINE_MODE]; ++ int mode_index; ++ ++ mode_index = (int) mode; ++ ++ if (TREE_CODE (type) == INTEGER_TYPE && TYPE_UNSIGNED (type)) ++ mode_index += MAX_MACHINE_MODE; ++ ++ if (types[mode_index] == NULL_TREE) ++ types[mode_index] = build_vector_type_for_mode (type, mode); ++ return types[mode_index]; ++} ++ ++/* Return a type for 'const volatile void *'. */ ++ ++static tree ++loongarch_build_cvpointer_type (void) ++{ ++ static tree cache; ++ ++ if (cache == NULL_TREE) ++ cache = build_pointer_type (build_qualified_type (void_type_node, ++ TYPE_QUAL_CONST ++ | TYPE_QUAL_VOLATILE)); ++ return cache; ++} ++ + /* Source-level argument types. */ + #define LARCH_ATYPE_VOID void_type_node + #define LARCH_ATYPE_INT integer_type_node + #define LARCH_ATYPE_POINTER ptr_type_node +- ++#define LARCH_ATYPE_CVPOINTER loongarch_build_cvpointer_type () ++#define LARCH_ATYPE_BOOLEAN boolean_type_node + /* Standard mode-based argument types. 
*/ + #define LARCH_ATYPE_QI intQI_type_node + #define LARCH_ATYPE_UQI unsigned_intQI_type_node +@@ -210,6 +1271,72 @@ static GTY (()) int loongarch_get_builtin_decl_index[NUM_INSN_CODES]; + #define LARCH_ATYPE_SF float_type_node + #define LARCH_ATYPE_DF double_type_node + ++/* Vector argument types. */ ++#define LARCH_ATYPE_V2SF \ ++ loongarch_builtin_vector_type (float_type_node, V2SFmode) ++#define LARCH_ATYPE_V2HI \ ++ loongarch_builtin_vector_type (intHI_type_node, V2HImode) ++#define LARCH_ATYPE_V2SI \ ++ loongarch_builtin_vector_type (intSI_type_node, V2SImode) ++#define LARCH_ATYPE_V4QI \ ++ loongarch_builtin_vector_type (intQI_type_node, V4QImode) ++#define LARCH_ATYPE_V4HI \ ++ loongarch_builtin_vector_type (intHI_type_node, V4HImode) ++#define LARCH_ATYPE_V8QI \ ++ loongarch_builtin_vector_type (intQI_type_node, V8QImode) ++ ++#define LARCH_ATYPE_V2DI \ ++ loongarch_builtin_vector_type (long_long_integer_type_node, V2DImode) ++#define LARCH_ATYPE_V4SI \ ++ loongarch_builtin_vector_type (intSI_type_node, V4SImode) ++#define LARCH_ATYPE_V8HI \ ++ loongarch_builtin_vector_type (intHI_type_node, V8HImode) ++#define LARCH_ATYPE_V16QI \ ++ loongarch_builtin_vector_type (intQI_type_node, V16QImode) ++#define LARCH_ATYPE_V2DF \ ++ loongarch_builtin_vector_type (double_type_node, V2DFmode) ++#define LARCH_ATYPE_V4SF \ ++ loongarch_builtin_vector_type (float_type_node, V4SFmode) ++ ++/* LoongArch ASX. 
*/ ++#define LARCH_ATYPE_V4DI \ ++ loongarch_builtin_vector_type (long_long_integer_type_node, V4DImode) ++#define LARCH_ATYPE_V8SI \ ++ loongarch_builtin_vector_type (intSI_type_node, V8SImode) ++#define LARCH_ATYPE_V16HI \ ++ loongarch_builtin_vector_type (intHI_type_node, V16HImode) ++#define LARCH_ATYPE_V32QI \ ++ loongarch_builtin_vector_type (intQI_type_node, V32QImode) ++#define LARCH_ATYPE_V4DF \ ++ loongarch_builtin_vector_type (double_type_node, V4DFmode) ++#define LARCH_ATYPE_V8SF \ ++ loongarch_builtin_vector_type (float_type_node, V8SFmode) ++ ++#define LARCH_ATYPE_UV2DI \ ++ loongarch_builtin_vector_type (long_long_unsigned_type_node, V2DImode) ++#define LARCH_ATYPE_UV4SI \ ++ loongarch_builtin_vector_type (unsigned_intSI_type_node, V4SImode) ++#define LARCH_ATYPE_UV8HI \ ++ loongarch_builtin_vector_type (unsigned_intHI_type_node, V8HImode) ++#define LARCH_ATYPE_UV16QI \ ++ loongarch_builtin_vector_type (unsigned_intQI_type_node, V16QImode) ++ ++#define LARCH_ATYPE_UV4DI \ ++ loongarch_builtin_vector_type (long_long_unsigned_type_node, V4DImode) ++#define LARCH_ATYPE_UV8SI \ ++ loongarch_builtin_vector_type (unsigned_intSI_type_node, V8SImode) ++#define LARCH_ATYPE_UV16HI \ ++ loongarch_builtin_vector_type (unsigned_intHI_type_node, V16HImode) ++#define LARCH_ATYPE_UV32QI \ ++ loongarch_builtin_vector_type (unsigned_intQI_type_node, V32QImode) ++ ++#define LARCH_ATYPE_UV2SI \ ++ loongarch_builtin_vector_type (unsigned_intSI_type_node, V2SImode) ++#define LARCH_ATYPE_UV4HI \ ++ loongarch_builtin_vector_type (unsigned_intHI_type_node, V4HImode) ++#define LARCH_ATYPE_UV8QI \ ++ loongarch_builtin_vector_type (unsigned_intQI_type_node, V8QImode) ++ + /* LARCH_FTYPE_ATYPESN takes N LARCH_FTYPES-like type codes and lists + their associated LARCH_ATYPEs. 
*/ + #define LARCH_FTYPE_ATYPES1(A, B) LARCH_ATYPE_##A, LARCH_ATYPE_##B +@@ -288,6 +1415,92 @@ loongarch_builtin_decl (unsigned int code, bool initialize_p ATTRIBUTE_UNUSED) + return loongarch_builtin_decls[code]; + } + ++/* Implement TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION. */ ++ ++tree ++loongarch_builtin_vectorized_function (unsigned int fn, tree type_out, ++ tree type_in) ++{ ++ machine_mode in_mode, out_mode; ++ int in_n, out_n; ++ ++ if (TREE_CODE (type_out) != VECTOR_TYPE ++ || TREE_CODE (type_in) != VECTOR_TYPE ++ || !ISA_HAS_LSX) ++ return NULL_TREE; ++ ++ out_mode = TYPE_MODE (TREE_TYPE (type_out)); ++ out_n = TYPE_VECTOR_SUBPARTS (type_out); ++ in_mode = TYPE_MODE (TREE_TYPE (type_in)); ++ in_n = TYPE_VECTOR_SUBPARTS (type_in); ++ ++ /* INSN is the name of the associated instruction pattern, without ++ the leading CODE_FOR_. */ ++#define LARCH_GET_BUILTIN(INSN) \ ++ loongarch_builtin_decls[loongarch_get_builtin_decl_index[CODE_FOR_##INSN]] ++ ++ switch (fn) ++ { ++ CASE_CFN_CEIL: ++ if (out_mode == DFmode && in_mode == DFmode) ++ { ++ if (out_n == 2 && in_n == 2) ++ return LARCH_GET_BUILTIN (lsx_vfrintrp_d); ++ } ++ if (out_mode == SFmode && in_mode == SFmode) ++ { ++ if (out_n == 4 && in_n == 4) ++ return LARCH_GET_BUILTIN (lsx_vfrintrp_s); ++ } ++ break; ++ ++ CASE_CFN_TRUNC: ++ if (out_mode == DFmode && in_mode == DFmode) ++ { ++ if (out_n == 2 && in_n == 2) ++ return LARCH_GET_BUILTIN (lsx_vfrintrz_d); ++ } ++ if (out_mode == SFmode && in_mode == SFmode) ++ { ++ if (out_n == 4 && in_n == 4) ++ return LARCH_GET_BUILTIN (lsx_vfrintrz_s); ++ } ++ break; ++ ++ CASE_CFN_RINT: ++ CASE_CFN_ROUND: ++ if (out_mode == DFmode && in_mode == DFmode) ++ { ++ if (out_n == 2 && in_n == 2) ++ return LARCH_GET_BUILTIN (lsx_vfrint_d); ++ } ++ if (out_mode == SFmode && in_mode == SFmode) ++ { ++ if (out_n == 4 && in_n == 4) ++ return LARCH_GET_BUILTIN (lsx_vfrint_s); ++ } ++ break; ++ ++ CASE_CFN_FLOOR: ++ if (out_mode == DFmode && in_mode == DFmode) ++ { ++ if 
(out_n == 2 && in_n == 2) ++ return LARCH_GET_BUILTIN (lsx_vfrintrm_d); ++ } ++ if (out_mode == SFmode && in_mode == SFmode) ++ { ++ if (out_n == 4 && in_n == 4) ++ return LARCH_GET_BUILTIN (lsx_vfrintrm_s); ++ } ++ break; ++ ++ default: ++ break; ++ } ++ ++ return NULL_TREE; ++} ++ + /* Take argument ARGNO from EXP's argument list and convert it into + an expand operand. Store the operand in *OP. */ + +@@ -323,7 +1536,236 @@ static rtx + loongarch_expand_builtin_insn (enum insn_code icode, unsigned int nops, + struct expand_operand *ops, bool has_target_p) + { +- if (!maybe_expand_insn (icode, nops, ops)) ++ machine_mode imode; ++ int rangelo = 0, rangehi = 0, error_opno = 0; ++ ++ switch (icode) ++ { ++ case CODE_FOR_lsx_vaddi_bu: ++ case CODE_FOR_lsx_vaddi_hu: ++ case CODE_FOR_lsx_vaddi_wu: ++ case CODE_FOR_lsx_vaddi_du: ++ case CODE_FOR_lsx_vslti_bu: ++ case CODE_FOR_lsx_vslti_hu: ++ case CODE_FOR_lsx_vslti_wu: ++ case CODE_FOR_lsx_vslti_du: ++ case CODE_FOR_lsx_vslei_bu: ++ case CODE_FOR_lsx_vslei_hu: ++ case CODE_FOR_lsx_vslei_wu: ++ case CODE_FOR_lsx_vslei_du: ++ case CODE_FOR_lsx_vmaxi_bu: ++ case CODE_FOR_lsx_vmaxi_hu: ++ case CODE_FOR_lsx_vmaxi_wu: ++ case CODE_FOR_lsx_vmaxi_du: ++ case CODE_FOR_lsx_vmini_bu: ++ case CODE_FOR_lsx_vmini_hu: ++ case CODE_FOR_lsx_vmini_wu: ++ case CODE_FOR_lsx_vmini_du: ++ case CODE_FOR_lsx_vsubi_bu: ++ case CODE_FOR_lsx_vsubi_hu: ++ case CODE_FOR_lsx_vsubi_wu: ++ case CODE_FOR_lsx_vsubi_du: ++ gcc_assert (has_target_p && nops == 3); ++ /* We only generate a vector of constants iff the second argument ++ is an immediate. We also validate the range of the immediate. 
*/ ++ if (CONST_INT_P (ops[2].value)) ++ { ++ rangelo = 0; ++ rangehi = 31; ++ if (IN_RANGE (INTVAL (ops[2].value), rangelo, rangehi)) ++ { ++ ops[2].mode = ops[0].mode; ++ ops[2].value = loongarch_gen_const_int_vector (ops[2].mode, ++ INTVAL (ops[2].value)); ++ } ++ else ++ error_opno = 2; ++ } ++ break; ++ ++ case CODE_FOR_lsx_vseqi_b: ++ case CODE_FOR_lsx_vseqi_h: ++ case CODE_FOR_lsx_vseqi_w: ++ case CODE_FOR_lsx_vseqi_d: ++ case CODE_FOR_lsx_vslti_b: ++ case CODE_FOR_lsx_vslti_h: ++ case CODE_FOR_lsx_vslti_w: ++ case CODE_FOR_lsx_vslti_d: ++ case CODE_FOR_lsx_vslei_b: ++ case CODE_FOR_lsx_vslei_h: ++ case CODE_FOR_lsx_vslei_w: ++ case CODE_FOR_lsx_vslei_d: ++ case CODE_FOR_lsx_vmaxi_b: ++ case CODE_FOR_lsx_vmaxi_h: ++ case CODE_FOR_lsx_vmaxi_w: ++ case CODE_FOR_lsx_vmaxi_d: ++ case CODE_FOR_lsx_vmini_b: ++ case CODE_FOR_lsx_vmini_h: ++ case CODE_FOR_lsx_vmini_w: ++ case CODE_FOR_lsx_vmini_d: ++ gcc_assert (has_target_p && nops == 3); ++ /* We only generate a vector of constants iff the second argument ++ is an immediate. We also validate the range of the immediate. 
*/ ++ if (CONST_INT_P (ops[2].value)) ++ { ++ rangelo = -16; ++ rangehi = 15; ++ if (IN_RANGE (INTVAL (ops[2].value), rangelo, rangehi)) ++ { ++ ops[2].mode = ops[0].mode; ++ ops[2].value = loongarch_gen_const_int_vector (ops[2].mode, ++ INTVAL (ops[2].value)); ++ } ++ else ++ error_opno = 2; ++ } ++ break; ++ ++ case CODE_FOR_lsx_vandi_b: ++ case CODE_FOR_lsx_vori_b: ++ case CODE_FOR_lsx_vnori_b: ++ case CODE_FOR_lsx_vxori_b: ++ gcc_assert (has_target_p && nops == 3); ++ if (!CONST_INT_P (ops[2].value)) ++ break; ++ ops[2].mode = ops[0].mode; ++ ops[2].value = loongarch_gen_const_int_vector (ops[2].mode, ++ INTVAL (ops[2].value)); ++ break; ++ ++ case CODE_FOR_lsx_vbitseli_b: ++ gcc_assert (has_target_p && nops == 4); ++ if (!CONST_INT_P (ops[3].value)) ++ break; ++ ops[3].mode = ops[0].mode; ++ ops[3].value = loongarch_gen_const_int_vector (ops[3].mode, ++ INTVAL (ops[3].value)); ++ break; ++ ++ case CODE_FOR_lsx_vreplgr2vr_b: ++ case CODE_FOR_lsx_vreplgr2vr_h: ++ case CODE_FOR_lsx_vreplgr2vr_w: ++ case CODE_FOR_lsx_vreplgr2vr_d: ++ /* Map the built-ins to vector fill operations. We need fix up the mode ++ for the element being inserted. 
*/ ++ gcc_assert (has_target_p && nops == 2); ++ imode = GET_MODE_INNER (ops[0].mode); ++ ops[1].value = lowpart_subreg (imode, ops[1].value, ops[1].mode); ++ ops[1].mode = imode; ++ break; ++ ++ case CODE_FOR_lsx_vilvh_b: ++ case CODE_FOR_lsx_vilvh_h: ++ case CODE_FOR_lsx_vilvh_w: ++ case CODE_FOR_lsx_vilvh_d: ++ case CODE_FOR_lsx_vilvl_b: ++ case CODE_FOR_lsx_vilvl_h: ++ case CODE_FOR_lsx_vilvl_w: ++ case CODE_FOR_lsx_vilvl_d: ++ case CODE_FOR_lsx_vpackev_b: ++ case CODE_FOR_lsx_vpackev_h: ++ case CODE_FOR_lsx_vpackev_w: ++ case CODE_FOR_lsx_vpackod_b: ++ case CODE_FOR_lsx_vpackod_h: ++ case CODE_FOR_lsx_vpackod_w: ++ case CODE_FOR_lsx_vpickev_b: ++ case CODE_FOR_lsx_vpickev_h: ++ case CODE_FOR_lsx_vpickev_w: ++ case CODE_FOR_lsx_vpickod_b: ++ case CODE_FOR_lsx_vpickod_h: ++ case CODE_FOR_lsx_vpickod_w: ++ /* Swap the operands 1 and 2 for interleave operations. Built-ins follow ++ convention of ISA, which have op1 as higher component and op2 as lower ++ component. However, the VEC_PERM op in tree and vec_concat in RTL ++ expects first operand to be lower component, because of which this ++ swap is needed for builtins. 
*/ ++ gcc_assert (has_target_p && nops == 3); ++ std::swap (ops[1], ops[2]); ++ break; ++ ++ case CODE_FOR_lsx_vslli_b: ++ case CODE_FOR_lsx_vslli_h: ++ case CODE_FOR_lsx_vslli_w: ++ case CODE_FOR_lsx_vslli_d: ++ case CODE_FOR_lsx_vsrai_b: ++ case CODE_FOR_lsx_vsrai_h: ++ case CODE_FOR_lsx_vsrai_w: ++ case CODE_FOR_lsx_vsrai_d: ++ case CODE_FOR_lsx_vsrli_b: ++ case CODE_FOR_lsx_vsrli_h: ++ case CODE_FOR_lsx_vsrli_w: ++ case CODE_FOR_lsx_vsrli_d: ++ gcc_assert (has_target_p && nops == 3); ++ if (CONST_INT_P (ops[2].value)) ++ { ++ rangelo = 0; ++ rangehi = GET_MODE_UNIT_BITSIZE (ops[0].mode) - 1; ++ if (IN_RANGE (INTVAL (ops[2].value), rangelo, rangehi)) ++ { ++ ops[2].mode = ops[0].mode; ++ ops[2].value = loongarch_gen_const_int_vector (ops[2].mode, ++ INTVAL (ops[2].value)); ++ } ++ else ++ error_opno = 2; ++ } ++ break; ++ ++ case CODE_FOR_lsx_vinsgr2vr_b: ++ case CODE_FOR_lsx_vinsgr2vr_h: ++ case CODE_FOR_lsx_vinsgr2vr_w: ++ case CODE_FOR_lsx_vinsgr2vr_d: ++ /* Map the built-ins to insert operations. We need to swap operands, ++ fix up the mode for the element being inserted, and generate ++ a bit mask for vec_merge. */ ++ gcc_assert (has_target_p && nops == 4); ++ std::swap (ops[1], ops[2]); ++ imode = GET_MODE_INNER (ops[0].mode); ++ ops[1].value = lowpart_subreg (imode, ops[1].value, ops[1].mode); ++ ops[1].mode = imode; ++ rangelo = 0; ++ rangehi = GET_MODE_NUNITS (ops[0].mode) - 1; ++ if (CONST_INT_P (ops[3].value) ++ && IN_RANGE (INTVAL (ops[3].value), rangelo, rangehi)) ++ ops[3].value = GEN_INT (1 << INTVAL (ops[3].value)); ++ else ++ error_opno = 2; ++ break; ++ ++ /* Map the built-ins to element insert operations. We need to swap ++ operands and generate a bit mask. 
*/ ++ gcc_assert (has_target_p && nops == 4); ++ std::swap (ops[1], ops[2]); ++ std::swap (ops[1], ops[3]); ++ rangelo = 0; ++ rangehi = GET_MODE_NUNITS (ops[0].mode) - 1; ++ if (CONST_INT_P (ops[3].value) ++ && IN_RANGE (INTVAL (ops[3].value), rangelo, rangehi)) ++ ops[3].value = GEN_INT (1 << INTVAL (ops[3].value)); ++ else ++ error_opno = 2; ++ break; ++ ++ case CODE_FOR_lsx_vshuf4i_b: ++ case CODE_FOR_lsx_vshuf4i_h: ++ case CODE_FOR_lsx_vshuf4i_w: ++ case CODE_FOR_lsx_vshuf4i_w_f: ++ gcc_assert (has_target_p && nops == 3); ++ ops[2].value = loongarch_gen_const_int_vector_shuffle (ops[0].mode, ++ INTVAL (ops[2].value)); ++ break; ++ ++ default: ++ break; ++ } ++ ++ if (error_opno != 0) ++ { ++ error ("argument %d to the built-in must be a constant" ++ " in range %d to %d", error_opno, rangelo, rangehi); ++ return has_target_p ? gen_reg_rtx (ops[0].mode) : const0_rtx; ++ } ++ else if (!maybe_expand_insn (icode, nops, ops)) + { + error ("invalid argument to built-in function"); + return has_target_p ? gen_reg_rtx (ops[0].mode) : const0_rtx; +@@ -357,6 +1799,50 @@ loongarch_expand_builtin_direct (enum insn_code icode, rtx target, tree exp, + return loongarch_expand_builtin_insn (icode, opno, ops, has_target_p); + } + ++/* Expand an LSX built-in for a compare and branch instruction specified by ++ ICODE, set a general-purpose register to 1 if the branch was taken, ++ 0 otherwise. */ ++ ++static rtx ++loongarch_expand_builtin_lsx_test_branch (enum insn_code icode, tree exp) ++{ ++ struct expand_operand ops[3]; ++ rtx_insn *cbranch; ++ rtx_code_label *true_label, *done_label; ++ rtx cmp_result; ++ ++ true_label = gen_label_rtx (); ++ done_label = gen_label_rtx (); ++ ++ create_input_operand (&ops[0], true_label, TYPE_MODE (TREE_TYPE (exp))); ++ loongarch_prepare_builtin_arg (&ops[1], exp, 0); ++ create_fixed_operand (&ops[2], const0_rtx); ++ ++ /* Make sure that the operand 1 is a REG. 
*/ ++ if (GET_CODE (ops[1].value) != REG) ++ ops[1].value = force_reg (ops[1].mode, ops[1].value); ++ ++ if ((cbranch = maybe_gen_insn (icode, 3, ops)) == NULL_RTX) ++ error ("failed to expand built-in function"); ++ ++ cmp_result = gen_reg_rtx (SImode); ++ ++ /* First assume that CMP_RESULT is false. */ ++ loongarch_emit_move (cmp_result, const0_rtx); ++ ++ /* Branch to TRUE_LABEL if CBRANCH is taken and DONE_LABEL otherwise. */ ++ emit_jump_insn (cbranch); ++ emit_jump_insn (gen_jump (done_label)); ++ emit_barrier (); ++ ++ /* Set CMP_RESULT to true if the branch was taken. */ ++ emit_label (true_label); ++ loongarch_emit_move (cmp_result, const1_rtx); ++ ++ emit_label (done_label); ++ return cmp_result; ++} ++ + /* Implement TARGET_EXPAND_BUILTIN. */ + + rtx +@@ -377,10 +1863,14 @@ loongarch_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED, + switch (d->builtin_type) + { + case LARCH_BUILTIN_DIRECT: ++ case LARCH_BUILTIN_LSX: + return loongarch_expand_builtin_direct (d->icode, target, exp, true); + + case LARCH_BUILTIN_DIRECT_NO_TARGET: + return loongarch_expand_builtin_direct (d->icode, target, exp, false); ++ ++ case LARCH_BUILTIN_LSX_TEST_BRANCH: ++ return loongarch_expand_builtin_lsx_test_branch (d->icode, exp); + } + gcc_unreachable (); + } +diff --git a/gcc/config/loongarch/loongarch-ftypes.def b/gcc/config/loongarch/loongarch-ftypes.def +index 2babff414..2b0d50892 100644 +--- a/gcc/config/loongarch/loongarch-ftypes.def ++++ b/gcc/config/loongarch/loongarch-ftypes.def +@@ -32,7 +32,7 @@ along with GCC; see the file COPYING3. If not see + INT for integer_type_node + POINTER for ptr_type_node + +- (we don't use PTR because that's a ANSI-compatibillity macro). ++ (we don't use PTR because that's a ANSI-compatibility macro). + + Please keep this list lexicographically sorted by the LIST argument. 
*/ + +@@ -63,3 +63,396 @@ DEF_LARCH_FTYPE (3, (VOID, USI, USI, SI)) + DEF_LARCH_FTYPE (3, (VOID, USI, UDI, SI)) + DEF_LARCH_FTYPE (3, (USI, USI, USI, USI)) + DEF_LARCH_FTYPE (3, (UDI, UDI, UDI, USI)) ++ ++DEF_LARCH_FTYPE (1, (DF, DF)) ++DEF_LARCH_FTYPE (2, (DF, DF, DF)) ++DEF_LARCH_FTYPE (1, (DF, V2DF)) ++ ++DEF_LARCH_FTYPE (1, (DI, DI)) ++DEF_LARCH_FTYPE (1, (DI, SI)) ++DEF_LARCH_FTYPE (1, (DI, UQI)) ++DEF_LARCH_FTYPE (2, (DI, DI, DI)) ++DEF_LARCH_FTYPE (2, (DI, DI, SI)) ++DEF_LARCH_FTYPE (3, (DI, DI, SI, SI)) ++DEF_LARCH_FTYPE (3, (DI, DI, USI, USI)) ++DEF_LARCH_FTYPE (3, (DI, DI, DI, QI)) ++DEF_LARCH_FTYPE (3, (DI, DI, V2HI, V2HI)) ++DEF_LARCH_FTYPE (3, (DI, DI, V4QI, V4QI)) ++DEF_LARCH_FTYPE (2, (DI, POINTER, SI)) ++DEF_LARCH_FTYPE (2, (DI, SI, SI)) ++DEF_LARCH_FTYPE (2, (DI, USI, USI)) ++ ++DEF_LARCH_FTYPE (2, (DI, V2DI, UQI)) ++ ++DEF_LARCH_FTYPE (2, (INT, DF, DF)) ++DEF_LARCH_FTYPE (2, (INT, SF, SF)) ++ ++DEF_LARCH_FTYPE (2, (INT, V2SF, V2SF)) ++DEF_LARCH_FTYPE (4, (INT, V2SF, V2SF, V2SF, V2SF)) ++ ++DEF_LARCH_FTYPE (1, (SF, SF)) ++DEF_LARCH_FTYPE (2, (SF, SF, SF)) ++DEF_LARCH_FTYPE (1, (SF, V2SF)) ++DEF_LARCH_FTYPE (1, (SF, V4SF)) ++ ++DEF_LARCH_FTYPE (2, (SI, POINTER, SI)) ++DEF_LARCH_FTYPE (1, (SI, SI)) ++DEF_LARCH_FTYPE (1, (SI, UDI)) ++DEF_LARCH_FTYPE (2, (QI, QI, QI)) ++DEF_LARCH_FTYPE (2, (HI, HI, HI)) ++DEF_LARCH_FTYPE (3, (SI, SI, SI, SI)) ++DEF_LARCH_FTYPE (3, (SI, SI, SI, QI)) ++DEF_LARCH_FTYPE (1, (SI, UQI)) ++DEF_LARCH_FTYPE (1, (SI, UV16QI)) ++DEF_LARCH_FTYPE (1, (SI, UV2DI)) ++DEF_LARCH_FTYPE (1, (SI, UV4SI)) ++DEF_LARCH_FTYPE (1, (SI, UV8HI)) ++DEF_LARCH_FTYPE (2, (SI, V16QI, UQI)) ++DEF_LARCH_FTYPE (1, (SI, V2HI)) ++DEF_LARCH_FTYPE (2, (SI, V2HI, V2HI)) ++DEF_LARCH_FTYPE (1, (SI, V4QI)) ++DEF_LARCH_FTYPE (2, (SI, V4QI, V4QI)) ++DEF_LARCH_FTYPE (2, (SI, V4SI, UQI)) ++DEF_LARCH_FTYPE (2, (SI, V8HI, UQI)) ++DEF_LARCH_FTYPE (1, (SI, VOID)) ++ ++DEF_LARCH_FTYPE (2, (UDI, UDI, UDI)) ++DEF_LARCH_FTYPE (2, (UDI, UV2SI, UV2SI)) ++DEF_LARCH_FTYPE (2, 
(UDI, V2DI, UQI)) ++ ++DEF_LARCH_FTYPE (2, (USI, V16QI, UQI)) ++DEF_LARCH_FTYPE (2, (USI, V4SI, UQI)) ++DEF_LARCH_FTYPE (2, (USI, V8HI, UQI)) ++DEF_LARCH_FTYPE (1, (USI, VOID)) ++ ++DEF_LARCH_FTYPE (2, (UV16QI, UV16QI, UQI)) ++DEF_LARCH_FTYPE (2, (UV16QI, UV16QI, USI)) ++DEF_LARCH_FTYPE (2, (UV16QI, UV16QI, UV16QI)) ++DEF_LARCH_FTYPE (3, (UV16QI, UV16QI, UV16QI, UQI)) ++DEF_LARCH_FTYPE (3, (UV16QI, UV16QI, UV16QI, USI)) ++DEF_LARCH_FTYPE (3, (UV16QI, UV16QI, UV16QI, UV16QI)) ++DEF_LARCH_FTYPE (2, (UV16QI, UV16QI, V16QI)) ++ ++DEF_LARCH_FTYPE (2, (UV2DI, UV2DI, UQI)) ++DEF_LARCH_FTYPE (2, (UV2DI, UV2DI, UV2DI)) ++DEF_LARCH_FTYPE (3, (UV2DI, UV2DI, UV2DI, UQI)) ++DEF_LARCH_FTYPE (3, (UV2DI, UV2DI, UV2DI, UV2DI)) ++DEF_LARCH_FTYPE (3, (UV2DI, UV2DI, UV4SI, UV4SI)) ++DEF_LARCH_FTYPE (2, (UV2DI, UV2DI, V2DI)) ++DEF_LARCH_FTYPE (2, (UV2DI, UV4SI, UV4SI)) ++DEF_LARCH_FTYPE (1, (UV2DI, V2DF)) ++ ++DEF_LARCH_FTYPE (2, (UV2SI, UV2SI, UQI)) ++DEF_LARCH_FTYPE (2, (UV2SI, UV2SI, UV2SI)) ++ ++DEF_LARCH_FTYPE (2, (UV4HI, UV4HI, UQI)) ++DEF_LARCH_FTYPE (2, (UV4HI, UV4HI, USI)) ++DEF_LARCH_FTYPE (2, (UV4HI, UV4HI, UV4HI)) ++DEF_LARCH_FTYPE (3, (UV4HI, UV4HI, UV4HI, UQI)) ++DEF_LARCH_FTYPE (3, (UV4HI, UV4HI, UV4HI, USI)) ++DEF_LARCH_FTYPE (1, (UV4HI, UV8QI)) ++DEF_LARCH_FTYPE (2, (UV4HI, UV8QI, UV8QI)) ++ ++DEF_LARCH_FTYPE (2, (UV4SI, UV4SI, UQI)) ++DEF_LARCH_FTYPE (2, (UV4SI, UV4SI, UV4SI)) ++DEF_LARCH_FTYPE (3, (UV4SI, UV4SI, UV4SI, UQI)) ++DEF_LARCH_FTYPE (3, (UV4SI, UV4SI, UV4SI, UV4SI)) ++DEF_LARCH_FTYPE (3, (UV4SI, UV4SI, UV8HI, UV8HI)) ++DEF_LARCH_FTYPE (2, (UV4SI, UV4SI, V4SI)) ++DEF_LARCH_FTYPE (2, (UV4SI, UV8HI, UV8HI)) ++DEF_LARCH_FTYPE (1, (UV4SI, V4SF)) ++ ++DEF_LARCH_FTYPE (2, (UV8HI, UV16QI, UV16QI)) ++DEF_LARCH_FTYPE (2, (UV8HI, UV8HI, UQI)) ++DEF_LARCH_FTYPE (3, (UV8HI, UV8HI, UV16QI, UV16QI)) ++DEF_LARCH_FTYPE (2, (UV8HI, UV8HI, UV8HI)) ++DEF_LARCH_FTYPE (3, (UV8HI, UV8HI, UV8HI, UQI)) ++DEF_LARCH_FTYPE (3, (UV8HI, UV8HI, UV8HI, UV8HI)) ++DEF_LARCH_FTYPE (2, 
(UV8HI, UV8HI, V8HI)) ++ ++ ++ ++DEF_LARCH_FTYPE (2, (UV8QI, UV4HI, UV4HI)) ++DEF_LARCH_FTYPE (1, (UV8QI, UV8QI)) ++DEF_LARCH_FTYPE (2, (UV8QI, UV8QI, UV8QI)) ++ ++DEF_LARCH_FTYPE (2, (V16QI, CVPOINTER, SI)) ++DEF_LARCH_FTYPE (2, (V16QI, CVPOINTER, DI)) ++DEF_LARCH_FTYPE (1, (V16QI, HI)) ++DEF_LARCH_FTYPE (1, (V16QI, SI)) ++DEF_LARCH_FTYPE (2, (V16QI, UV16QI, UQI)) ++DEF_LARCH_FTYPE (2, (V16QI, UV16QI, UV16QI)) ++DEF_LARCH_FTYPE (1, (V16QI, V16QI)) ++DEF_LARCH_FTYPE (2, (V16QI, V16QI, QI)) ++DEF_LARCH_FTYPE (2, (V16QI, V16QI, SI)) ++DEF_LARCH_FTYPE (2, (V16QI, V16QI, USI)) ++DEF_LARCH_FTYPE (2, (V16QI, V16QI, UQI)) ++DEF_LARCH_FTYPE (3, (V16QI, V16QI, UQI, SI)) ++DEF_LARCH_FTYPE (3, (V16QI, V16QI, UQI, V16QI)) ++DEF_LARCH_FTYPE (2, (V16QI, V16QI, V16QI)) ++DEF_LARCH_FTYPE (3, (V16QI, V16QI, V16QI, SI)) ++DEF_LARCH_FTYPE (3, (V16QI, V16QI, V16QI, UQI)) ++DEF_LARCH_FTYPE (4, (V16QI, V16QI, V16QI, UQI, UQI)) ++DEF_LARCH_FTYPE (3, (V16QI, V16QI, V16QI, USI)) ++DEF_LARCH_FTYPE (3, (V16QI, V16QI, V16QI, V16QI)) ++ ++ ++DEF_LARCH_FTYPE (1, (V2DF, DF)) ++DEF_LARCH_FTYPE (1, (V2DF, UV2DI)) ++DEF_LARCH_FTYPE (1, (V2DF, V2DF)) ++DEF_LARCH_FTYPE (2, (V2DF, V2DF, V2DF)) ++DEF_LARCH_FTYPE (3, (V2DF, V2DF, V2DF, V2DF)) ++DEF_LARCH_FTYPE (2, (V2DF, V2DF, V2DI)) ++DEF_LARCH_FTYPE (1, (V2DF, V2DI)) ++DEF_LARCH_FTYPE (1, (V2DF, V4SF)) ++DEF_LARCH_FTYPE (1, (V2DF, V4SI)) ++ ++DEF_LARCH_FTYPE (2, (V2DI, CVPOINTER, SI)) ++DEF_LARCH_FTYPE (1, (V2DI, DI)) ++DEF_LARCH_FTYPE (1, (V2DI, HI)) ++DEF_LARCH_FTYPE (2, (V2DI, UV2DI, UQI)) ++DEF_LARCH_FTYPE (2, (V2DI, UV2DI, UV2DI)) ++DEF_LARCH_FTYPE (2, (V2DI, UV4SI, UV4SI)) ++DEF_LARCH_FTYPE (1, (V2DI, V2DF)) ++DEF_LARCH_FTYPE (2, (V2DI, V2DF, V2DF)) ++DEF_LARCH_FTYPE (1, (V2DI, V2DI)) ++DEF_LARCH_FTYPE (1, (UV2DI, UV2DI)) ++DEF_LARCH_FTYPE (2, (V2DI, V2DI, QI)) ++DEF_LARCH_FTYPE (2, (V2DI, V2DI, SI)) ++DEF_LARCH_FTYPE (2, (V2DI, V2DI, UQI)) ++DEF_LARCH_FTYPE (2, (V2DI, V2DI, USI)) ++DEF_LARCH_FTYPE (3, (V2DI, V2DI, UQI, DI)) ++DEF_LARCH_FTYPE 
(3, (V2DI, V2DI, UQI, V2DI)) ++DEF_LARCH_FTYPE (3, (V2DI, V2DI, UV4SI, UV4SI)) ++DEF_LARCH_FTYPE (2, (V2DI, V2DI, V2DI)) ++DEF_LARCH_FTYPE (3, (V2DI, V2DI, V2DI, SI)) ++DEF_LARCH_FTYPE (3, (V2DI, V2DI, V2DI, UQI)) ++DEF_LARCH_FTYPE (3, (V2DI, V2DI, V2DI, USI)) ++DEF_LARCH_FTYPE (4, (V2DI, V2DI, V2DI, UQI, UQI)) ++DEF_LARCH_FTYPE (3, (V2DI, V2DI, V2DI, V2DI)) ++DEF_LARCH_FTYPE (3, (V2DI, V2DI, V4SI, V4SI)) ++DEF_LARCH_FTYPE (2, (V2DI, V4SI, V4SI)) ++ ++DEF_LARCH_FTYPE (1, (V2HI, SI)) ++DEF_LARCH_FTYPE (2, (V2HI, SI, SI)) ++DEF_LARCH_FTYPE (3, (V2HI, SI, SI, SI)) ++DEF_LARCH_FTYPE (1, (V2HI, V2HI)) ++DEF_LARCH_FTYPE (2, (V2HI, V2HI, SI)) ++DEF_LARCH_FTYPE (2, (V2HI, V2HI, V2HI)) ++DEF_LARCH_FTYPE (1, (V2HI, V4QI)) ++DEF_LARCH_FTYPE (2, (V2HI, V4QI, V2HI)) ++ ++DEF_LARCH_FTYPE (2, (V2SF, SF, SF)) ++DEF_LARCH_FTYPE (1, (V2SF, V2SF)) ++DEF_LARCH_FTYPE (2, (V2SF, V2SF, V2SF)) ++DEF_LARCH_FTYPE (3, (V2SF, V2SF, V2SF, INT)) ++DEF_LARCH_FTYPE (4, (V2SF, V2SF, V2SF, V2SF, V2SF)) ++ ++DEF_LARCH_FTYPE (2, (V2SI, V2SI, UQI)) ++DEF_LARCH_FTYPE (2, (V2SI, V2SI, V2SI)) ++DEF_LARCH_FTYPE (2, (V2SI, V4HI, V4HI)) ++ ++DEF_LARCH_FTYPE (2, (V4HI, V2SI, V2SI)) ++DEF_LARCH_FTYPE (2, (V4HI, V4HI, UQI)) ++DEF_LARCH_FTYPE (2, (V4HI, V4HI, USI)) ++DEF_LARCH_FTYPE (2, (V4HI, V4HI, V4HI)) ++DEF_LARCH_FTYPE (3, (V4HI, V4HI, V4HI, UQI)) ++DEF_LARCH_FTYPE (3, (V4HI, V4HI, V4HI, USI)) ++ ++DEF_LARCH_FTYPE (1, (V4QI, SI)) ++DEF_LARCH_FTYPE (2, (V4QI, V2HI, V2HI)) ++DEF_LARCH_FTYPE (1, (V4QI, V4QI)) ++DEF_LARCH_FTYPE (2, (V4QI, V4QI, SI)) ++DEF_LARCH_FTYPE (2, (V4QI, V4QI, V4QI)) ++ ++DEF_LARCH_FTYPE (1, (V4SF, SF)) ++DEF_LARCH_FTYPE (1, (V4SF, UV4SI)) ++DEF_LARCH_FTYPE (2, (V4SF, V2DF, V2DF)) ++DEF_LARCH_FTYPE (1, (V4SF, V4SF)) ++DEF_LARCH_FTYPE (2, (V4SF, V4SF, V4SF)) ++DEF_LARCH_FTYPE (3, (V4SF, V4SF, V4SF, V4SF)) ++DEF_LARCH_FTYPE (2, (V4SF, V4SF, V4SI)) ++DEF_LARCH_FTYPE (1, (V4SF, V4SI)) ++DEF_LARCH_FTYPE (1, (V4SF, V8HI)) ++ ++DEF_LARCH_FTYPE (2, (V4SI, CVPOINTER, SI)) ++DEF_LARCH_FTYPE (1, 
(V4SI, HI)) ++DEF_LARCH_FTYPE (1, (V4SI, SI)) ++DEF_LARCH_FTYPE (2, (V4SI, UV4SI, UQI)) ++DEF_LARCH_FTYPE (2, (V4SI, UV4SI, UV4SI)) ++DEF_LARCH_FTYPE (2, (V4SI, UV8HI, UV8HI)) ++DEF_LARCH_FTYPE (2, (V4SI, V2DF, V2DF)) ++DEF_LARCH_FTYPE (1, (V4SI, V4SF)) ++DEF_LARCH_FTYPE (2, (V4SI, V4SF, V4SF)) ++DEF_LARCH_FTYPE (1, (V4SI, V4SI)) ++DEF_LARCH_FTYPE (2, (V4SI, V4SI, QI)) ++DEF_LARCH_FTYPE (2, (V4SI, V4SI, SI)) ++DEF_LARCH_FTYPE (2, (V4SI, V4SI, UQI)) ++DEF_LARCH_FTYPE (2, (V4SI, V4SI, USI)) ++DEF_LARCH_FTYPE (3, (V4SI, V4SI, UQI, SI)) ++DEF_LARCH_FTYPE (3, (V4SI, V4SI, UQI, V4SI)) ++DEF_LARCH_FTYPE (3, (V4SI, V4SI, UV8HI, UV8HI)) ++DEF_LARCH_FTYPE (2, (V4SI, V4SI, V4SI)) ++DEF_LARCH_FTYPE (3, (V4SI, V4SI, V4SI, SI)) ++DEF_LARCH_FTYPE (3, (V4SI, V4SI, V4SI, UQI)) ++DEF_LARCH_FTYPE (3, (V4SI, V4SI, V4SI, USI)) ++DEF_LARCH_FTYPE (4, (V4SI, V4SI, V4SI, UQI, UQI)) ++DEF_LARCH_FTYPE (3, (V4SI, V4SI, V4SI, V4SI)) ++DEF_LARCH_FTYPE (3, (V4SI, V4SI, V8HI, V8HI)) ++DEF_LARCH_FTYPE (2, (V4SI, V8HI, V8HI)) ++ ++DEF_LARCH_FTYPE (2, (V8HI, CVPOINTER, SI)) ++DEF_LARCH_FTYPE (1, (V8HI, HI)) ++DEF_LARCH_FTYPE (1, (V8HI, SI)) ++DEF_LARCH_FTYPE (2, (V8HI, UV16QI, UV16QI)) ++DEF_LARCH_FTYPE (2, (V8HI, UV8HI, UQI)) ++DEF_LARCH_FTYPE (2, (V8HI, UV8HI, UV8HI)) ++DEF_LARCH_FTYPE (2, (V8HI, V16QI, V16QI)) ++DEF_LARCH_FTYPE (2, (V8HI, V4SF, V4SF)) ++DEF_LARCH_FTYPE (1, (V8HI, V8HI)) ++DEF_LARCH_FTYPE (2, (V8HI, V8HI, QI)) ++DEF_LARCH_FTYPE (2, (V8HI, V8HI, SI)) ++DEF_LARCH_FTYPE (3, (V8HI, V8HI, SI, UQI)) ++DEF_LARCH_FTYPE (2, (V8HI, V8HI, UQI)) ++DEF_LARCH_FTYPE (2, (V8HI, V8HI, USI)) ++DEF_LARCH_FTYPE (3, (V8HI, V8HI, UQI, SI)) ++DEF_LARCH_FTYPE (3, (V8HI, V8HI, UQI, V8HI)) ++DEF_LARCH_FTYPE (3, (V8HI, V8HI, UV16QI, UV16QI)) ++DEF_LARCH_FTYPE (3, (V8HI, V8HI, V16QI, V16QI)) ++DEF_LARCH_FTYPE (2, (V8HI, V8HI, V8HI)) ++DEF_LARCH_FTYPE (3, (V8HI, V8HI, V8HI, SI)) ++DEF_LARCH_FTYPE (3, (V8HI, V8HI, V8HI, UQI)) ++DEF_LARCH_FTYPE (4, (V8HI, V8HI, V8HI, UQI, UQI)) ++DEF_LARCH_FTYPE (3, (V8HI, 
V8HI, V8HI, USI)) ++DEF_LARCH_FTYPE (3, (V8HI, V8HI, V8HI, V8HI)) ++ ++DEF_LARCH_FTYPE (2, (V8QI, V4HI, V4HI)) ++DEF_LARCH_FTYPE (1, (V8QI, V8QI)) ++DEF_LARCH_FTYPE (2, (V8QI, V8QI, V8QI)) ++ ++DEF_LARCH_FTYPE (2, (VOID, SI, CVPOINTER)) ++DEF_LARCH_FTYPE (2, (VOID, SI, SI)) ++DEF_LARCH_FTYPE (2, (VOID, UQI, SI)) ++DEF_LARCH_FTYPE (2, (VOID, USI, UQI)) ++DEF_LARCH_FTYPE (1, (VOID, UHI)) ++DEF_LARCH_FTYPE (3, (VOID, V16QI, CVPOINTER, SI)) ++DEF_LARCH_FTYPE (3, (VOID, V16QI, CVPOINTER, DI)) ++DEF_LARCH_FTYPE (3, (VOID, V2DF, POINTER, SI)) ++DEF_LARCH_FTYPE (3, (VOID, V2DI, CVPOINTER, SI)) ++DEF_LARCH_FTYPE (2, (VOID, V2HI, V2HI)) ++DEF_LARCH_FTYPE (2, (VOID, V4QI, V4QI)) ++DEF_LARCH_FTYPE (3, (VOID, V4SF, POINTER, SI)) ++DEF_LARCH_FTYPE (3, (VOID, V4SI, CVPOINTER, SI)) ++DEF_LARCH_FTYPE (3, (VOID, V8HI, CVPOINTER, SI)) ++ ++DEF_LARCH_FTYPE (1, (V8HI, V16QI)) ++DEF_LARCH_FTYPE (1, (V4SI, V16QI)) ++DEF_LARCH_FTYPE (1, (V2DI, V16QI)) ++DEF_LARCH_FTYPE (1, (V4SI, V8HI)) ++DEF_LARCH_FTYPE (1, (V2DI, V8HI)) ++DEF_LARCH_FTYPE (1, (V2DI, V4SI)) ++DEF_LARCH_FTYPE (1, (UV8HI, V16QI)) ++DEF_LARCH_FTYPE (1, (UV4SI, V16QI)) ++DEF_LARCH_FTYPE (1, (UV2DI, V16QI)) ++DEF_LARCH_FTYPE (1, (UV4SI, V8HI)) ++DEF_LARCH_FTYPE (1, (UV2DI, V8HI)) ++DEF_LARCH_FTYPE (1, (UV2DI, V4SI)) ++DEF_LARCH_FTYPE (1, (UV8HI, UV16QI)) ++DEF_LARCH_FTYPE (1, (UV4SI, UV16QI)) ++DEF_LARCH_FTYPE (1, (UV2DI, UV16QI)) ++DEF_LARCH_FTYPE (1, (UV4SI, UV8HI)) ++DEF_LARCH_FTYPE (1, (UV2DI, UV8HI)) ++DEF_LARCH_FTYPE (1, (UV2DI, UV4SI)) ++DEF_LARCH_FTYPE (2, (UV8HI, V16QI, V16QI)) ++DEF_LARCH_FTYPE (2, (UV4SI, V8HI, V8HI)) ++DEF_LARCH_FTYPE (2, (UV2DI, V4SI, V4SI)) ++DEF_LARCH_FTYPE (2, (V8HI, V16QI, UQI)) ++DEF_LARCH_FTYPE (2, (V4SI, V8HI, UQI)) ++DEF_LARCH_FTYPE (2, (V2DI, V4SI, UQI)) ++DEF_LARCH_FTYPE (2, (UV8HI, UV16QI, UQI)) ++DEF_LARCH_FTYPE (2, (UV4SI, UV8HI, UQI)) ++DEF_LARCH_FTYPE (2, (UV2DI, UV4SI, UQI)) ++DEF_LARCH_FTYPE (2, (V16QI, V8HI, V8HI)) ++DEF_LARCH_FTYPE (2, (V8HI, V4SI, V4SI)) ++DEF_LARCH_FTYPE (2, 
(V4SI, V2DI, V2DI)) ++DEF_LARCH_FTYPE (2, (UV16QI, UV8HI, UV8HI)) ++DEF_LARCH_FTYPE (2, (UV8HI, UV4SI, UV4SI)) ++DEF_LARCH_FTYPE (2, (UV4SI, UV2DI, UV2DI)) ++DEF_LARCH_FTYPE (2, (V16QI, V8HI, UQI)) ++DEF_LARCH_FTYPE (2, (V8HI, V4SI, UQI)) ++DEF_LARCH_FTYPE (2, (V4SI, V2DI, UQI)) ++DEF_LARCH_FTYPE (2, (UV16QI, UV8HI, UQI)) ++DEF_LARCH_FTYPE (2, (UV8HI, UV4SI, UQI)) ++DEF_LARCH_FTYPE (2, (UV4SI, UV2DI, UQI)) ++DEF_LARCH_FTYPE (2, (V16QI, V16QI, DI)) ++DEF_LARCH_FTYPE (2, (V16QI, UQI, UQI)) ++DEF_LARCH_FTYPE (3, (V16QI, V16QI, UQI, UQI)) ++DEF_LARCH_FTYPE (3, (V8HI, V8HI, UQI, UQI)) ++DEF_LARCH_FTYPE (3, (V4SI, V4SI, UQI, UQI)) ++DEF_LARCH_FTYPE (3, (V2DI, V2DI, UQI, UQI)) ++DEF_LARCH_FTYPE (2, (V4SF, V2DI, V2DI)) ++DEF_LARCH_FTYPE (1, (V2DI, V4SF)) ++DEF_LARCH_FTYPE (2, (V2DI, UQI, USI)) ++DEF_LARCH_FTYPE (2, (V2DI, UQI, UQI)) ++DEF_LARCH_FTYPE (4, (VOID, SI, UQI, V16QI, CVPOINTER)) ++DEF_LARCH_FTYPE (4, (VOID, SI, UQI, V8HI, CVPOINTER)) ++DEF_LARCH_FTYPE (4, (VOID, SI, UQI, V4SI, CVPOINTER)) ++DEF_LARCH_FTYPE (4, (VOID, SI, UQI, V2DI, CVPOINTER)) ++DEF_LARCH_FTYPE (2, (V16QI, SI, CVPOINTER)) ++DEF_LARCH_FTYPE (2, (V8HI, SI, CVPOINTER)) ++DEF_LARCH_FTYPE (2, (V4SI, SI, CVPOINTER)) ++DEF_LARCH_FTYPE (2, (V2DI, SI, CVPOINTER)) ++DEF_LARCH_FTYPE (2, (V8HI, UV16QI, V16QI)) ++DEF_LARCH_FTYPE (2, (V16QI, V16QI, UV16QI)) ++DEF_LARCH_FTYPE (2, (UV16QI, V16QI, UV16QI)) ++DEF_LARCH_FTYPE (2, (V8HI, V8HI, UV8HI)) ++DEF_LARCH_FTYPE (2, (UV8HI, V8HI, UV8HI)) ++DEF_LARCH_FTYPE (2, (V4SI, V4SI, UV4SI)) ++DEF_LARCH_FTYPE (2, (UV4SI, V4SI, UV4SI)) ++DEF_LARCH_FTYPE (2, (V4SI, V16QI, V16QI)) ++DEF_LARCH_FTYPE (2, (V4SI, UV16QI, V16QI)) ++DEF_LARCH_FTYPE (2, (UV4SI, UV16QI, UV16QI)) ++DEF_LARCH_FTYPE (2, (V2DI, V2DI, UV2DI)) ++DEF_LARCH_FTYPE (2, (UV2DI, UV8HI, UV8HI)) ++DEF_LARCH_FTYPE (2, (V4SI, UV8HI, V8HI)) ++DEF_LARCH_FTYPE (2, (V2DI, UV4SI, V4SI)) ++DEF_LARCH_FTYPE (2, (V2DI, UV2DI, V2DI)) ++DEF_LARCH_FTYPE (2, (V2DI, V8HI, V8HI)) ++DEF_LARCH_FTYPE (2, (V2DI, UV8HI, V8HI)) 
++DEF_LARCH_FTYPE (2, (UV2DI, V2DI, UV2DI)) ++DEF_LARCH_FTYPE (3, (V4SI, V4SI, UV8HI, V8HI)) ++DEF_LARCH_FTYPE (3, (V2DI, V2DI, UV2DI, V2DI)) ++DEF_LARCH_FTYPE (3, (V2DI, V2DI, UV4SI, V4SI)) ++DEF_LARCH_FTYPE (3, (V2DI, V2DI, V8HI, V8HI)) ++DEF_LARCH_FTYPE (3, (V2DI, V2DI, UV8HI, V8HI)) ++DEF_LARCH_FTYPE (3, (UV2DI, UV2DI, UV8HI, UV8HI)) ++DEF_LARCH_FTYPE (3, (V8HI, V8HI, UV16QI, V16QI)) ++DEF_LARCH_FTYPE (3, (V4SI, V4SI, V16QI, V16QI)) ++DEF_LARCH_FTYPE (3, (V4SI, V4SI, UV16QI, V16QI)) ++DEF_LARCH_FTYPE (3, (UV4SI, UV4SI, UV16QI, UV16QI)) ++ ++DEF_LARCH_FTYPE(4,(VOID,V16QI,CVPOINTER,SI,UQI)) ++DEF_LARCH_FTYPE(4,(VOID,V8HI,CVPOINTER,SI,UQI)) ++DEF_LARCH_FTYPE(4,(VOID,V4SI,CVPOINTER,SI,UQI)) ++DEF_LARCH_FTYPE(4,(VOID,V2DI,CVPOINTER,SI,UQI)) ++ ++DEF_LARCH_FTYPE (2, (DI, V16QI, UQI)) ++DEF_LARCH_FTYPE (2, (DI, V8HI, UQI)) ++DEF_LARCH_FTYPE (2, (DI, V4SI, UQI)) ++DEF_LARCH_FTYPE (2, (UDI, V16QI, UQI)) ++DEF_LARCH_FTYPE (2, (UDI, V8HI, UQI)) ++DEF_LARCH_FTYPE (2, (UDI, V4SI, UQI)) ++ ++DEF_LARCH_FTYPE (3, (UV16QI, UV16QI, V16QI, USI)) ++DEF_LARCH_FTYPE (3, (UV8HI, UV8HI, V8HI, USI)) ++DEF_LARCH_FTYPE (3, (UV4SI, UV4SI, V4SI, USI)) ++DEF_LARCH_FTYPE (3, (UV2DI, UV2DI, V2DI, USI)) ++ ++DEF_LARCH_FTYPE (1, (BOOLEAN,V16QI)) ++DEF_LARCH_FTYPE(2,(V16QI,CVPOINTER,CVPOINTER)) ++DEF_LARCH_FTYPE(3,(VOID,V16QI,CVPOINTER,CVPOINTER)) ++ ++DEF_LARCH_FTYPE (3, (V16QI, V16QI, SI, UQI)) ++DEF_LARCH_FTYPE (3, (V2DI, V2DI, SI, UQI)) ++DEF_LARCH_FTYPE (3, (V2DI, V2DI, DI, UQI)) ++DEF_LARCH_FTYPE (3, (V4SI, V4SI, SI, UQI)) +diff --git a/gcc/config/loongarch/lsxintrin.h b/gcc/config/loongarch/lsxintrin.h +new file mode 100644 +index 000000000..ec4206990 +--- /dev/null ++++ b/gcc/config/loongarch/lsxintrin.h +@@ -0,0 +1,5181 @@ ++/* LARCH Loongson SX intrinsics include file. ++ ++ Copyright (C) 2018 Free Software Foundation, Inc. ++ ++ This file is part of GCC. 
++ ++ GCC is free software; you can redistribute it and/or modify it ++ under the terms of the GNU General Public License as published ++ by the Free Software Foundation; either version 3, or (at your ++ option) any later version. ++ ++ GCC is distributed in the hope that it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY ++ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public ++ License for more details. ++ ++ Under Section 7 of GPL version 3, you are granted additional ++ permissions described in the GCC Runtime Library Exception, version ++ 3.1, as published by the Free Software Foundation. ++ ++ You should have received a copy of the GNU General Public License and ++ a copy of the GCC Runtime Library Exception along with this program; ++ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see ++ . */ ++ ++#ifndef _GCC_LOONGSON_SXINTRIN_H ++#define _GCC_LOONGSON_SXINTRIN_H 1 ++ ++#if defined(__loongarch_sx) ++typedef signed char v16i8 __attribute__ ((vector_size(16), aligned(16))); ++typedef signed char v16i8_b __attribute__ ((vector_size(16), aligned(1))); ++typedef unsigned char v16u8 __attribute__ ((vector_size(16), aligned(16))); ++typedef unsigned char v16u8_b __attribute__ ((vector_size(16), aligned(1))); ++typedef short v8i16 __attribute__ ((vector_size(16), aligned(16))); ++typedef short v8i16_h __attribute__ ((vector_size(16), aligned(2))); ++typedef unsigned short v8u16 __attribute__ ((vector_size(16), aligned(16))); ++typedef unsigned short v8u16_h __attribute__ ((vector_size(16), aligned(2))); ++typedef int v4i32 __attribute__ ((vector_size(16), aligned(16))); ++typedef int v4i32_w __attribute__ ((vector_size(16), aligned(4))); ++typedef unsigned int v4u32 __attribute__ ((vector_size(16), aligned(16))); ++typedef unsigned int v4u32_w __attribute__ ((vector_size(16), aligned(4))); ++typedef long long v2i64 __attribute__ ((vector_size(16), aligned(16))); ++typedef long long 
v2i64_d __attribute__ ((vector_size(16), aligned(8))); ++typedef unsigned long long v2u64 __attribute__ ((vector_size(16), aligned(16))); ++typedef unsigned long long v2u64_d __attribute__ ((vector_size(16), aligned(8))); ++typedef float v4f32 __attribute__ ((vector_size(16), aligned(16))); ++typedef float v4f32_w __attribute__ ((vector_size(16), aligned(4))); ++typedef double v2f64 __attribute__ ((vector_size(16), aligned(16))); ++typedef double v2f64_d __attribute__ ((vector_size(16), aligned(8))); ++ ++typedef long long __m128i __attribute__ ((__vector_size__ (16), __may_alias__)); ++typedef float __m128 __attribute__ ((__vector_size__ (16), __may_alias__)); ++typedef double __m128d __attribute__ ((__vector_size__ (16), __may_alias__)); ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsll_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsll_b ((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsll_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsll_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsll_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsll_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsll_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsll_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, ui3. */ ++/* Data types in instruction templates: V16QI, V16QI, UQI. */ ++#define __lsx_vslli_b(/*__m128i*/ _1, /*ui3*/ _2) \ ++ ((__m128i)__builtin_lsx_vslli_b ((v16i8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: V8HI, V8HI, UQI. */ ++#define __lsx_vslli_h(/*__m128i*/ _1, /*ui4*/ _2) \ ++ ((__m128i)__builtin_lsx_vslli_h ((v8i16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V4SI, V4SI, UQI. */ ++#define __lsx_vslli_w(/*__m128i*/ _1, /*ui5*/ _2) \ ++ ((__m128i)__builtin_lsx_vslli_w ((v4i32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: V2DI, V2DI, UQI. */ ++#define __lsx_vslli_d(/*__m128i*/ _1, /*ui6*/ _2) \ ++ ((__m128i)__builtin_lsx_vslli_d ((v2i64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsra_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsra_b ((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsra_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsra_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsra_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsra_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsra_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsra_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, ui3. */ ++/* Data types in instruction templates: V16QI, V16QI, UQI. */ ++#define __lsx_vsrai_b(/*__m128i*/ _1, /*ui3*/ _2) \ ++ ((__m128i)__builtin_lsx_vsrai_b ((v16i8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: V8HI, V8HI, UQI. */ ++#define __lsx_vsrai_h(/*__m128i*/ _1, /*ui4*/ _2) \ ++ ((__m128i)__builtin_lsx_vsrai_h ((v8i16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V4SI, V4SI, UQI. */ ++#define __lsx_vsrai_w(/*__m128i*/ _1, /*ui5*/ _2) \ ++ ((__m128i)__builtin_lsx_vsrai_w ((v4i32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: V2DI, V2DI, UQI. */ ++#define __lsx_vsrai_d(/*__m128i*/ _1, /*ui6*/ _2) \ ++ ((__m128i)__builtin_lsx_vsrai_d ((v2i64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsrar_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsrar_b ((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsrar_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsrar_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsrar_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsrar_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsrar_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsrar_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, ui3. */ ++/* Data types in instruction templates: V16QI, V16QI, UQI. */ ++#define __lsx_vsrari_b(/*__m128i*/ _1, /*ui3*/ _2) \ ++ ((__m128i)__builtin_lsx_vsrari_b ((v16i8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: V8HI, V8HI, UQI. */ ++#define __lsx_vsrari_h(/*__m128i*/ _1, /*ui4*/ _2) \ ++ ((__m128i)__builtin_lsx_vsrari_h ((v8i16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V4SI, V4SI, UQI. */ ++#define __lsx_vsrari_w(/*__m128i*/ _1, /*ui5*/ _2) \ ++ ((__m128i)__builtin_lsx_vsrari_w ((v4i32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: V2DI, V2DI, UQI. */ ++#define __lsx_vsrari_d(/*__m128i*/ _1, /*ui6*/ _2) \ ++ ((__m128i)__builtin_lsx_vsrari_d ((v2i64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsrl_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsrl_b ((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsrl_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsrl_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsrl_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsrl_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsrl_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsrl_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, ui3. */ ++/* Data types in instruction templates: V16QI, V16QI, UQI. */ ++#define __lsx_vsrli_b(/*__m128i*/ _1, /*ui3*/ _2) \ ++ ((__m128i)__builtin_lsx_vsrli_b ((v16i8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: V8HI, V8HI, UQI. */ ++#define __lsx_vsrli_h(/*__m128i*/ _1, /*ui4*/ _2) \ ++ ((__m128i)__builtin_lsx_vsrli_h ((v8i16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V4SI, V4SI, UQI. */ ++#define __lsx_vsrli_w(/*__m128i*/ _1, /*ui5*/ _2) \ ++ ((__m128i)__builtin_lsx_vsrli_w ((v4i32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: V2DI, V2DI, UQI. 
*/ ++#define __lsx_vsrli_d(/*__m128i*/ _1, /*ui6*/ _2) \ ++ ((__m128i)__builtin_lsx_vsrli_d ((v2i64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsrlr_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsrlr_b ((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsrlr_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsrlr_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsrlr_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsrlr_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsrlr_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsrlr_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, ui3. */ ++/* Data types in instruction templates: V16QI, V16QI, UQI. */ ++#define __lsx_vsrlri_b(/*__m128i*/ _1, /*ui3*/ _2) \ ++ ((__m128i)__builtin_lsx_vsrlri_b ((v16i8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: V8HI, V8HI, UQI. */ ++#define __lsx_vsrlri_h(/*__m128i*/ _1, /*ui4*/ _2) \ ++ ((__m128i)__builtin_lsx_vsrlri_h ((v8i16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V4SI, V4SI, UQI. 
*/ ++#define __lsx_vsrlri_w(/*__m128i*/ _1, /*ui5*/ _2) \ ++ ((__m128i)__builtin_lsx_vsrlri_w ((v4i32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: V2DI, V2DI, UQI. */ ++#define __lsx_vsrlri_d(/*__m128i*/ _1, /*ui6*/ _2) \ ++ ((__m128i)__builtin_lsx_vsrlri_d ((v2i64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vbitclr_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vbitclr_b ((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vbitclr_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vbitclr_h ((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vbitclr_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vbitclr_w ((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vbitclr_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vbitclr_d ((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, ui3. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UQI. */ ++#define __lsx_vbitclri_b(/*__m128i*/ _1, /*ui3*/ _2) \ ++ ((__m128i)__builtin_lsx_vbitclri_b ((v16u8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui4. 
*/ ++/* Data types in instruction templates: UV8HI, UV8HI, UQI. */ ++#define __lsx_vbitclri_h(/*__m128i*/ _1, /*ui4*/ _2) \ ++ ((__m128i)__builtin_lsx_vbitclri_h ((v8u16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: UV4SI, UV4SI, UQI. */ ++#define __lsx_vbitclri_w(/*__m128i*/ _1, /*ui5*/ _2) \ ++ ((__m128i)__builtin_lsx_vbitclri_w ((v4u32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UQI. */ ++#define __lsx_vbitclri_d(/*__m128i*/ _1, /*ui6*/ _2) \ ++ ((__m128i)__builtin_lsx_vbitclri_d ((v2u64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vbitset_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vbitset_b ((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vbitset_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vbitset_h ((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vbitset_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vbitset_w ((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vbitset_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vbitset_d ((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, ui3. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UQI. */ ++#define __lsx_vbitseti_b(/*__m128i*/ _1, /*ui3*/ _2) \ ++ ((__m128i)__builtin_lsx_vbitseti_b ((v16u8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: UV8HI, UV8HI, UQI. */ ++#define __lsx_vbitseti_h(/*__m128i*/ _1, /*ui4*/ _2) \ ++ ((__m128i)__builtin_lsx_vbitseti_h ((v8u16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: UV4SI, UV4SI, UQI. */ ++#define __lsx_vbitseti_w(/*__m128i*/ _1, /*ui5*/ _2) \ ++ ((__m128i)__builtin_lsx_vbitseti_w ((v4u32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UQI. */ ++#define __lsx_vbitseti_d(/*__m128i*/ _1, /*ui6*/ _2) \ ++ ((__m128i)__builtin_lsx_vbitseti_d ((v2u64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vbitrev_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vbitrev_b ((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vbitrev_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vbitrev_h ((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vbitrev_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vbitrev_w ((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vbitrev_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vbitrev_d ((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, ui3. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UQI. */ ++#define __lsx_vbitrevi_b(/*__m128i*/ _1, /*ui3*/ _2) \ ++ ((__m128i)__builtin_lsx_vbitrevi_b ((v16u8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: UV8HI, UV8HI, UQI. */ ++#define __lsx_vbitrevi_h(/*__m128i*/ _1, /*ui4*/ _2) \ ++ ((__m128i)__builtin_lsx_vbitrevi_h ((v8u16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: UV4SI, UV4SI, UQI. */ ++#define __lsx_vbitrevi_w(/*__m128i*/ _1, /*ui5*/ _2) \ ++ ((__m128i)__builtin_lsx_vbitrevi_w ((v4u32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UQI. */ ++#define __lsx_vbitrevi_d(/*__m128i*/ _1, /*ui6*/ _2) \ ++ ((__m128i)__builtin_lsx_vbitrevi_d ((v2u64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vadd_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vadd_b ((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vadd_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vadd_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vadd_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vadd_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vadd_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vadd_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V16QI, V16QI, UQI. */ ++#define __lsx_vaddi_bu(/*__m128i*/ _1, /*ui5*/ _2) \ ++ ((__m128i)__builtin_lsx_vaddi_bu ((v16i8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V8HI, V8HI, UQI. */ ++#define __lsx_vaddi_hu(/*__m128i*/ _1, /*ui5*/ _2) \ ++ ((__m128i)__builtin_lsx_vaddi_hu ((v8i16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V4SI, V4SI, UQI. */ ++#define __lsx_vaddi_wu(/*__m128i*/ _1, /*ui5*/ _2) \ ++ ((__m128i)__builtin_lsx_vaddi_wu ((v4i32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V2DI, V2DI, UQI. */ ++#define __lsx_vaddi_du(/*__m128i*/ _1, /*ui5*/ _2) \ ++ ((__m128i)__builtin_lsx_vaddi_du ((v2i64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsub_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsub_b ((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsub_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsub_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsub_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsub_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsub_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsub_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V16QI, V16QI, UQI. */ ++#define __lsx_vsubi_bu(/*__m128i*/ _1, /*ui5*/ _2) \ ++ ((__m128i)__builtin_lsx_vsubi_bu ((v16i8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V8HI, V8HI, UQI. */ ++#define __lsx_vsubi_hu(/*__m128i*/ _1, /*ui5*/ _2) \ ++ ((__m128i)__builtin_lsx_vsubi_hu ((v8i16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V4SI, V4SI, UQI. */ ++#define __lsx_vsubi_wu(/*__m128i*/ _1, /*ui5*/ _2) \ ++ ((__m128i)__builtin_lsx_vsubi_wu ((v4i32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V2DI, V2DI, UQI. 
*/ ++#define __lsx_vsubi_du(/*__m128i*/ _1, /*ui5*/ _2) \ ++ ((__m128i)__builtin_lsx_vsubi_du ((v2i64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmax_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmax_b ((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmax_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmax_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmax_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmax_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmax_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmax_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, si5. */ ++/* Data types in instruction templates: V16QI, V16QI, QI. */ ++#define __lsx_vmaxi_b(/*__m128i*/ _1, /*si5*/ _2) \ ++ ((__m128i)__builtin_lsx_vmaxi_b ((v16i8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, si5. */ ++/* Data types in instruction templates: V8HI, V8HI, QI. */ ++#define __lsx_vmaxi_h(/*__m128i*/ _1, /*si5*/ _2) \ ++ ((__m128i)__builtin_lsx_vmaxi_h ((v8i16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, si5. */ ++/* Data types in instruction templates: V4SI, V4SI, QI. 
*/ ++#define __lsx_vmaxi_w(/*__m128i*/ _1, /*si5*/ _2) \ ++ ((__m128i)__builtin_lsx_vmaxi_w ((v4i32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, si5. */ ++/* Data types in instruction templates: V2DI, V2DI, QI. */ ++#define __lsx_vmaxi_d(/*__m128i*/ _1, /*si5*/ _2) \ ++ ((__m128i)__builtin_lsx_vmaxi_d ((v2i64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmax_bu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmax_bu ((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmax_hu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmax_hu ((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmax_wu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmax_wu ((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmax_du (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmax_du ((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UQI. */ ++#define __lsx_vmaxi_bu(/*__m128i*/ _1, /*ui5*/ _2) \ ++ ((__m128i)__builtin_lsx_vmaxi_bu ((v16u8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: UV8HI, UV8HI, UQI. 
*/ ++#define __lsx_vmaxi_hu(/*__m128i*/ _1, /*ui5*/ _2) \ ++ ((__m128i)__builtin_lsx_vmaxi_hu ((v8u16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: UV4SI, UV4SI, UQI. */ ++#define __lsx_vmaxi_wu(/*__m128i*/ _1, /*ui5*/ _2) \ ++ ((__m128i)__builtin_lsx_vmaxi_wu ((v4u32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UQI. */ ++#define __lsx_vmaxi_du(/*__m128i*/ _1, /*ui5*/ _2) \ ++ ((__m128i)__builtin_lsx_vmaxi_du ((v2u64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmin_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmin_b ((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmin_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmin_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmin_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmin_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmin_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmin_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, si5. */ ++/* Data types in instruction templates: V16QI, V16QI, QI. 
*/ ++#define __lsx_vmini_b(/*__m128i*/ _1, /*si5*/ _2) \ ++ ((__m128i)__builtin_lsx_vmini_b ((v16i8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, si5. */ ++/* Data types in instruction templates: V8HI, V8HI, QI. */ ++#define __lsx_vmini_h(/*__m128i*/ _1, /*si5*/ _2) \ ++ ((__m128i)__builtin_lsx_vmini_h ((v8i16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, si5. */ ++/* Data types in instruction templates: V4SI, V4SI, QI. */ ++#define __lsx_vmini_w(/*__m128i*/ _1, /*si5*/ _2) \ ++ ((__m128i)__builtin_lsx_vmini_w ((v4i32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, si5. */ ++/* Data types in instruction templates: V2DI, V2DI, QI. */ ++#define __lsx_vmini_d(/*__m128i*/ _1, /*si5*/ _2) \ ++ ((__m128i)__builtin_lsx_vmini_d ((v2i64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmin_bu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmin_bu ((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmin_hu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmin_hu ((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmin_wu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmin_wu ((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmin_du (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmin_du ((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UQI. */ ++#define __lsx_vmini_bu(/*__m128i*/ _1, /*ui5*/ _2) \ ++ ((__m128i)__builtin_lsx_vmini_bu ((v16u8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: UV8HI, UV8HI, UQI. */ ++#define __lsx_vmini_hu(/*__m128i*/ _1, /*ui5*/ _2) \ ++ ((__m128i)__builtin_lsx_vmini_hu ((v8u16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: UV4SI, UV4SI, UQI. */ ++#define __lsx_vmini_wu(/*__m128i*/ _1, /*ui5*/ _2) \ ++ ((__m128i)__builtin_lsx_vmini_wu ((v4u32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UQI. */ ++#define __lsx_vmini_du(/*__m128i*/ _1, /*ui5*/ _2) \ ++ ((__m128i)__builtin_lsx_vmini_du ((v2u64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vseq_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vseq_b ((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vseq_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vseq_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vseq_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vseq_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vseq_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vseq_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, si5. */ ++/* Data types in instruction templates: V16QI, V16QI, QI. */ ++#define __lsx_vseqi_b(/*__m128i*/ _1, /*si5*/ _2) \ ++ ((__m128i)__builtin_lsx_vseqi_b ((v16i8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, si5. */ ++/* Data types in instruction templates: V8HI, V8HI, QI. */ ++#define __lsx_vseqi_h(/*__m128i*/ _1, /*si5*/ _2) \ ++ ((__m128i)__builtin_lsx_vseqi_h ((v8i16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, si5. */ ++/* Data types in instruction templates: V4SI, V4SI, QI. */ ++#define __lsx_vseqi_w(/*__m128i*/ _1, /*si5*/ _2) \ ++ ((__m128i)__builtin_lsx_vseqi_w ((v4i32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, si5. */ ++/* Data types in instruction templates: V2DI, V2DI, QI. */ ++#define __lsx_vseqi_d(/*__m128i*/ _1, /*si5*/ _2) \ ++ ((__m128i)__builtin_lsx_vseqi_d ((v2i64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, si5. */ ++/* Data types in instruction templates: V16QI, V16QI, QI. */ ++#define __lsx_vslti_b(/*__m128i*/ _1, /*si5*/ _2) \ ++ ((__m128i)__builtin_lsx_vslti_b ((v16i8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vslt_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vslt_b ((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vslt_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vslt_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vslt_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vslt_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vslt_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vslt_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, si5. */ ++/* Data types in instruction templates: V8HI, V8HI, QI. */ ++#define __lsx_vslti_h(/*__m128i*/ _1, /*si5*/ _2) \ ++ ((__m128i)__builtin_lsx_vslti_h ((v8i16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, si5. */ ++/* Data types in instruction templates: V4SI, V4SI, QI. */ ++#define __lsx_vslti_w(/*__m128i*/ _1, /*si5*/ _2) \ ++ ((__m128i)__builtin_lsx_vslti_w ((v4i32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, si5. */ ++/* Data types in instruction templates: V2DI, V2DI, QI. */ ++#define __lsx_vslti_d(/*__m128i*/ _1, /*si5*/ _2) \ ++ ((__m128i)__builtin_lsx_vslti_d ((v2i64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, UV16QI, UV16QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vslt_bu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vslt_bu ((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vslt_hu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vslt_hu ((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vslt_wu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vslt_wu ((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vslt_du (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vslt_du ((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V16QI, UV16QI, UQI. */ ++#define __lsx_vslti_bu(/*__m128i*/ _1, /*ui5*/ _2) \ ++ ((__m128i)__builtin_lsx_vslti_bu ((v16u8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V8HI, UV8HI, UQI. */ ++#define __lsx_vslti_hu(/*__m128i*/ _1, /*ui5*/ _2) \ ++ ((__m128i)__builtin_lsx_vslti_hu ((v8u16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V4SI, UV4SI, UQI. */ ++#define __lsx_vslti_wu(/*__m128i*/ _1, /*ui5*/ _2) \ ++ ((__m128i)__builtin_lsx_vslti_wu ((v4u32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V2DI, UV2DI, UQI. 
*/ ++#define __lsx_vslti_du(/*__m128i*/ _1, /*ui5*/ _2) \ ++ ((__m128i)__builtin_lsx_vslti_du ((v2u64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsle_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsle_b ((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsle_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsle_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsle_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsle_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsle_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsle_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, si5. */ ++/* Data types in instruction templates: V16QI, V16QI, QI. */ ++#define __lsx_vslei_b(/*__m128i*/ _1, /*si5*/ _2) \ ++ ((__m128i)__builtin_lsx_vslei_b ((v16i8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, si5. */ ++/* Data types in instruction templates: V8HI, V8HI, QI. */ ++#define __lsx_vslei_h(/*__m128i*/ _1, /*si5*/ _2) \ ++ ((__m128i)__builtin_lsx_vslei_h ((v8i16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, si5. */ ++/* Data types in instruction templates: V4SI, V4SI, QI. 
*/ ++#define __lsx_vslei_w(/*__m128i*/ _1, /*si5*/ _2) \ ++ ((__m128i)__builtin_lsx_vslei_w ((v4i32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, si5. */ ++/* Data types in instruction templates: V2DI, V2DI, QI. */ ++#define __lsx_vslei_d(/*__m128i*/ _1, /*si5*/ _2) \ ++ ((__m128i)__builtin_lsx_vslei_d ((v2i64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsle_bu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsle_bu ((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsle_hu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsle_hu ((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsle_wu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsle_wu ((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsle_du (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsle_du ((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V16QI, UV16QI, UQI. */ ++#define __lsx_vslei_bu(/*__m128i*/ _1, /*ui5*/ _2) \ ++ ((__m128i)__builtin_lsx_vslei_bu ((v16u8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V8HI, UV8HI, UQI. 
*/ ++#define __lsx_vslei_hu(/*__m128i*/ _1, /*ui5*/ _2) \ ++ ((__m128i)__builtin_lsx_vslei_hu ((v8u16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V4SI, UV4SI, UQI. */ ++#define __lsx_vslei_wu(/*__m128i*/ _1, /*ui5*/ _2) \ ++ ((__m128i)__builtin_lsx_vslei_wu ((v4u32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V2DI, UV2DI, UQI. */ ++#define __lsx_vslei_du(/*__m128i*/ _1, /*ui5*/ _2) \ ++ ((__m128i)__builtin_lsx_vslei_du ((v2u64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui3. */ ++/* Data types in instruction templates: V16QI, V16QI, UQI. */ ++#define __lsx_vsat_b(/*__m128i*/ _1, /*ui3*/ _2) \ ++ ((__m128i)__builtin_lsx_vsat_b ((v16i8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: V8HI, V8HI, UQI. */ ++#define __lsx_vsat_h(/*__m128i*/ _1, /*ui4*/ _2) \ ++ ((__m128i)__builtin_lsx_vsat_h ((v8i16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V4SI, V4SI, UQI. */ ++#define __lsx_vsat_w(/*__m128i*/ _1, /*ui5*/ _2) \ ++ ((__m128i)__builtin_lsx_vsat_w ((v4i32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: V2DI, V2DI, UQI. */ ++#define __lsx_vsat_d(/*__m128i*/ _1, /*ui6*/ _2) \ ++ ((__m128i)__builtin_lsx_vsat_d ((v2i64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui3. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UQI. */ ++#define __lsx_vsat_bu(/*__m128i*/ _1, /*ui3*/ _2) \ ++ ((__m128i)__builtin_lsx_vsat_bu ((v16u8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: UV8HI, UV8HI, UQI. */ ++#define __lsx_vsat_hu(/*__m128i*/ _1, /*ui4*/ _2) \ ++ ((__m128i)__builtin_lsx_vsat_hu ((v8u16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. 
*/ ++/* Data types in instruction templates: UV4SI, UV4SI, UQI. */ ++#define __lsx_vsat_wu(/*__m128i*/ _1, /*ui5*/ _2) \ ++ ((__m128i)__builtin_lsx_vsat_wu ((v4u32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UQI. */ ++#define __lsx_vsat_du(/*__m128i*/ _1, /*ui6*/ _2) \ ++ ((__m128i)__builtin_lsx_vsat_du ((v2u64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vadda_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vadda_b ((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vadda_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vadda_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vadda_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vadda_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vadda_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vadda_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsadd_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsadd_b ((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsadd_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsadd_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsadd_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsadd_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsadd_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsadd_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsadd_bu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsadd_bu ((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsadd_hu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsadd_hu ((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsadd_wu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsadd_wu ((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsadd_du (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsadd_du ((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vavg_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vavg_b ((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vavg_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vavg_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vavg_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vavg_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vavg_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vavg_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vavg_bu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vavg_bu ((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vavg_hu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vavg_hu ((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vavg_wu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vavg_wu ((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vavg_du (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vavg_du ((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vavgr_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vavgr_b ((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vavgr_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vavgr_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vavgr_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vavgr_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vavgr_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vavgr_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vavgr_bu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vavgr_bu ((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vavgr_hu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vavgr_hu ((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vavgr_wu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vavgr_wu ((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vavgr_du (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vavgr_du ((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssub_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssub_b ((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssub_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssub_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssub_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssub_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssub_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssub_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssub_bu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssub_bu ((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssub_hu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssub_hu ((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssub_wu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssub_wu ((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssub_du (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssub_du ((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vabsd_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vabsd_b ((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vabsd_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vabsd_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vabsd_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vabsd_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vabsd_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vabsd_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vabsd_bu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vabsd_bu ((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vabsd_hu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vabsd_hu ((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vabsd_wu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vabsd_wu ((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vabsd_du (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vabsd_du ((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmul_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmul_b ((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmul_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmul_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmul_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmul_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmul_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmul_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmadd_b (__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmadd_b ((v16i8)_1, (v16i8)_2, (v16i8)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmadd_h (__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmadd_h ((v8i16)_1, (v8i16)_2, (v8i16)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmadd_w (__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmadd_w ((v4i32)_1, (v4i32)_2, (v4i32)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmadd_d (__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmadd_d ((v2i64)_1, (v2i64)_2, (v2i64)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. 
*/ ++/* Data types in instruction templates: V16QI, V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmsub_b (__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmsub_b ((v16i8)_1, (v16i8)_2, (v16i8)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmsub_h (__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmsub_h ((v8i16)_1, (v8i16)_2, (v8i16)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmsub_w (__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmsub_w ((v4i32)_1, (v4i32)_2, (v4i32)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmsub_d (__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmsub_d ((v2i64)_1, (v2i64)_2, (v2i64)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vdiv_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vdiv_b ((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vdiv_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vdiv_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vdiv_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vdiv_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vdiv_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vdiv_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vdiv_bu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vdiv_bu ((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vdiv_hu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vdiv_hu ((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vdiv_wu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vdiv_wu ((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vdiv_du (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vdiv_du ((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vhaddw_h_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vhaddw_h_b ((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vhaddw_w_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vhaddw_w_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vhaddw_d_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vhaddw_d_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV8HI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vhaddw_hu_bu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vhaddw_hu_bu ((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV4SI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vhaddw_wu_hu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vhaddw_wu_hu ((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV2DI, UV4SI, UV4SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vhaddw_du_wu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vhaddw_du_wu ((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vhsubw_h_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vhsubw_h_b ((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vhsubw_w_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vhsubw_w_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vhsubw_d_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vhsubw_d_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vhsubw_hu_bu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vhsubw_hu_bu ((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vhsubw_wu_hu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vhsubw_wu_hu ((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV4SI, UV4SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vhsubw_du_wu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vhsubw_du_wu ((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmod_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmod_b ((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmod_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmod_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmod_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmod_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmod_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmod_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmod_bu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmod_bu ((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmod_hu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmod_hu ((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmod_wu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmod_wu ((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmod_du (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmod_du ((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, rk. */ ++/* Data types in instruction templates: V16QI, V16QI, SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vreplve_b (__m128i _1, int _2) ++{ ++ return (__m128i)__builtin_lsx_vreplve_b ((v16i8)_1, (int)_2); ++} ++ ++/* Assembly instruction format: vd, vj, rk. */ ++/* Data types in instruction templates: V8HI, V8HI, SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vreplve_h (__m128i _1, int _2) ++{ ++ return (__m128i)__builtin_lsx_vreplve_h ((v8i16)_1, (int)_2); ++} ++ ++/* Assembly instruction format: vd, vj, rk. */ ++/* Data types in instruction templates: V4SI, V4SI, SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vreplve_w (__m128i _1, int _2) ++{ ++ return (__m128i)__builtin_lsx_vreplve_w ((v4i32)_1, (int)_2); ++} ++ ++/* Assembly instruction format: vd, vj, rk. */ ++/* Data types in instruction templates: V2DI, V2DI, SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vreplve_d (__m128i _1, int _2) ++{ ++ return (__m128i)__builtin_lsx_vreplve_d ((v2i64)_1, (int)_2); ++} ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: V16QI, V16QI, UQI. */ ++#define __lsx_vreplvei_b(/*__m128i*/ _1, /*ui4*/ _2) \ ++ ((__m128i)__builtin_lsx_vreplvei_b ((v16i8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui3. */ ++/* Data types in instruction templates: V8HI, V8HI, UQI. */ ++#define __lsx_vreplvei_h(/*__m128i*/ _1, /*ui3*/ _2) \ ++ ((__m128i)__builtin_lsx_vreplvei_h ((v8i16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui2. */ ++/* Data types in instruction templates: V4SI, V4SI, UQI. */ ++#define __lsx_vreplvei_w(/*__m128i*/ _1, /*ui2*/ _2) \ ++ ((__m128i)__builtin_lsx_vreplvei_w ((v4i32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui1. */ ++/* Data types in instruction templates: V2DI, V2DI, UQI. */ ++#define __lsx_vreplvei_d(/*__m128i*/ _1, /*ui1*/ _2) \ ++ ((__m128i)__builtin_lsx_vreplvei_d ((v2i64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vpickev_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vpickev_b ((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vpickev_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vpickev_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vpickev_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vpickev_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vpickev_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vpickev_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vpickod_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vpickod_b ((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vpickod_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vpickod_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vpickod_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vpickod_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vpickod_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vpickod_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vilvh_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vilvh_b ((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vilvh_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vilvh_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vilvh_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vilvh_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vilvh_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vilvh_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vilvl_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vilvl_b ((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vilvl_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vilvl_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vilvl_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vilvl_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vilvl_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vilvl_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vpackev_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vpackev_b ((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vpackev_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vpackev_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vpackev_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vpackev_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vpackev_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vpackev_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vpackod_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vpackod_b ((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vpackod_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vpackod_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vpackod_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vpackod_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vpackod_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vpackod_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vshuf_h (__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vshuf_h ((v8i16)_1, (v8i16)_2, (v8i16)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vshuf_w (__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vshuf_w ((v4i32)_1, (v4i32)_2, (v4i32)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. 
*/ ++/* Data types in instruction templates: V2DI, V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vshuf_d (__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vshuf_d ((v2i64)_1, (v2i64)_2, (v2i64)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vand_v (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vand_v ((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, ui8. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UQI. */ ++#define __lsx_vandi_b(/*__m128i*/ _1, /*ui8*/ _2) \ ++ ((__m128i)__builtin_lsx_vandi_b ((v16u8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vor_v (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vor_v ((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, ui8. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UQI. */ ++#define __lsx_vori_b(/*__m128i*/ _1, /*ui8*/ _2) \ ++ ((__m128i)__builtin_lsx_vori_b ((v16u8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vnor_v (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vnor_v ((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, ui8. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UQI. 
*/ ++#define __lsx_vnori_b(/*__m128i*/ _1, /*ui8*/ _2) \ ++ ((__m128i)__builtin_lsx_vnori_b ((v16u8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vxor_v (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vxor_v ((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, ui8. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UQI. */ ++#define __lsx_vxori_b(/*__m128i*/ _1, /*ui8*/ _2) \ ++ ((__m128i)__builtin_lsx_vxori_b ((v16u8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk, va. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vbitsel_v (__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vbitsel_v ((v16u8)_1, (v16u8)_2, (v16u8)_3); ++} ++ ++/* Assembly instruction format: vd, vj, ui8. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UV16QI, USI. */ ++#define __lsx_vbitseli_b(/*__m128i*/ _1, /*__m128i*/ _2, /*ui8*/ _3) \ ++ ((__m128i)__builtin_lsx_vbitseli_b ((v16u8)(_1), (v16u8)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui8. */ ++/* Data types in instruction templates: V16QI, V16QI, USI. */ ++#define __lsx_vshuf4i_b(/*__m128i*/ _1, /*ui8*/ _2) \ ++ ((__m128i)__builtin_lsx_vshuf4i_b ((v16i8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui8. */ ++/* Data types in instruction templates: V8HI, V8HI, USI. */ ++#define __lsx_vshuf4i_h(/*__m128i*/ _1, /*ui8*/ _2) \ ++ ((__m128i)__builtin_lsx_vshuf4i_h ((v8i16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui8. */ ++/* Data types in instruction templates: V4SI, V4SI, USI. 
*/ ++#define __lsx_vshuf4i_w(/*__m128i*/ _1, /*ui8*/ _2) \ ++ ((__m128i)__builtin_lsx_vshuf4i_w ((v4i32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, rj. */ ++/* Data types in instruction templates: V16QI, SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vreplgr2vr_b (int _1) ++{ ++ return (__m128i)__builtin_lsx_vreplgr2vr_b ((int)_1); ++} ++ ++/* Assembly instruction format: vd, rj. */ ++/* Data types in instruction templates: V8HI, SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vreplgr2vr_h (int _1) ++{ ++ return (__m128i)__builtin_lsx_vreplgr2vr_h ((int)_1); ++} ++ ++/* Assembly instruction format: vd, rj. */ ++/* Data types in instruction templates: V4SI, SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vreplgr2vr_w (int _1) ++{ ++ return (__m128i)__builtin_lsx_vreplgr2vr_w ((int)_1); ++} ++ ++/* Assembly instruction format: vd, rj. */ ++/* Data types in instruction templates: V2DI, DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vreplgr2vr_d (long int _1) ++{ ++ return (__m128i)__builtin_lsx_vreplgr2vr_d ((long int)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vpcnt_b (__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vpcnt_b ((v16i8)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vpcnt_h (__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vpcnt_h ((v8i16)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SI, V4SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vpcnt_w (__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vpcnt_w ((v4i32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vpcnt_d (__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vpcnt_d ((v2i64)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vclo_b (__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vclo_b ((v16i8)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vclo_h (__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vclo_h ((v8i16)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vclo_w (__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vclo_w ((v4i32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vclo_d (__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vclo_d ((v2i64)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vclz_b (__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vclz_b ((v16i8)_1); ++} ++ ++/* Assembly instruction format: vd, vj. 
*/ ++/* Data types in instruction templates: V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vclz_h (__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vclz_h ((v8i16)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vclz_w (__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vclz_w ((v4i32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vclz_d (__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vclz_d ((v2i64)_1); ++} ++ ++/* Assembly instruction format: rd, vj, ui4. */ ++/* Data types in instruction templates: SI, V16QI, UQI. */ ++#define __lsx_vpickve2gr_b(/*__m128i*/ _1, /*ui4*/ _2) \ ++ ((int)__builtin_lsx_vpickve2gr_b ((v16i8)(_1), (_2))) ++ ++/* Assembly instruction format: rd, vj, ui3. */ ++/* Data types in instruction templates: SI, V8HI, UQI. */ ++#define __lsx_vpickve2gr_h(/*__m128i*/ _1, /*ui3*/ _2) \ ++ ((int)__builtin_lsx_vpickve2gr_h ((v8i16)(_1), (_2))) ++ ++/* Assembly instruction format: rd, vj, ui2. */ ++/* Data types in instruction templates: SI, V4SI, UQI. */ ++#define __lsx_vpickve2gr_w(/*__m128i*/ _1, /*ui2*/ _2) \ ++ ((int)__builtin_lsx_vpickve2gr_w ((v4i32)(_1), (_2))) ++ ++/* Assembly instruction format: rd, vj, ui1. */ ++/* Data types in instruction templates: DI, V2DI, UQI. */ ++#define __lsx_vpickve2gr_d(/*__m128i*/ _1, /*ui1*/ _2) \ ++ ((long int)__builtin_lsx_vpickve2gr_d ((v2i64)(_1), (_2))) ++ ++/* Assembly instruction format: rd, vj, ui4. */ ++/* Data types in instruction templates: USI, V16QI, UQI. 
*/ ++#define __lsx_vpickve2gr_bu(/*__m128i*/ _1, /*ui4*/ _2) \ ++ ((unsigned int)__builtin_lsx_vpickve2gr_bu ((v16i8)(_1), (_2))) ++ ++/* Assembly instruction format: rd, vj, ui3. */ ++/* Data types in instruction templates: USI, V8HI, UQI. */ ++#define __lsx_vpickve2gr_hu(/*__m128i*/ _1, /*ui3*/ _2) \ ++ ((unsigned int)__builtin_lsx_vpickve2gr_hu ((v8i16)(_1), (_2))) ++ ++/* Assembly instruction format: rd, vj, ui2. */ ++/* Data types in instruction templates: USI, V4SI, UQI. */ ++#define __lsx_vpickve2gr_wu(/*__m128i*/ _1, /*ui2*/ _2) \ ++ ((unsigned int)__builtin_lsx_vpickve2gr_wu ((v4i32)(_1), (_2))) ++ ++/* Assembly instruction format: rd, vj, ui1. */ ++/* Data types in instruction templates: UDI, V2DI, UQI. */ ++#define __lsx_vpickve2gr_du(/*__m128i*/ _1, /*ui1*/ _2) \ ++ ((unsigned long int)__builtin_lsx_vpickve2gr_du ((v2i64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, rj, ui4. */ ++/* Data types in instruction templates: V16QI, V16QI, SI, UQI. */ ++#define __lsx_vinsgr2vr_b(/*__m128i*/ _1, /*int*/ _2, /*ui4*/ _3) \ ++ ((__m128i)__builtin_lsx_vinsgr2vr_b ((v16i8)(_1), (int)(_2), (_3))) ++ ++/* Assembly instruction format: vd, rj, ui3. */ ++/* Data types in instruction templates: V8HI, V8HI, SI, UQI. */ ++#define __lsx_vinsgr2vr_h(/*__m128i*/ _1, /*int*/ _2, /*ui3*/ _3) \ ++ ((__m128i)__builtin_lsx_vinsgr2vr_h ((v8i16)(_1), (int)(_2), (_3))) ++ ++/* Assembly instruction format: vd, rj, ui2. */ ++/* Data types in instruction templates: V4SI, V4SI, SI, UQI. */ ++#define __lsx_vinsgr2vr_w(/*__m128i*/ _1, /*int*/ _2, /*ui2*/ _3) \ ++ ((__m128i)__builtin_lsx_vinsgr2vr_w ((v4i32)(_1), (int)(_2), (_3))) ++ ++/* Assembly instruction format: vd, rj, ui1. */ ++/* Data types in instruction templates: V2DI, V2DI, DI, UQI. */ ++#define __lsx_vinsgr2vr_d(/*__m128i*/ _1, /*long int*/ _2, /*ui1*/ _3) \ ++ ((__m128i)__builtin_lsx_vinsgr2vr_d ((v2i64)(_1), (long int)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, vk. 
*/ ++/* Data types in instruction templates: V4SF, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vfadd_s (__m128 _1, __m128 _2) ++{ ++ return (__m128)__builtin_lsx_vfadd_s ((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DF, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vfadd_d (__m128d _1, __m128d _2) ++{ ++ return (__m128d)__builtin_lsx_vfadd_d ((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SF, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vfsub_s (__m128 _1, __m128 _2) ++{ ++ return (__m128)__builtin_lsx_vfsub_s ((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DF, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vfsub_d (__m128d _1, __m128d _2) ++{ ++ return (__m128d)__builtin_lsx_vfsub_d ((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SF, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vfmul_s (__m128 _1, __m128 _2) ++{ ++ return (__m128)__builtin_lsx_vfmul_s ((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DF, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vfmul_d (__m128d _1, __m128d _2) ++{ ++ return (__m128d)__builtin_lsx_vfmul_d ((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SF, V4SF, V4SF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vfdiv_s (__m128 _1, __m128 _2) ++{ ++ return (__m128)__builtin_lsx_vfdiv_s ((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DF, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vfdiv_d (__m128d _1, __m128d _2) ++{ ++ return (__m128d)__builtin_lsx_vfdiv_d ((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcvt_h_s (__m128 _1, __m128 _2) ++{ ++ return (__m128i)__builtin_lsx_vfcvt_h_s ((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SF, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vfcvt_s_d (__m128d _1, __m128d _2) ++{ ++ return (__m128)__builtin_lsx_vfcvt_s_d ((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SF, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vfmin_s (__m128 _1, __m128 _2) ++{ ++ return (__m128)__builtin_lsx_vfmin_s ((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DF, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vfmin_d (__m128d _1, __m128d _2) ++{ ++ return (__m128d)__builtin_lsx_vfmin_d ((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SF, V4SF, V4SF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vfmina_s (__m128 _1, __m128 _2) ++{ ++ return (__m128)__builtin_lsx_vfmina_s ((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DF, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vfmina_d (__m128d _1, __m128d _2) ++{ ++ return (__m128d)__builtin_lsx_vfmina_d ((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SF, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vfmax_s (__m128 _1, __m128 _2) ++{ ++ return (__m128)__builtin_lsx_vfmax_s ((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DF, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vfmax_d (__m128d _1, __m128d _2) ++{ ++ return (__m128d)__builtin_lsx_vfmax_d ((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SF, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vfmaxa_s (__m128 _1, __m128 _2) ++{ ++ return (__m128)__builtin_lsx_vfmaxa_s ((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DF, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vfmaxa_d (__m128d _1, __m128d _2) ++{ ++ return (__m128d)__builtin_lsx_vfmaxa_d ((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SI, V4SF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfclass_s (__m128 _1) ++{ ++ return (__m128i)__builtin_lsx_vfclass_s ((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfclass_d (__m128d _1) ++{ ++ return (__m128i)__builtin_lsx_vfclass_d ((v2f64)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vfsqrt_s (__m128 _1) ++{ ++ return (__m128)__builtin_lsx_vfsqrt_s ((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vfsqrt_d (__m128d _1) ++{ ++ return (__m128d)__builtin_lsx_vfsqrt_d ((v2f64)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vfrecip_s (__m128 _1) ++{ ++ return (__m128)__builtin_lsx_vfrecip_s ((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vfrecip_d (__m128d _1) ++{ ++ return (__m128d)__builtin_lsx_vfrecip_d ((v2f64)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vfrint_s (__m128 _1) ++{ ++ return (__m128)__builtin_lsx_vfrint_s ((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. 
*/ ++/* Data types in instruction templates: V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vfrint_d (__m128d _1) ++{ ++ return (__m128d)__builtin_lsx_vfrint_d ((v2f64)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vfrsqrt_s (__m128 _1) ++{ ++ return (__m128)__builtin_lsx_vfrsqrt_s ((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vfrsqrt_d (__m128d _1) ++{ ++ return (__m128d)__builtin_lsx_vfrsqrt_d ((v2f64)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vflogb_s (__m128 _1) ++{ ++ return (__m128)__builtin_lsx_vflogb_s ((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vflogb_d (__m128d _1) ++{ ++ return (__m128d)__builtin_lsx_vflogb_d ((v2f64)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SF, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vfcvth_s_h (__m128i _1) ++{ ++ return (__m128)__builtin_lsx_vfcvth_s_h ((v8i16)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DF, V4SF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vfcvth_d_s (__m128 _1) ++{ ++ return (__m128d)__builtin_lsx_vfcvth_d_s ((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SF, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vfcvtl_s_h (__m128i _1) ++{ ++ return (__m128)__builtin_lsx_vfcvtl_s_h ((v8i16)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vfcvtl_d_s (__m128 _1) ++{ ++ return (__m128d)__builtin_lsx_vfcvtl_d_s ((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SI, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftint_w_s (__m128 _1) ++{ ++ return (__m128i)__builtin_lsx_vftint_w_s ((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftint_l_d (__m128d _1) ++{ ++ return (__m128i)__builtin_lsx_vftint_l_d ((v2f64)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: UV4SI, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftint_wu_s (__m128 _1) ++{ ++ return (__m128i)__builtin_lsx_vftint_wu_s ((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: UV2DI, V2DF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftint_lu_d (__m128d _1) ++{ ++ return (__m128i)__builtin_lsx_vftint_lu_d ((v2f64)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SI, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrz_w_s (__m128 _1) ++{ ++ return (__m128i)__builtin_lsx_vftintrz_w_s ((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrz_l_d (__m128d _1) ++{ ++ return (__m128i)__builtin_lsx_vftintrz_l_d ((v2f64)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: UV4SI, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrz_wu_s (__m128 _1) ++{ ++ return (__m128i)__builtin_lsx_vftintrz_wu_s ((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: UV2DI, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrz_lu_d (__m128d _1) ++{ ++ return (__m128i)__builtin_lsx_vftintrz_lu_d ((v2f64)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SF, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vffint_s_w (__m128i _1) ++{ ++ return (__m128)__builtin_lsx_vffint_s_w ((v4i32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DF, V2DI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vffint_d_l (__m128i _1) ++{ ++ return (__m128d)__builtin_lsx_vffint_d_l ((v2i64)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SF, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vffint_s_wu (__m128i _1) ++{ ++ return (__m128)__builtin_lsx_vffint_s_wu ((v4u32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DF, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vffint_d_lu (__m128i _1) ++{ ++ return (__m128d)__builtin_lsx_vffint_d_lu ((v2u64)_1); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vandn_v (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vandn_v ((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vneg_b (__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vneg_b ((v16i8)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vneg_h (__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vneg_h ((v8i16)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SI, V4SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vneg_w (__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vneg_w ((v4i32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vneg_d (__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vneg_d ((v2i64)_1); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmuh_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmuh_b ((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmuh_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmuh_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmuh_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmuh_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmuh_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmuh_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmuh_bu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmuh_bu ((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmuh_hu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmuh_hu ((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmuh_wu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmuh_wu ((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmuh_du (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmuh_du ((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, ui3. */ ++/* Data types in instruction templates: V8HI, V16QI, UQI. */ ++#define __lsx_vsllwil_h_b(/*__m128i*/ _1, /*ui3*/ _2) \ ++ ((__m128i)__builtin_lsx_vsllwil_h_b ((v16i8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: V4SI, V8HI, UQI. */ ++#define __lsx_vsllwil_w_h(/*__m128i*/ _1, /*ui4*/ _2) \ ++ ((__m128i)__builtin_lsx_vsllwil_w_h ((v8i16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V2DI, V4SI, UQI. */ ++#define __lsx_vsllwil_d_w(/*__m128i*/ _1, /*ui5*/ _2) \ ++ ((__m128i)__builtin_lsx_vsllwil_d_w ((v4i32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui3. */ ++/* Data types in instruction templates: UV8HI, UV16QI, UQI. 
*/ ++#define __lsx_vsllwil_hu_bu(/*__m128i*/ _1, /*ui3*/ _2) \ ++ ((__m128i)__builtin_lsx_vsllwil_hu_bu ((v16u8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: UV4SI, UV8HI, UQI. */ ++#define __lsx_vsllwil_wu_hu(/*__m128i*/ _1, /*ui4*/ _2) \ ++ ((__m128i)__builtin_lsx_vsllwil_wu_hu ((v8u16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: UV2DI, UV4SI, UQI. */ ++#define __lsx_vsllwil_du_wu(/*__m128i*/ _1, /*ui5*/ _2) \ ++ ((__m128i)__builtin_lsx_vsllwil_du_wu ((v4u32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsran_b_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsran_b_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsran_h_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsran_h_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsran_w_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsran_w_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssran_b_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssran_b_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. 
*/ ++/* Data types in instruction templates: V8HI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssran_h_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssran_h_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssran_w_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssran_w_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssran_bu_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssran_bu_h ((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV8HI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssran_hu_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssran_hu_w ((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV4SI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssran_wu_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssran_wu_d ((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsrarn_b_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsrarn_b_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. 
*/ ++/* Data types in instruction templates: V8HI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsrarn_h_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsrarn_h_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsrarn_w_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsrarn_w_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssrarn_b_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssrarn_b_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssrarn_h_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssrarn_h_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssrarn_w_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssrarn_w_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssrarn_bu_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssrarn_bu_h ((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. 
*/ ++/* Data types in instruction templates: UV8HI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssrarn_hu_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssrarn_hu_w ((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV4SI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssrarn_wu_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssrarn_wu_d ((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsrln_b_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsrln_b_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsrln_h_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsrln_h_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsrln_w_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsrln_w_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssrln_bu_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssrln_bu_h ((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. 
*/ ++/* Data types in instruction templates: UV8HI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssrln_hu_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssrln_hu_w ((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV4SI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssrln_wu_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssrln_wu_d ((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsrlrn_b_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsrlrn_b_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsrlrn_h_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsrlrn_h_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsrlrn_w_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsrlrn_w_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssrlrn_bu_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssrlrn_bu_h ((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. 
*/ ++/* Data types in instruction templates: UV8HI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssrlrn_hu_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssrlrn_hu_w ((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV4SI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssrlrn_wu_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssrlrn_wu_d ((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI, UQI. */ ++#define __lsx_vfrstpi_b(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \ ++ ((__m128i)__builtin_lsx_vfrstpi_b ((v16i8)(_1), (v16i8)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI, UQI. */ ++#define __lsx_vfrstpi_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \ ++ ((__m128i)__builtin_lsx_vfrstpi_h ((v8i16)(_1), (v8i16)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfrstp_b (__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vfrstp_b ((v16i8)_1, (v16i8)_2, (v16i8)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfrstp_h (__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vfrstp_h ((v8i16)_1, (v8i16)_2, (v8i16)_3); ++} ++ ++/* Assembly instruction format: vd, vj, ui8. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI, USI. 
*/ ++#define __lsx_vshuf4i_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui8*/ _3) \ ++ ((__m128i)__builtin_lsx_vshuf4i_d ((v2i64)(_1), (v2i64)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V16QI, V16QI, UQI. */ ++#define __lsx_vbsrl_v(/*__m128i*/ _1, /*ui5*/ _2) \ ++ ((__m128i)__builtin_lsx_vbsrl_v ((v16i8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V16QI, V16QI, UQI. */ ++#define __lsx_vbsll_v(/*__m128i*/ _1, /*ui5*/ _2) \ ++ ((__m128i)__builtin_lsx_vbsll_v ((v16i8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui8. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI, USI. */ ++#define __lsx_vextrins_b(/*__m128i*/ _1, /*__m128i*/ _2, /*ui8*/ _3) \ ++ ((__m128i)__builtin_lsx_vextrins_b ((v16i8)(_1), (v16i8)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui8. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI, USI. */ ++#define __lsx_vextrins_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui8*/ _3) \ ++ ((__m128i)__builtin_lsx_vextrins_h ((v8i16)(_1), (v8i16)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui8. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI, USI. */ ++#define __lsx_vextrins_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui8*/ _3) \ ++ ((__m128i)__builtin_lsx_vextrins_w ((v4i32)(_1), (v4i32)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui8. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI, USI. */ ++#define __lsx_vextrins_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui8*/ _3) \ ++ ((__m128i)__builtin_lsx_vextrins_d ((v2i64)(_1), (v2i64)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V16QI, V16QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmskltz_b (__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vmskltz_b ((v16i8)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmskltz_h (__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vmskltz_h ((v8i16)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmskltz_w (__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vmskltz_w ((v4i32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmskltz_d (__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vmskltz_d ((v2i64)_1); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsigncov_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsigncov_b ((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsigncov_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsigncov_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsigncov_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsigncov_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsigncov_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsigncov_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk, va. */ ++/* Data types in instruction templates: V4SF, V4SF, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vfmadd_s (__m128 _1, __m128 _2, __m128 _3) ++{ ++ return (__m128)__builtin_lsx_vfmadd_s ((v4f32)_1, (v4f32)_2, (v4f32)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk, va. */ ++/* Data types in instruction templates: V2DF, V2DF, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vfmadd_d (__m128d _1, __m128d _2, __m128d _3) ++{ ++ return (__m128d)__builtin_lsx_vfmadd_d ((v2f64)_1, (v2f64)_2, (v2f64)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk, va. */ ++/* Data types in instruction templates: V4SF, V4SF, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vfmsub_s (__m128 _1, __m128 _2, __m128 _3) ++{ ++ return (__m128)__builtin_lsx_vfmsub_s ((v4f32)_1, (v4f32)_2, (v4f32)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk, va. */ ++/* Data types in instruction templates: V2DF, V2DF, V2DF, V2DF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vfmsub_d (__m128d _1, __m128d _2, __m128d _3) ++{ ++ return (__m128d)__builtin_lsx_vfmsub_d ((v2f64)_1, (v2f64)_2, (v2f64)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk, va. */ ++/* Data types in instruction templates: V4SF, V4SF, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vfnmadd_s (__m128 _1, __m128 _2, __m128 _3) ++{ ++ return (__m128)__builtin_lsx_vfnmadd_s ((v4f32)_1, (v4f32)_2, (v4f32)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk, va. */ ++/* Data types in instruction templates: V2DF, V2DF, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vfnmadd_d (__m128d _1, __m128d _2, __m128d _3) ++{ ++ return (__m128d)__builtin_lsx_vfnmadd_d ((v2f64)_1, (v2f64)_2, (v2f64)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk, va. */ ++/* Data types in instruction templates: V4SF, V4SF, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vfnmsub_s (__m128 _1, __m128 _2, __m128 _3) ++{ ++ return (__m128)__builtin_lsx_vfnmsub_s ((v4f32)_1, (v4f32)_2, (v4f32)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk, va. */ ++/* Data types in instruction templates: V2DF, V2DF, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vfnmsub_d (__m128d _1, __m128d _2, __m128d _3) ++{ ++ return (__m128d)__builtin_lsx_vfnmsub_d ((v2f64)_1, (v2f64)_2, (v2f64)_3); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SI, V4SF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrne_w_s (__m128 _1) ++{ ++ return (__m128i)__builtin_lsx_vftintrne_w_s ((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrne_l_d (__m128d _1) ++{ ++ return (__m128i)__builtin_lsx_vftintrne_l_d ((v2f64)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SI, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrp_w_s (__m128 _1) ++{ ++ return (__m128i)__builtin_lsx_vftintrp_w_s ((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrp_l_d (__m128d _1) ++{ ++ return (__m128i)__builtin_lsx_vftintrp_l_d ((v2f64)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SI, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrm_w_s (__m128 _1) ++{ ++ return (__m128i)__builtin_lsx_vftintrm_w_s ((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrm_l_d (__m128d _1) ++{ ++ return (__m128i)__builtin_lsx_vftintrm_l_d ((v2f64)_1); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V2DF, V2DF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftint_w_d (__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vftint_w_d ((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SF, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vffint_s_l (__m128i _1, __m128i _2) ++{ ++ return (__m128)__builtin_lsx_vffint_s_l ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrz_w_d (__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vftintrz_w_d ((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrp_w_d (__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vftintrp_w_d ((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrm_w_d (__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vftintrm_w_d ((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrne_w_d (__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vftintrne_w_d ((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V4SF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintl_l_s (__m128 _1) ++{ ++ return (__m128i)__builtin_lsx_vftintl_l_s ((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftinth_l_s (__m128 _1) ++{ ++ return (__m128i)__builtin_lsx_vftinth_l_s ((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DF, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vffinth_d_w (__m128i _1) ++{ ++ return (__m128d)__builtin_lsx_vffinth_d_w ((v4i32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DF, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vffintl_d_w (__m128i _1) ++{ ++ return (__m128d)__builtin_lsx_vffintl_d_w ((v4i32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrzl_l_s (__m128 _1) ++{ ++ return (__m128i)__builtin_lsx_vftintrzl_l_s ((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrzh_l_s (__m128 _1) ++{ ++ return (__m128i)__builtin_lsx_vftintrzh_l_s ((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V4SF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrpl_l_s (__m128 _1) ++{ ++ return (__m128i)__builtin_lsx_vftintrpl_l_s ((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrph_l_s (__m128 _1) ++{ ++ return (__m128i)__builtin_lsx_vftintrph_l_s ((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrml_l_s (__m128 _1) ++{ ++ return (__m128i)__builtin_lsx_vftintrml_l_s ((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrmh_l_s (__m128 _1) ++{ ++ return (__m128i)__builtin_lsx_vftintrmh_l_s ((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrnel_l_s (__m128 _1) ++{ ++ return (__m128i)__builtin_lsx_vftintrnel_l_s ((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrneh_l_s (__m128 _1) ++{ ++ return (__m128i)__builtin_lsx_vftintrneh_l_s ((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SI, V4SF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vfrintrne_s (__m128 _1) ++{ ++ return (__m128)__builtin_lsx_vfrintrne_s ((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vfrintrne_d (__m128d _1) ++{ ++ return (__m128d)__builtin_lsx_vfrintrne_d ((v2f64)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SI, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vfrintrz_s (__m128 _1) ++{ ++ return (__m128)__builtin_lsx_vfrintrz_s ((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vfrintrz_d (__m128d _1) ++{ ++ return (__m128d)__builtin_lsx_vfrintrz_d ((v2f64)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SI, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vfrintrp_s (__m128 _1) ++{ ++ return (__m128)__builtin_lsx_vfrintrp_s ((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vfrintrp_d (__m128d _1) ++{ ++ return (__m128d)__builtin_lsx_vfrintrp_d ((v2f64)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SI, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vfrintrm_s (__m128 _1) ++{ ++ return (__m128)__builtin_lsx_vfrintrm_s ((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. 
*/ ++/* Data types in instruction templates: V2DI, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vfrintrm_d (__m128d _1) ++{ ++ return (__m128d)__builtin_lsx_vfrintrm_d ((v2f64)_1); ++} ++ ++/* Assembly instruction format: vd, rj, si8, idx. */ ++/* Data types in instruction templates: VOID, V16QI, CVPOINTER, SI, UQI. */ ++#define __lsx_vstelm_b(/*__m128i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) \ ++ ((void)__builtin_lsx_vstelm_b ((v16i8)(_1), (void *)(_2), (_3), (_4))) ++ ++/* Assembly instruction format: vd, rj, si8, idx. */ ++/* Data types in instruction templates: VOID, V8HI, CVPOINTER, SI, UQI. */ ++#define __lsx_vstelm_h(/*__m128i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) \ ++ ((void)__builtin_lsx_vstelm_h ((v8i16)(_1), (void *)(_2), (_3), (_4))) ++ ++/* Assembly instruction format: vd, rj, si8, idx. */ ++/* Data types in instruction templates: VOID, V4SI, CVPOINTER, SI, UQI. */ ++#define __lsx_vstelm_w(/*__m128i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) \ ++ ((void)__builtin_lsx_vstelm_w ((v4i32)(_1), (void *)(_2), (_3), (_4))) ++ ++/* Assembly instruction format: vd, rj, si8, idx. */ ++/* Data types in instruction templates: VOID, V2DI, CVPOINTER, SI, UQI. */ ++#define __lsx_vstelm_d(/*__m128i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) \ ++ ((void)__builtin_lsx_vstelm_d ((v2i64)(_1), (void *)(_2), (_3), (_4))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwev_d_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwev_d_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V8HI, V8HI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwev_w_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwev_w_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwev_h_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwev_h_b ((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwod_d_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwod_d_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwod_w_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwod_w_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwod_h_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwod_h_b ((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwev_d_wu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwev_d_wu ((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, UV8HI, UV8HI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwev_w_hu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwev_w_hu ((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwev_h_bu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwev_h_bu ((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwod_d_wu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwod_d_wu ((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwod_w_hu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwod_w_hu ((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwod_h_bu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwod_h_bu ((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwev_d_wu_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwev_d_wu_w ((v4u32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. 
*/ ++/* Data types in instruction templates: V4SI, UV8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwev_w_hu_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwev_w_hu_h ((v8u16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, UV16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwev_h_bu_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwev_h_bu_b ((v16u8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwod_d_wu_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwod_d_wu_w ((v4u32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, UV8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwod_w_hu_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwod_w_hu_h ((v8u16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, UV16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwod_h_bu_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwod_h_bu_b ((v16u8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V4SI, V4SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsubwev_d_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsubwev_d_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsubwev_w_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsubwev_w_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsubwev_h_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsubwev_h_b ((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsubwod_d_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsubwod_d_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsubwod_w_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsubwod_w_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsubwod_h_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsubwod_h_b ((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV4SI, UV4SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsubwev_d_wu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsubwev_d_wu ((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsubwev_w_hu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsubwev_w_hu ((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsubwev_h_bu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsubwev_h_bu ((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsubwod_d_wu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsubwod_d_wu ((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsubwod_w_hu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsubwod_w_hu ((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsubwod_h_bu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsubwod_h_bu ((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. 
*/ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwev_q_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwev_q_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwod_q_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwod_q_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwev_q_du (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwev_q_du ((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwod_q_du (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwod_q_du ((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsubwev_q_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsubwev_q_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsubwod_q_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsubwod_q_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. 
*/ ++/* Data types in instruction templates: V2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsubwev_q_du (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsubwev_q_du ((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsubwod_q_du (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsubwod_q_du ((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwev_q_du_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwev_q_du_d ((v2u64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwod_q_du_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwod_q_du_d ((v2u64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwev_d_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwev_d_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V8HI, V8HI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwev_w_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwev_w_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwev_h_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwev_h_b ((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwod_d_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwod_d_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwod_w_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwod_w_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwod_h_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwod_h_b ((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwev_d_wu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwev_d_wu ((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, UV8HI, UV8HI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwev_w_hu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwev_w_hu ((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwev_h_bu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwev_h_bu ((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwod_d_wu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwod_d_wu ((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwod_w_hu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwod_w_hu ((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwod_h_bu (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwod_h_bu ((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwev_d_wu_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwev_d_wu_w ((v4u32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. 
*/ ++/* Data types in instruction templates: V4SI, UV8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwev_w_hu_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwev_w_hu_h ((v8u16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, UV16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwev_h_bu_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwev_h_bu_b ((v16u8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwod_d_wu_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwod_d_wu_w ((v4u32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, UV8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwod_w_hu_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwod_w_hu_h ((v8u16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, UV16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwod_h_bu_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwod_h_bu_b ((v16u8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwev_q_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwev_q_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwod_q_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwod_q_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwev_q_du (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwev_q_du ((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwod_q_du (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwod_q_du ((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwev_q_du_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwev_q_du_d ((v2u64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwod_q_du_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwod_q_du_d ((v2u64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. 
*/ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vhaddw_q_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vhaddw_q_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vhaddw_qu_du (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vhaddw_qu_du ((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vhsubw_q_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vhsubw_q_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vhsubw_qu_du (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vhsubw_qu_du ((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwev_d_w (__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwev_d_w ((v2i64)_1, (v4i32)_2, (v4i32)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V8HI, V8HI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwev_w_h (__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwev_w_h ((v4i32)_1, (v8i16)_2, (v8i16)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwev_h_b (__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwev_h_b ((v8i16)_1, (v16i8)_2, (v16i8)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwev_d_wu (__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwev_d_wu ((v2u64)_1, (v4u32)_2, (v4u32)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV4SI, UV4SI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwev_w_hu (__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwev_w_hu ((v4u32)_1, (v8u16)_2, (v8u16)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV8HI, UV8HI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwev_h_bu (__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwev_h_bu ((v8u16)_1, (v16u8)_2, (v16u8)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V4SI, V4SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwod_d_w (__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwod_d_w ((v2i64)_1, (v4i32)_2, (v4i32)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwod_w_h (__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwod_w_h ((v4i32)_1, (v8i16)_2, (v8i16)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwod_h_b (__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwod_h_b ((v8i16)_1, (v16i8)_2, (v16i8)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwod_d_wu (__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwod_d_wu ((v2u64)_1, (v4u32)_2, (v4u32)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV4SI, UV4SI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwod_w_hu (__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwod_w_hu ((v4u32)_1, (v8u16)_2, (v8u16)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV8HI, UV8HI, UV16QI, UV16QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwod_h_bu (__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwod_h_bu ((v8u16)_1, (v16u8)_2, (v16u8)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, UV4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwev_d_wu_w (__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwev_d_wu_w ((v2i64)_1, (v4u32)_2, (v4i32)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, UV8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwev_w_hu_h (__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwev_w_hu_h ((v4i32)_1, (v8u16)_2, (v8i16)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, UV16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwev_h_bu_b (__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwev_h_bu_b ((v8i16)_1, (v16u8)_2, (v16i8)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, UV4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwod_d_wu_w (__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwod_d_wu_w ((v2i64)_1, (v4u32)_2, (v4i32)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, UV8HI, V8HI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwod_w_hu_h (__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwod_w_hu_h ((v4i32)_1, (v8u16)_2, (v8i16)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, UV16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwod_h_bu_b (__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwod_h_bu_b ((v8i16)_1, (v16u8)_2, (v16i8)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwev_q_d (__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwev_q_d ((v2i64)_1, (v2i64)_2, (v2i64)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwod_q_d (__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwod_q_d ((v2i64)_1, (v2i64)_2, (v2i64)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwev_q_du (__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwev_q_du ((v2u64)_1, (v2u64)_2, (v2u64)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UV2DI, UV2DI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwod_q_du (__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwod_q_du ((v2u64)_1, (v2u64)_2, (v2u64)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, UV2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwev_q_du_d (__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwev_q_du_d ((v2i64)_1, (v2u64)_2, (v2i64)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, UV2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwod_q_du_d (__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwod_q_du_d ((v2i64)_1, (v2u64)_2, (v2i64)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vrotr_b (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vrotr_b ((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vrotr_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vrotr_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vrotr_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vrotr_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. 
*/ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vrotr_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vrotr_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vadd_q (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vadd_q ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsub_q (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsub_q ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, rj, si12. */ ++/* Data types in instruction templates: V16QI, CVPOINTER, SI. */ ++#define __lsx_vldrepl_b(/*void **/ _1, /*si12*/ _2) \ ++ ((__m128i)__builtin_lsx_vldrepl_b ((void *)(_1), (_2))) ++ ++/* Assembly instruction format: vd, rj, si11. */ ++/* Data types in instruction templates: V8HI, CVPOINTER, SI. */ ++#define __lsx_vldrepl_h(/*void **/ _1, /*si11*/ _2) \ ++ ((__m128i)__builtin_lsx_vldrepl_h ((void *)(_1), (_2))) ++ ++/* Assembly instruction format: vd, rj, si10. */ ++/* Data types in instruction templates: V4SI, CVPOINTER, SI. */ ++#define __lsx_vldrepl_w(/*void **/ _1, /*si10*/ _2) \ ++ ((__m128i)__builtin_lsx_vldrepl_w ((void *)(_1), (_2))) ++ ++/* Assembly instruction format: vd, rj, si9. */ ++/* Data types in instruction templates: V2DI, CVPOINTER, SI. */ ++#define __lsx_vldrepl_d(/*void **/ _1, /*si9*/ _2) \ ++ ((__m128i)__builtin_lsx_vldrepl_d ((void *)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V16QI, V16QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmskgez_b (__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vmskgez_b ((v16i8)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmsknz_b (__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vmsknz_b ((v16i8)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V8HI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vexth_h_b (__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vexth_h_b ((v16i8)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vexth_w_h (__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vexth_w_h ((v8i16)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vexth_d_w (__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vexth_d_w ((v4i32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vexth_q_d (__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vexth_q_d ((v2i64)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: UV8HI, UV16QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vexth_hu_bu (__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vexth_hu_bu ((v16u8)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: UV4SI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vexth_wu_hu (__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vexth_wu_hu ((v8u16)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: UV2DI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vexth_du_wu (__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vexth_du_wu ((v4u32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vexth_qu_du (__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vexth_qu_du ((v2u64)_1); ++} ++ ++/* Assembly instruction format: vd, vj, ui3. */ ++/* Data types in instruction templates: V16QI, V16QI, UQI. */ ++#define __lsx_vrotri_b(/*__m128i*/ _1, /*ui3*/ _2) \ ++ ((__m128i)__builtin_lsx_vrotri_b ((v16i8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: V8HI, V8HI, UQI. */ ++#define __lsx_vrotri_h(/*__m128i*/ _1, /*ui4*/ _2) \ ++ ((__m128i)__builtin_lsx_vrotri_h ((v8i16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V4SI, V4SI, UQI. */ ++#define __lsx_vrotri_w(/*__m128i*/ _1, /*ui5*/ _2) \ ++ ((__m128i)__builtin_lsx_vrotri_w ((v4i32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: V2DI, V2DI, UQI. 
*/ ++#define __lsx_vrotri_d(/*__m128i*/ _1, /*ui6*/ _2) \ ++ ((__m128i)__builtin_lsx_vrotri_d ((v2i64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vextl_q_d (__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vextl_q_d ((v2i64)_1); ++} ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI, USI. */ ++#define __lsx_vsrlni_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \ ++ ((__m128i)__builtin_lsx_vsrlni_b_h ((v16i8)(_1), (v16i8)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI, USI. */ ++#define __lsx_vsrlni_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \ ++ ((__m128i)__builtin_lsx_vsrlni_h_w ((v8i16)(_1), (v8i16)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI, USI. */ ++#define __lsx_vsrlni_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \ ++ ((__m128i)__builtin_lsx_vsrlni_w_d ((v4i32)(_1), (v4i32)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui7. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI, USI. */ ++#define __lsx_vsrlni_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \ ++ ((__m128i)__builtin_lsx_vsrlni_d_q ((v2i64)(_1), (v2i64)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI, USI. */ ++#define __lsx_vsrlrni_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \ ++ ((__m128i)__builtin_lsx_vsrlrni_b_h ((v16i8)(_1), (v16i8)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI, USI. 
*/ ++#define __lsx_vsrlrni_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \ ++ ((__m128i)__builtin_lsx_vsrlrni_h_w ((v8i16)(_1), (v8i16)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI, USI. */ ++#define __lsx_vsrlrni_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \ ++ ((__m128i)__builtin_lsx_vsrlrni_w_d ((v4i32)(_1), (v4i32)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui7. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI, USI. */ ++#define __lsx_vsrlrni_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \ ++ ((__m128i)__builtin_lsx_vsrlrni_d_q ((v2i64)(_1), (v2i64)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI, USI. */ ++#define __lsx_vssrlni_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \ ++ ((__m128i)__builtin_lsx_vssrlni_b_h ((v16i8)(_1), (v16i8)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI, USI. */ ++#define __lsx_vssrlni_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \ ++ ((__m128i)__builtin_lsx_vssrlni_h_w ((v8i16)(_1), (v8i16)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI, USI. */ ++#define __lsx_vssrlni_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \ ++ ((__m128i)__builtin_lsx_vssrlni_w_d ((v4i32)(_1), (v4i32)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui7. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI, USI. */ ++#define __lsx_vssrlni_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \ ++ ((__m128i)__builtin_lsx_vssrlni_d_q ((v2i64)(_1), (v2i64)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: UV16QI, UV16QI, V16QI, USI. 
*/ ++#define __lsx_vssrlni_bu_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \ ++ ((__m128i)__builtin_lsx_vssrlni_bu_h ((v16u8)(_1), (v16i8)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: UV8HI, UV8HI, V8HI, USI. */ ++#define __lsx_vssrlni_hu_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \ ++ ((__m128i)__builtin_lsx_vssrlni_hu_w ((v8u16)(_1), (v8i16)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: UV4SI, UV4SI, V4SI, USI. */ ++#define __lsx_vssrlni_wu_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \ ++ ((__m128i)__builtin_lsx_vssrlni_wu_d ((v4u32)(_1), (v4i32)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui7. */ ++/* Data types in instruction templates: UV2DI, UV2DI, V2DI, USI. */ ++#define __lsx_vssrlni_du_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \ ++ ((__m128i)__builtin_lsx_vssrlni_du_q ((v2u64)(_1), (v2i64)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI, USI. */ ++#define __lsx_vssrlrni_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \ ++ ((__m128i)__builtin_lsx_vssrlrni_b_h ((v16i8)(_1), (v16i8)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI, USI. */ ++#define __lsx_vssrlrni_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \ ++ ((__m128i)__builtin_lsx_vssrlrni_h_w ((v8i16)(_1), (v8i16)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI, USI. */ ++#define __lsx_vssrlrni_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \ ++ ((__m128i)__builtin_lsx_vssrlrni_w_d ((v4i32)(_1), (v4i32)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui7. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI, USI. 
*/ ++#define __lsx_vssrlrni_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \ ++ ((__m128i)__builtin_lsx_vssrlrni_d_q ((v2i64)(_1), (v2i64)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: UV16QI, UV16QI, V16QI, USI. */ ++#define __lsx_vssrlrni_bu_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \ ++ ((__m128i)__builtin_lsx_vssrlrni_bu_h ((v16u8)(_1), (v16i8)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: UV8HI, UV8HI, V8HI, USI. */ ++#define __lsx_vssrlrni_hu_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \ ++ ((__m128i)__builtin_lsx_vssrlrni_hu_w ((v8u16)(_1), (v8i16)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: UV4SI, UV4SI, V4SI, USI. */ ++#define __lsx_vssrlrni_wu_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \ ++ ((__m128i)__builtin_lsx_vssrlrni_wu_d ((v4u32)(_1), (v4i32)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui7. */ ++/* Data types in instruction templates: UV2DI, UV2DI, V2DI, USI. */ ++#define __lsx_vssrlrni_du_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \ ++ ((__m128i)__builtin_lsx_vssrlrni_du_q ((v2u64)(_1), (v2i64)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI, USI. */ ++#define __lsx_vsrani_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \ ++ ((__m128i)__builtin_lsx_vsrani_b_h ((v16i8)(_1), (v16i8)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI, USI. */ ++#define __lsx_vsrani_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \ ++ ((__m128i)__builtin_lsx_vsrani_h_w ((v8i16)(_1), (v8i16)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI, USI. 
*/ ++#define __lsx_vsrani_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \ ++ ((__m128i)__builtin_lsx_vsrani_w_d ((v4i32)(_1), (v4i32)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui7. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI, USI. */ ++#define __lsx_vsrani_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \ ++ ((__m128i)__builtin_lsx_vsrani_d_q ((v2i64)(_1), (v2i64)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI, USI. */ ++#define __lsx_vsrarni_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \ ++ ((__m128i)__builtin_lsx_vsrarni_b_h ((v16i8)(_1), (v16i8)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI, USI. */ ++#define __lsx_vsrarni_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \ ++ ((__m128i)__builtin_lsx_vsrarni_h_w ((v8i16)(_1), (v8i16)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI, USI. */ ++#define __lsx_vsrarni_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \ ++ ((__m128i)__builtin_lsx_vsrarni_w_d ((v4i32)(_1), (v4i32)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui7. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI, USI. */ ++#define __lsx_vsrarni_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \ ++ ((__m128i)__builtin_lsx_vsrarni_d_q ((v2i64)(_1), (v2i64)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI, USI. */ ++#define __lsx_vssrani_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \ ++ ((__m128i)__builtin_lsx_vssrani_b_h ((v16i8)(_1), (v16i8)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI, USI. 
*/ ++#define __lsx_vssrani_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \ ++ ((__m128i)__builtin_lsx_vssrani_h_w ((v8i16)(_1), (v8i16)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI, USI. */ ++#define __lsx_vssrani_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \ ++ ((__m128i)__builtin_lsx_vssrani_w_d ((v4i32)(_1), (v4i32)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui7. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI, USI. */ ++#define __lsx_vssrani_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \ ++ ((__m128i)__builtin_lsx_vssrani_d_q ((v2i64)(_1), (v2i64)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: UV16QI, UV16QI, V16QI, USI. */ ++#define __lsx_vssrani_bu_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \ ++ ((__m128i)__builtin_lsx_vssrani_bu_h ((v16u8)(_1), (v16i8)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: UV8HI, UV8HI, V8HI, USI. */ ++#define __lsx_vssrani_hu_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \ ++ ((__m128i)__builtin_lsx_vssrani_hu_w ((v8u16)(_1), (v8i16)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: UV4SI, UV4SI, V4SI, USI. */ ++#define __lsx_vssrani_wu_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \ ++ ((__m128i)__builtin_lsx_vssrani_wu_d ((v4u32)(_1), (v4i32)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui7. */ ++/* Data types in instruction templates: UV2DI, UV2DI, V2DI, USI. */ ++#define __lsx_vssrani_du_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \ ++ ((__m128i)__builtin_lsx_vssrani_du_q ((v2u64)(_1), (v2i64)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI, USI. 
*/ ++#define __lsx_vssrarni_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \ ++ ((__m128i)__builtin_lsx_vssrarni_b_h ((v16i8)(_1), (v16i8)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI, USI. */ ++#define __lsx_vssrarni_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \ ++ ((__m128i)__builtin_lsx_vssrarni_h_w ((v8i16)(_1), (v8i16)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI, USI. */ ++#define __lsx_vssrarni_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \ ++ ((__m128i)__builtin_lsx_vssrarni_w_d ((v4i32)(_1), (v4i32)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui7. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI, USI. */ ++#define __lsx_vssrarni_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \ ++ ((__m128i)__builtin_lsx_vssrarni_d_q ((v2i64)(_1), (v2i64)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: UV16QI, UV16QI, V16QI, USI. */ ++#define __lsx_vssrarni_bu_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \ ++ ((__m128i)__builtin_lsx_vssrarni_bu_h ((v16u8)(_1), (v16i8)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: UV8HI, UV8HI, V8HI, USI. */ ++#define __lsx_vssrarni_hu_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \ ++ ((__m128i)__builtin_lsx_vssrarni_hu_w ((v8u16)(_1), (v8i16)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: UV4SI, UV4SI, V4SI, USI. */ ++#define __lsx_vssrarni_wu_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \ ++ ((__m128i)__builtin_lsx_vssrarni_wu_d ((v4u32)(_1), (v4i32)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui7. */ ++/* Data types in instruction templates: UV2DI, UV2DI, V2DI, USI. 
*/ ++#define __lsx_vssrarni_du_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \ ++ ((__m128i)__builtin_lsx_vssrarni_du_q ((v2u64)(_1), (v2i64)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui8. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI, USI. */ ++#define __lsx_vpermi_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui8*/ _3) \ ++ ((__m128i)__builtin_lsx_vpermi_w ((v4i32)(_1), (v4i32)(_2), (_3))) ++ ++/* Assembly instruction format: vd, rj, si12. */ ++/* Data types in instruction templates: V16QI, CVPOINTER, SI. */ ++#define __lsx_vld(/*void **/ _1, /*si12*/ _2) \ ++ ((__m128i)__builtin_lsx_vld ((void *)(_1), (_2))) ++ ++/* Assembly instruction format: vd, rj, si12. */ ++/* Data types in instruction templates: VOID, V16QI, CVPOINTER, SI. */ ++#define __lsx_vst(/*__m128i*/ _1, /*void **/ _2, /*si12*/ _3) \ ++ ((void)__builtin_lsx_vst ((v16i8)(_1), (void *)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssrlrn_b_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssrlrn_b_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssrlrn_h_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssrlrn_h_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssrlrn_w_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssrlrn_w_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V8HI, V8HI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssrln_b_h (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssrln_b_h ((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssrln_h_w (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssrln_h_w ((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssrln_w_d (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssrln_w_d ((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vorn_v (__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vorn_v ((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, i13. */ ++/* Data types in instruction templates: V2DI, HI. */ ++#define __lsx_vldi(/*i13*/ _1) \ ++ ((__m128i)__builtin_lsx_vldi ((_1))) ++ ++/* Assembly instruction format: vd, vj, vk, va. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vshuf_b (__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vshuf_b ((v16i8)_1, (v16i8)_2, (v16i8)_3); ++} ++ ++/* Assembly instruction format: vd, rj, rk. */ ++/* Data types in instruction templates: V16QI, CVPOINTER, DI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vldx (void * _1, long int _2) ++{ ++ return (__m128i)__builtin_lsx_vldx ((void *)_1, (long int)_2); ++} ++ ++/* Assembly instruction format: vd, rj, rk. */ ++/* Data types in instruction templates: VOID, V16QI, CVPOINTER, DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++void __lsx_vstx (__m128i _1, void * _2, long int _3) ++{ ++ return (void)__builtin_lsx_vstx ((v16i8)_1, (void *)_2, (long int)_3); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vextl_qu_du (__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vextl_qu_du ((v2u64)_1); ++} ++ ++/* Assembly instruction format: cd, vj. */ ++/* Data types in instruction templates: SI, UV16QI. */ ++#define __lsx_bnz_b(/*__m128i*/ _1) \ ++ ((int)__builtin_lsx_bnz_b ((v16u8)(_1))) ++ ++/* Assembly instruction format: cd, vj. */ ++/* Data types in instruction templates: SI, UV2DI. */ ++#define __lsx_bnz_d(/*__m128i*/ _1) \ ++ ((int)__builtin_lsx_bnz_d ((v2u64)(_1))) ++ ++/* Assembly instruction format: cd, vj. */ ++/* Data types in instruction templates: SI, UV8HI. */ ++#define __lsx_bnz_h(/*__m128i*/ _1) \ ++ ((int)__builtin_lsx_bnz_h ((v8u16)(_1))) ++ ++/* Assembly instruction format: cd, vj. */ ++/* Data types in instruction templates: SI, UV16QI. */ ++#define __lsx_bnz_v(/*__m128i*/ _1) \ ++ ((int)__builtin_lsx_bnz_v ((v16u8)(_1))) ++ ++/* Assembly instruction format: cd, vj. */ ++/* Data types in instruction templates: SI, UV4SI. */ ++#define __lsx_bnz_w(/*__m128i*/ _1) \ ++ ((int)__builtin_lsx_bnz_w ((v4u32)(_1))) ++ ++/* Assembly instruction format: cd, vj. */ ++/* Data types in instruction templates: SI, UV16QI. 
*/ ++#define __lsx_bz_b(/*__m128i*/ _1) \ ++ ((int)__builtin_lsx_bz_b ((v16u8)(_1))) ++ ++/* Assembly instruction format: cd, vj. */ ++/* Data types in instruction templates: SI, UV2DI. */ ++#define __lsx_bz_d(/*__m128i*/ _1) \ ++ ((int)__builtin_lsx_bz_d ((v2u64)(_1))) ++ ++/* Assembly instruction format: cd, vj. */ ++/* Data types in instruction templates: SI, UV8HI. */ ++#define __lsx_bz_h(/*__m128i*/ _1) \ ++ ((int)__builtin_lsx_bz_h ((v8u16)(_1))) ++ ++/* Assembly instruction format: cd, vj. */ ++/* Data types in instruction templates: SI, UV16QI. */ ++#define __lsx_bz_v(/*__m128i*/ _1) \ ++ ((int)__builtin_lsx_bz_v ((v16u8)(_1))) ++ ++/* Assembly instruction format: cd, vj. */ ++/* Data types in instruction templates: SI, UV4SI. */ ++#define __lsx_bz_w(/*__m128i*/ _1) \ ++ ((int)__builtin_lsx_bz_w ((v4u32)(_1))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_caf_d (__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_caf_d ((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_caf_s (__m128 _1, __m128 _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_caf_s ((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_ceq_d (__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_ceq_d ((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SF, V4SF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_ceq_s (__m128 _1, __m128 _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_ceq_s ((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_cle_d (__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_cle_d ((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_cle_s (__m128 _1, __m128 _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_cle_s ((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_clt_d (__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_clt_d ((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_clt_s (__m128 _1, __m128 _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_clt_s ((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_cne_d (__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_cne_d ((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SF, V4SF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_cne_s (__m128 _1, __m128 _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_cne_s ((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_cor_d (__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_cor_d ((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_cor_s (__m128 _1, __m128 _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_cor_s ((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_cueq_d (__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_cueq_d ((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_cueq_s (__m128 _1, __m128 _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_cueq_s ((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_cule_d (__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_cule_d ((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SF, V4SF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_cule_s (__m128 _1, __m128 _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_cule_s ((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_cult_d (__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_cult_d ((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_cult_s (__m128 _1, __m128 _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_cult_s ((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_cun_d (__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_cun_d ((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_cune_d (__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_cune_d ((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_cune_s (__m128 _1, __m128 _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_cune_s ((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SF, V4SF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_cun_s (__m128 _1, __m128 _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_cun_s ((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_saf_d (__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_saf_d ((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_saf_s (__m128 _1, __m128 _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_saf_s ((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_seq_d (__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_seq_d ((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_seq_s (__m128 _1, __m128 _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_seq_s ((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_sle_d (__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_sle_d ((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SF, V4SF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_sle_s (__m128 _1, __m128 _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_sle_s ((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_slt_d (__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_slt_d ((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_slt_s (__m128 _1, __m128 _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_slt_s ((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_sne_d (__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_sne_d ((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_sne_s (__m128 _1, __m128 _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_sne_s ((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_sor_d (__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_sor_d ((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SF, V4SF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_sor_s (__m128 _1, __m128 _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_sor_s ((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_sueq_d (__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_sueq_d ((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_sueq_s (__m128 _1, __m128 _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_sueq_s ((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_sule_d (__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_sule_d ((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_sule_s (__m128 _1, __m128 _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_sule_s ((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_sult_d (__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_sult_d ((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SF, V4SF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_sult_s (__m128 _1, __m128 _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_sult_s ((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_sun_d (__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_sun_d ((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_sune_d (__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_sune_d ((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_sune_s (__m128 _1, __m128 _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_sune_s ((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_sun_s (__m128 _1, __m128 _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_sun_s ((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, si10. */ ++/* Data types in instruction templates: V16QI, HI. */ ++#define __lsx_vrepli_b(/*si10*/ _1) \ ++ ((__m128i)__builtin_lsx_vrepli_b ((_1))) ++ ++/* Assembly instruction format: vd, si10. */ ++/* Data types in instruction templates: V2DI, HI. */ ++#define __lsx_vrepli_d(/*si10*/ _1) \ ++ ((__m128i)__builtin_lsx_vrepli_d ((_1))) ++ ++/* Assembly instruction format: vd, si10. 
*/ ++/* Data types in instruction templates: V8HI, HI. */ ++#define __lsx_vrepli_h(/*si10*/ _1) \ ++ ((__m128i)__builtin_lsx_vrepli_h ((_1))) ++ ++/* Assembly instruction format: vd, si10. */ ++/* Data types in instruction templates: V4SI, HI. */ ++#define __lsx_vrepli_w(/*si10*/ _1) \ ++ ((__m128i)__builtin_lsx_vrepli_w ((_1))) ++ ++#endif /* defined(__loongarch_sx) */ ++#endif /* _GCC_LOONGSON_SXINTRIN_H */ +-- +2.33.0 + diff --git a/LoongArch-Add-built-in-functions-description-of-Loon.patch b/LoongArch-Add-built-in-functions-description-of-Loon.patch new file mode 100644 index 0000000000000000000000000000000000000000..b1817e2b7899209edfc08ea2862a88ca4d709acc --- /dev/null +++ b/LoongArch-Add-built-in-functions-description-of-Loon.patch @@ -0,0 +1,166 @@ +From 7cfe6e057045ac794afbe9097b1b211c0e1ea723 Mon Sep 17 00:00:00 2001 +From: Lulu Cheng +Date: Thu, 6 Apr 2023 16:02:07 +0800 +Subject: [PATCH 039/124] LoongArch: Add built-in functions description of + LoongArch Base instruction set instructions. + +gcc/ChangeLog: + + * doc/extend.texi: Add section for LoongArch Base Built-in functions. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/doc/extend.texi | 129 ++++++++++++++++++++++++++++++++++++++++++++ + 1 file changed, 129 insertions(+) + +diff --git a/gcc/doc/extend.texi b/gcc/doc/extend.texi +index 3c101ca89..1d1bac255 100644 +--- a/gcc/doc/extend.texi ++++ b/gcc/doc/extend.texi +@@ -14678,6 +14678,7 @@ instructions, but allow the compiler to schedule those calls. + * Blackfin Built-in Functions:: + * BPF Built-in Functions:: + * FR-V Built-in Functions:: ++* LoongArch Base Built-in Functions:: + * MIPS DSP Built-in Functions:: + * MIPS Paired-Single Support:: + * MIPS Loongson Built-in Functions:: +@@ -16128,6 +16129,134 @@ Use the @code{nldub} instruction to load the contents of address @var{x} + into the data cache. The instruction is issued in slot I1@. 
+ @end table + ++@node LoongArch Base Built-in Functions ++@subsection LoongArch Base Built-in Functions ++ ++These built-in functions are available for LoongArch. ++ ++Data Type Description: ++@itemize ++@item @code{imm0_31}, a compile-time constant in range 0 to 31; ++@item @code{imm0_16383}, a compile-time constant in range 0 to 16383; ++@item @code{imm0_32767}, a compile-time constant in range 0 to 32767; ++@item @code{imm_n2048_2047}, a compile-time constant in range -2048 to 2047; ++@end itemize ++ ++The intrinsics provided are listed below: ++@smallexample ++ unsigned int __builtin_loongarch_movfcsr2gr (imm0_31) ++ void __builtin_loongarch_movgr2fcsr (imm0_31, unsigned int) ++ void __builtin_loongarch_cacop_d (imm0_31, unsigned long int, imm_n2048_2047) ++ unsigned int __builtin_loongarch_cpucfg (unsigned int) ++ void __builtin_loongarch_asrtle_d (long int, long int) ++ void __builtin_loongarch_asrtgt_d (long int, long int) ++ long int __builtin_loongarch_lddir_d (long int, imm0_31) ++ void __builtin_loongarch_ldpte_d (long int, imm0_31) ++ ++ int __builtin_loongarch_crc_w_b_w (char, int) ++ int __builtin_loongarch_crc_w_h_w (short, int) ++ int __builtin_loongarch_crc_w_w_w (int, int) ++ int __builtin_loongarch_crc_w_d_w (long int, int) ++ int __builtin_loongarch_crcc_w_b_w (char, int) ++ int __builtin_loongarch_crcc_w_h_w (short, int) ++ int __builtin_loongarch_crcc_w_w_w (int, int) ++ int __builtin_loongarch_crcc_w_d_w (long int, int) ++ ++ unsigned int __builtin_loongarch_csrrd_w (imm0_16383) ++ unsigned int __builtin_loongarch_csrwr_w (unsigned int, imm0_16383) ++ unsigned int __builtin_loongarch_csrxchg_w (unsigned int, unsigned int, imm0_16383) ++ unsigned long int __builtin_loongarch_csrrd_d (imm0_16383) ++ unsigned long int __builtin_loongarch_csrwr_d (unsigned long int, imm0_16383) ++ unsigned long int __builtin_loongarch_csrxchg_d (unsigned long int, unsigned long int, imm0_16383) ++ ++ unsigned char __builtin_loongarch_iocsrrd_b (unsigned int) ++ 
unsigned short __builtin_loongarch_iocsrrd_h (unsigned int) ++ unsigned int __builtin_loongarch_iocsrrd_w (unsigned int) ++ unsigned long int __builtin_loongarch_iocsrrd_d (unsigned int) ++ void __builtin_loongarch_iocsrwr_b (unsigned char, unsigned int) ++ void __builtin_loongarch_iocsrwr_h (unsigned short, unsigned int) ++ void __builtin_loongarch_iocsrwr_w (unsigned int, unsigned int) ++ void __builtin_loongarch_iocsrwr_d (unsigned long int, unsigned int) ++ ++ void __builtin_loongarch_dbar (imm0_32767) ++ void __builtin_loongarch_ibar (imm0_32767) ++ ++ void __builtin_loongarch_syscall (imm0_32767) ++ void __builtin_loongarch_break (imm0_32767) ++@end smallexample ++ ++@emph{Note:}Since the control register is divided into 32-bit and 64-bit, ++but the access instruction is not distinguished. So GCC renames the control ++instructions when implementing intrinsics. ++ ++Take the csrrd instruction as an example, built-in functions are implemented as follows: ++@smallexample ++ __builtin_loongarch_csrrd_w // When reading the 32-bit control register use. ++ __builtin_loongarch_csrrd_d // When reading the 64-bit control register use. ++@end smallexample ++ ++For the convenience of use, the built-in functions are encapsulated, ++the encapsulated functions and @code{__drdtime_t, __rdtime_t} are ++defined in the @code{larchintrin.h}. So if you call the following ++function you need to include @code{larchintrin.h}. 
++ ++@smallexample ++ typedef struct drdtime@{ ++ unsigned long dvalue; ++ unsigned long dtimeid; ++ @} __drdtime_t; ++ ++ typedef struct rdtime@{ ++ unsigned int value; ++ unsigned int timeid; ++ @} __rdtime_t; ++@end smallexample ++ ++@smallexample ++ __drdtime_t __rdtime_d (void) ++ __rdtime_t __rdtimel_w (void) ++ __rdtime_t __rdtimeh_w (void) ++ unsigned int __movfcsr2gr (imm0_31) ++ void __movgr2fcsr (imm0_31, unsigned int) ++ void __cacop_d (imm0_31, unsigned long, imm_n2048_2047) ++ unsigned int __cpucfg (unsigned int) ++ void __asrtle_d (long int, long int) ++ void __asrtgt_d (long int, long int) ++ long int __lddir_d (long int, imm0_31) ++ void __ldpte_d (long int, imm0_31) ++ ++ int __crc_w_b_w (char, int) ++ int __crc_w_h_w (short, int) ++ int __crc_w_w_w (int, int) ++ int __crc_w_d_w (long int, int) ++ int __crcc_w_b_w (char, int) ++ int __crcc_w_h_w (short, int) ++ int __crcc_w_w_w (int, int) ++ int __crcc_w_d_w (long int, int) ++ ++ unsigned int __csrrd_w (imm0_16383) ++ unsigned int __csrwr_w (unsigned int, imm0_16383) ++ unsigned int __csrxchg_w (unsigned int, unsigned int, imm0_16383) ++ unsigned long __csrrd_d (imm0_16383) ++ unsigned long __csrwr_d (unsigned long, imm0_16383) ++ unsigned long __csrxchg_d (unsigned long, unsigned long, imm0_16383) ++ ++ unsigned char __iocsrrd_b (unsigned int) ++ unsigned short __iocsrrd_h (unsigned int) ++ unsigned int __iocsrrd_w (unsigned int) ++ unsigned long __iocsrrd_d (unsigned int) ++ void __iocsrwr_b (unsigned char, unsigned int) ++ void __iocsrwr_h (unsigned short, unsigned int) ++ void __iocsrwr_w (unsigned int, unsigned int) ++ void __iocsrwr_d (unsigned long, unsigned int) ++ ++ void __dbar (imm0_32767) ++ void __ibar (imm0_32767) ++ ++ void __syscall (imm0_32767) ++ void __break (imm0_32767) ++@end smallexample ++ + @node MIPS DSP Built-in Functions + @subsection MIPS DSP Built-in Functions + +-- +2.33.0 + diff --git a/LoongArch-Add-fcopysign-instructions.patch 
b/LoongArch-Add-fcopysign-instructions.patch new file mode 100644 index 0000000000000000000000000000000000000000..af82e3fd3eab33b24b5337cb31beb064b7111049 --- /dev/null +++ b/LoongArch-Add-fcopysign-instructions.patch @@ -0,0 +1,107 @@ +From 41a4945886631a1b2898ae957389d5db18a07141 Mon Sep 17 00:00:00 2001 +From: Xi Ruoyao +Date: Fri, 4 Nov 2022 15:12:22 +0800 +Subject: [PATCH 025/124] LoongArch: Add fcopysign instructions + +Add fcopysign.{s,d} with the names copysign{sf,df}3 so GCC will expand +__builtin_copysign{f,} to a single instruction. + +Link: https://sourceware.org/pipermail/libc-alpha/2022-November/143177.html + +gcc/ChangeLog: + + * config/loongarch/loongarch.md (UNSPEC_FCOPYSIGN): New unspec. + (type): Add fcopysign. + (copysign3): New instruction template. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/fcopysign.c: New test. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/loongarch.md | 22 ++++++++++++++++++- + .../gcc.target/loongarch/fcopysign.c | 16 ++++++++++++++ + 2 files changed, 37 insertions(+), 1 deletion(-) + create mode 100644 gcc/testsuite/gcc.target/loongarch/fcopysign.c + +diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md +index 214b14bdd..bda34d0f3 100644 +--- a/gcc/config/loongarch/loongarch.md ++++ b/gcc/config/loongarch/loongarch.md +@@ -37,6 +37,7 @@ + UNSPEC_FCLASS + UNSPEC_FMAX + UNSPEC_FMIN ++ UNSPEC_FCOPYSIGN + + ;; Override return address for exception handling. 
+ UNSPEC_EH_RETURN +@@ -214,6 +215,7 @@ + ;; fabs floating point absolute value + ;; fneg floating point negation + ;; fcmp floating point compare ++;; fcopysign floating point copysign + ;; fcvt floating point convert + ;; fsqrt floating point square root + ;; frsqrt floating point reciprocal square root +@@ -226,7 +228,7 @@ + "unknown,branch,jump,call,load,fpload,fpidxload,store,fpstore,fpidxstore, + prefetch,prefetchx,condmove,mgtf,mftg,const,arith,logical, + shift,slt,signext,clz,trap,imul,idiv,move, +- fmove,fadd,fmul,fmadd,fdiv,frdiv,fabs,fneg,fcmp,fcvt,fsqrt, ++ fmove,fadd,fmul,fmadd,fdiv,frdiv,fabs,fneg,fcmp,fcopysign,fcvt,fsqrt, + frsqrt,accext,accmod,multi,atomic,syncloop,nop,ghost" + (cond [(eq_attr "jirl" "!unset") (const_string "call") + (eq_attr "got" "load") (const_string "load") +@@ -976,6 +978,24 @@ + (set_attr "mode" "")]) + + ;; ++;; .................... ++;; ++;; FLOATING POINT COPYSIGN ++;; ++;; .................... ++ ++(define_insn "copysign3" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (unspec:ANYF [(match_operand:ANYF 1 "register_operand" "f") ++ (match_operand:ANYF 2 "register_operand" "f")] ++ UNSPEC_FCOPYSIGN))] ++ "TARGET_HARD_FLOAT" ++ "fcopysign.\t%0,%1,%2" ++ [(set_attr "type" "fcopysign") ++ (set_attr "mode" "")]) ++ ++ ++;; + ;; ................... + ;; + ;; Count leading zeroes. 
+diff --git a/gcc/testsuite/gcc.target/loongarch/fcopysign.c b/gcc/testsuite/gcc.target/loongarch/fcopysign.c +new file mode 100644 +index 000000000..058ba2cf5 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/fcopysign.c +@@ -0,0 +1,16 @@ ++/* { dg-do compile } */ ++/* { dg-options "-mdouble-float" } */ ++/* { dg-final { scan-assembler "fcopysign\\.s" } } */ ++/* { dg-final { scan-assembler "fcopysign\\.d" } } */ ++ ++double ++my_copysign (double a, double b) ++{ ++ return __builtin_copysign (a, b); ++} ++ ++float ++my_copysignf (float a, float b) ++{ ++ return __builtin_copysignf (a, b); ++} +-- +2.33.0 + diff --git a/LoongArch-Add-flogb.-s-d-instructions-and-expand-log.patch b/LoongArch-Add-flogb.-s-d-instructions-and-expand-log.patch new file mode 100644 index 0000000000000000000000000000000000000000..0a3ca0fbd7e3e1f45dfdb7db502e055fdeba8a09 --- /dev/null +++ b/LoongArch-Add-flogb.-s-d-instructions-and-expand-log.patch @@ -0,0 +1,123 @@ +From 2ae587a86bba31b91a127e353c31c9f861ff5326 Mon Sep 17 00:00:00 2001 +From: Xi Ruoyao +Date: Tue, 8 Nov 2022 13:42:20 +0800 +Subject: [PATCH 030/124] LoongArch: Add flogb.{s,d} instructions and expand + logb{sf,df}2 + +On LoongArch, flogb instructions extract the exponent of a non-negative +floating point value, but produces NaN for negative values. So we need +to add a fabs instruction when we expand logb. + +gcc/ChangeLog: + + * config/loongarch/loongarch.md (UNSPEC_FLOGB): New unspec. + (type): Add flogb. + (logb_non_negative2): New instruction template. + (logb2): New define_expand. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/flogb.c: New test. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/loongarch.md | 35 ++++++++++++++++++++-- + gcc/testsuite/gcc.target/loongarch/flogb.c | 18 +++++++++++ + 2 files changed, 51 insertions(+), 2 deletions(-) + create mode 100644 gcc/testsuite/gcc.target/loongarch/flogb.c + +diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md +index c141c9add..682ab9617 100644 +--- a/gcc/config/loongarch/loongarch.md ++++ b/gcc/config/loongarch/loongarch.md +@@ -42,6 +42,7 @@ + UNSPEC_FTINTRM + UNSPEC_FTINTRP + UNSPEC_FSCALEB ++ UNSPEC_FLOGB + + ;; Override return address for exception handling. + UNSPEC_EH_RETURN +@@ -217,6 +218,7 @@ + ;; fdiv floating point divide + ;; frdiv floating point reciprocal divide + ;; fabs floating point absolute value ++;; flogb floating point exponent extract + ;; fneg floating point negation + ;; fcmp floating point compare + ;; fcopysign floating point copysign +@@ -233,8 +235,8 @@ + "unknown,branch,jump,call,load,fpload,fpidxload,store,fpstore,fpidxstore, + prefetch,prefetchx,condmove,mgtf,mftg,const,arith,logical, + shift,slt,signext,clz,trap,imul,idiv,move, +- fmove,fadd,fmul,fmadd,fdiv,frdiv,fabs,fneg,fcmp,fcopysign,fcvt,fscaleb, +- fsqrt,frsqrt,accext,accmod,multi,atomic,syncloop,nop,ghost" ++ fmove,fadd,fmul,fmadd,fdiv,frdiv,fabs,flogb,fneg,fcmp,fcopysign,fcvt, ++ fscaleb,fsqrt,frsqrt,accext,accmod,multi,atomic,syncloop,nop,ghost" + (cond [(eq_attr "jirl" "!unset") (const_string "call") + (eq_attr "got" "load") (const_string "load") + +@@ -1039,6 +1041,35 @@ + (set_attr "mode" "")]) + + ;; ++;; .................... ++;; ++;; FLOATING POINT EXPONENT EXTRACT ++;; ++;; .................... 
++ ++(define_insn "logb_non_negative2" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (unspec:ANYF [(match_operand:ANYF 1 "register_operand" "f")] ++ UNSPEC_FLOGB))] ++ "TARGET_HARD_FLOAT" ++ "flogb.\t%0,%1" ++ [(set_attr "type" "flogb") ++ (set_attr "mode" "")]) ++ ++(define_expand "logb2" ++ [(set (match_operand:ANYF 0 "register_operand") ++ (unspec:ANYF [(abs:ANYF (match_operand:ANYF 1 "register_operand"))] ++ UNSPEC_FLOGB))] ++ "TARGET_HARD_FLOAT" ++{ ++ rtx tmp = gen_reg_rtx (mode); ++ ++ emit_insn (gen_abs2 (tmp, operands[1])); ++ emit_insn (gen_logb_non_negative2 (operands[0], tmp)); ++ DONE; ++}) ++ ++;; + ;; ................... + ;; + ;; Count leading zeroes. +diff --git a/gcc/testsuite/gcc.target/loongarch/flogb.c b/gcc/testsuite/gcc.target/loongarch/flogb.c +new file mode 100644 +index 000000000..1daefe54e +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/flogb.c +@@ -0,0 +1,18 @@ ++/* { dg-do compile } */ ++/* { dg-options "-mdouble-float -fno-math-errno" } */ ++/* { dg-final { scan-assembler "fabs\\.s" } } */ ++/* { dg-final { scan-assembler "fabs\\.d" } } */ ++/* { dg-final { scan-assembler "flogb\\.s" } } */ ++/* { dg-final { scan-assembler "flogb\\.d" } } */ ++ ++double ++my_logb (double a) ++{ ++ return __builtin_logb (a); ++} ++ ++float ++my_logbf (float a) ++{ ++ return __builtin_logbf (a); ++} +-- +2.33.0 + diff --git a/LoongArch-Add-fscaleb.-s-d-instructions-as-ldexp-sf-.patch b/LoongArch-Add-fscaleb.-s-d-instructions-as-ldexp-sf-.patch new file mode 100644 index 0000000000000000000000000000000000000000..736333d9b80eee42ceb1bdc42e1c6851cf9c6642 --- /dev/null +++ b/LoongArch-Add-fscaleb.-s-d-instructions-as-ldexp-sf-.patch @@ -0,0 +1,155 @@ +From e3d69a3b7a4e00e8bba88b8b4abaa1c17bc083d5 Mon Sep 17 00:00:00 2001 +From: Xi Ruoyao +Date: Tue, 8 Nov 2022 12:14:35 +0800 +Subject: [PATCH 029/124] LoongArch: Add fscaleb.{s,d} instructions as + ldexp{sf,df}3 + +This allows optimizing __builtin_ldexp{,f} and __builtin_scalbn{,f} with 
+-fno-math-errno. + +IMODE is added because we can't hard code SI for operand 2: fscaleb.d +instruction always take the high half of both source registers into +account. See my_ldexp_long in the test case. + +gcc/ChangeLog: + + * config/loongarch/loongarch.md (UNSPEC_FSCALEB): New unspec. + (type): Add fscaleb. + (IMODE): New mode attr. + (ldexp3): New instruction template. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/fscaleb.c: New test. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/loongarch.md | 26 ++++++++++- + gcc/testsuite/gcc.target/loongarch/fscaleb.c | 48 ++++++++++++++++++++ + 2 files changed, 72 insertions(+), 2 deletions(-) + create mode 100644 gcc/testsuite/gcc.target/loongarch/fscaleb.c + +diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md +index eb127c346..c141c9add 100644 +--- a/gcc/config/loongarch/loongarch.md ++++ b/gcc/config/loongarch/loongarch.md +@@ -41,6 +41,7 @@ + UNSPEC_FTINT + UNSPEC_FTINTRM + UNSPEC_FTINTRP ++ UNSPEC_FSCALEB + + ;; Override return address for exception handling. 
+ UNSPEC_EH_RETURN +@@ -220,6 +221,7 @@ + ;; fcmp floating point compare + ;; fcopysign floating point copysign + ;; fcvt floating point convert ++;; fscaleb floating point scale + ;; fsqrt floating point square root + ;; frsqrt floating point reciprocal square root + ;; multi multiword sequence (or user asm statements) +@@ -231,8 +233,8 @@ + "unknown,branch,jump,call,load,fpload,fpidxload,store,fpstore,fpidxstore, + prefetch,prefetchx,condmove,mgtf,mftg,const,arith,logical, + shift,slt,signext,clz,trap,imul,idiv,move, +- fmove,fadd,fmul,fmadd,fdiv,frdiv,fabs,fneg,fcmp,fcopysign,fcvt,fsqrt, +- frsqrt,accext,accmod,multi,atomic,syncloop,nop,ghost" ++ fmove,fadd,fmul,fmadd,fdiv,frdiv,fabs,fneg,fcmp,fcopysign,fcvt,fscaleb, ++ fsqrt,frsqrt,accext,accmod,multi,atomic,syncloop,nop,ghost" + (cond [(eq_attr "jirl" "!unset") (const_string "call") + (eq_attr "got" "load") (const_string "load") + +@@ -418,6 +420,10 @@ + ;; the controlling mode. + (define_mode_attr HALFMODE [(DF "SI") (DI "SI") (TF "DI")]) + ++;; This attribute gives the integer mode that has the same size of a ++;; floating-point mode. ++(define_mode_attr IMODE [(SF "SI") (DF "DI")]) ++ + ;; This code iterator allows signed and unsigned widening multiplications + ;; to use the same template. + (define_code_iterator any_extend [sign_extend zero_extend]) +@@ -1014,7 +1020,23 @@ + "fcopysign.\t%0,%1,%2" + [(set_attr "type" "fcopysign") + (set_attr "mode" "")]) ++ ++;; ++;; .................... ++;; ++;; FLOATING POINT SCALE ++;; ++;; .................... + ++(define_insn "ldexp3" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (unspec:ANYF [(match_operand:ANYF 1 "register_operand" "f") ++ (match_operand: 2 "register_operand" "f")] ++ UNSPEC_FSCALEB))] ++ "TARGET_HARD_FLOAT" ++ "fscaleb.\t%0,%1,%2" ++ [(set_attr "type" "fscaleb") ++ (set_attr "mode" "")]) + + ;; + ;; ................... 
+diff --git a/gcc/testsuite/gcc.target/loongarch/fscaleb.c b/gcc/testsuite/gcc.target/loongarch/fscaleb.c +new file mode 100644 +index 000000000..f18470fbb +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/fscaleb.c +@@ -0,0 +1,48 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -mabi=lp64d -mdouble-float -fno-math-errno" } */ ++/* { dg-final { scan-assembler-times "fscaleb\\.s" 3 } } */ ++/* { dg-final { scan-assembler-times "fscaleb\\.d" 4 } } */ ++/* { dg-final { scan-assembler-times "slli\\.w" 1 } } */ ++ ++double ++my_scalbln (double a, long b) ++{ ++ return __builtin_scalbln (a, b); ++} ++ ++double ++my_scalbn (double a, int b) ++{ ++ return __builtin_scalbn (a, b); ++} ++ ++double ++my_ldexp (double a, int b) ++{ ++ return __builtin_ldexp (a, b); ++} ++ ++float ++my_scalblnf (float a, long b) ++{ ++ return __builtin_scalblnf (a, b); ++} ++ ++float ++my_scalbnf (float a, int b) ++{ ++ return __builtin_scalbnf (a, b); ++} ++ ++float ++my_ldexpf (float a, int b) ++{ ++ return __builtin_ldexpf (a, b); ++} ++ ++/* b must be sign-extended */ ++double ++my_ldexp_long (double a, long b) ++{ ++ return __builtin_ldexp (a, b); ++} +-- +2.33.0 + diff --git a/LoongArch-Add-ftint-rm-rp-.-w-l-.-s-d-instructions.patch b/LoongArch-Add-ftint-rm-rp-.-w-l-.-s-d-instructions.patch new file mode 100644 index 0000000000000000000000000000000000000000..47dacdf510336d0d18e73555cf58272bf99db093 --- /dev/null +++ b/LoongArch-Add-ftint-rm-rp-.-w-l-.-s-d-instructions.patch @@ -0,0 +1,220 @@ +From 76d599c6d8f9cf78b51cd76a7ca8fbe11e2cda2b Mon Sep 17 00:00:00 2001 +From: Xi Ruoyao +Date: Sun, 6 Nov 2022 23:16:49 +0800 +Subject: [PATCH 028/124] LoongArch: Add ftint{,rm,rp}.{w,l}.{s,d} instructions + +This allows to optimize the following builtins if -fno-math-errno: + +- __builtin_lrint{,f} +- __builtin_lfloor{,f} +- __builtin_lceil{,f} + +Inspired by +https://gcc.gnu.org/pipermail/gcc-patches/2022-November/605287.html. 
+ +ANYFI is added so the compiler won't try ftint.l.s if -mfpu=32. If we +simply used GPR here an ICE would be triggered with __builtin_lrintf +and -mfpu=32. + +ftint{rm,rp} instructions may raise inexact exception, so they can't be +used if -fno-trapping-math -fno-fp-int-builtin-inexact. + +Note that the .w.{s,d} variants are not tested because we don't support +ILP32 for now. + +gcc/ChangeLog: + + * config/loongarch/loongarch.md (UNSPEC_FTINT): New unspec. + (UNSPEC_FTINTRM): Likewise. + (UNSPEC_FTINTRP): Likewise. + (LRINT): New define_int_iterator. + (lrint_pattern): New define_int_attr. + (lrint_submenmonic): Likewise. + (lrint_allow_inexact): Likewise. + (ANYFI): New define_mode_iterator. + (lrint): New instruction template. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/ftint.c: New test. + * gcc.target/loongarch/ftint-no-inexact.c: New test. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/loongarch.md | 34 ++++++++++++++ + .../gcc.target/loongarch/ftint-no-inexact.c | 44 +++++++++++++++++++ + gcc/testsuite/gcc.target/loongarch/ftint.c | 44 +++++++++++++++++++ + 3 files changed, 122 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/ftint-no-inexact.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/ftint.c + +diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md +index a14ab14ac..eb127c346 100644 +--- a/gcc/config/loongarch/loongarch.md ++++ b/gcc/config/loongarch/loongarch.md +@@ -38,6 +38,9 @@ + UNSPEC_FMAX + UNSPEC_FMIN + UNSPEC_FCOPYSIGN ++ UNSPEC_FTINT ++ UNSPEC_FTINTRM ++ UNSPEC_FTINTRP + + ;; Override return address for exception handling. + UNSPEC_EH_RETURN +@@ -374,6 +377,11 @@ + (define_mode_iterator ANYF [(SF "TARGET_HARD_FLOAT") + (DF "TARGET_DOUBLE_FLOAT")]) + ++;; Iterator for fixed-point modes which can be hold by a hardware ++;; floating-point register. 
++(define_mode_iterator ANYFI [(SI "TARGET_HARD_FLOAT") ++ (DI "TARGET_DOUBLE_FLOAT")]) ++ + ;; A mode for which moves involving FPRs may need to be split. + (define_mode_iterator SPLITF + [(DF "!TARGET_64BIT && TARGET_DOUBLE_FLOAT") +@@ -515,6 +523,19 @@ + (define_code_attr sel [(eq "masknez") (ne "maskeqz")]) + (define_code_attr selinv [(eq "maskeqz") (ne "masknez")]) + ++;; Iterator and attributes for floating-point to fixed-point conversion ++;; instructions. ++(define_int_iterator LRINT [UNSPEC_FTINT UNSPEC_FTINTRM UNSPEC_FTINTRP]) ++(define_int_attr lrint_pattern [(UNSPEC_FTINT "lrint") ++ (UNSPEC_FTINTRM "lfloor") ++ (UNSPEC_FTINTRP "lceil")]) ++(define_int_attr lrint_submenmonic [(UNSPEC_FTINT "") ++ (UNSPEC_FTINTRM "rm") ++ (UNSPEC_FTINTRP "rp")]) ++(define_int_attr lrint_allow_inexact [(UNSPEC_FTINT "1") ++ (UNSPEC_FTINTRM "0") ++ (UNSPEC_FTINTRP "0")]) ++ + ;; + ;; .................... + ;; +@@ -2022,6 +2043,19 @@ + [(set_attr "type" "fcvt") + (set_attr "mode" "")]) + ++;; Convert floating-point numbers to integers ++(define_insn "2" ++ [(set (match_operand:ANYFI 0 "register_operand" "=f") ++ (unspec:ANYFI [(match_operand:ANYF 1 "register_operand" "f")] ++ LRINT))] ++ "TARGET_HARD_FLOAT && ++ ( ++ || flag_fp_int_builtin_inexact ++ || !flag_trapping_math)" ++ "ftint.. %0,%1" ++ [(set_attr "type" "fcvt") ++ (set_attr "mode" "")]) ++ + ;; Load the low word of operand 0 with operand 1. 
+ (define_insn "load_low" + [(set (match_operand:SPLITF 0 "register_operand" "=f,f") +diff --git a/gcc/testsuite/gcc.target/loongarch/ftint-no-inexact.c b/gcc/testsuite/gcc.target/loongarch/ftint-no-inexact.c +new file mode 100644 +index 000000000..88b83a9c0 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/ftint-no-inexact.c +@@ -0,0 +1,44 @@ ++/* { dg-do compile } */ ++/* { dg-options "-mabi=lp64d -mdouble-float -fno-math-errno -fno-fp-int-builtin-inexact" } */ ++/* { dg-final { scan-assembler "ftint\\.l\\.s" } } */ ++/* { dg-final { scan-assembler "ftint\\.l\\.d" } } */ ++/* { dg-final { scan-assembler-not "ftintrm\\.l\\.s" } } */ ++/* { dg-final { scan-assembler-not "ftintrm\\.l\\.d" } } */ ++/* { dg-final { scan-assembler-not "ftintrp\\.l\\.s" } } */ ++/* { dg-final { scan-assembler-not "ftintrp\\.l\\.d" } } */ ++ ++long ++my_lrint (double a) ++{ ++ return __builtin_lrint (a); ++} ++ ++long ++my_lrintf (float a) ++{ ++ return __builtin_lrintf (a); ++} ++ ++long ++my_lfloor (double a) ++{ ++ return __builtin_lfloor (a); ++} ++ ++long ++my_lfloorf (float a) ++{ ++ return __builtin_lfloorf (a); ++} ++ ++long ++my_lceil (double a) ++{ ++ return __builtin_lceil (a); ++} ++ ++long ++my_lceilf (float a) ++{ ++ return __builtin_lceilf (a); ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/ftint.c b/gcc/testsuite/gcc.target/loongarch/ftint.c +new file mode 100644 +index 000000000..7a326a454 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/ftint.c +@@ -0,0 +1,44 @@ ++/* { dg-do compile } */ ++/* { dg-options "-mabi=lp64d -mdouble-float -fno-math-errno -ffp-int-builtin-inexact" } */ ++/* { dg-final { scan-assembler "ftint\\.l\\.s" } } */ ++/* { dg-final { scan-assembler "ftint\\.l\\.d" } } */ ++/* { dg-final { scan-assembler "ftintrm\\.l\\.s" } } */ ++/* { dg-final { scan-assembler "ftintrm\\.l\\.d" } } */ ++/* { dg-final { scan-assembler "ftintrp\\.l\\.s" } } */ ++/* { dg-final { scan-assembler "ftintrp\\.l\\.d" } } */ ++ ++long ++my_lrint (double a) 
++{ ++ return __builtin_lrint (a); ++} ++ ++long ++my_lrintf (float a) ++{ ++ return __builtin_lrintf (a); ++} ++ ++long ++my_lfloor (double a) ++{ ++ return __builtin_lfloor (a); ++} ++ ++long ++my_lfloorf (float a) ++{ ++ return __builtin_lfloorf (a); ++} ++ ++long ++my_lceil (double a) ++{ ++ return __builtin_lceil (a); ++} ++ ++long ++my_lceilf (float a) ++{ ++ return __builtin_lceilf (a); ++} +-- +2.33.0 + diff --git a/LoongArch-Add-new-code-model-medium.patch b/LoongArch-Add-new-code-model-medium.patch new file mode 100644 index 0000000000000000000000000000000000000000..71974e06182e1f41429e706614e81d9fc65e0f6d --- /dev/null +++ b/LoongArch-Add-new-code-model-medium.patch @@ -0,0 +1,1051 @@ +From 893322f214fbb916dc8eb6be5acbf7bdb7785e77 Mon Sep 17 00:00:00 2001 +From: Lulu Cheng +Date: Sat, 20 Aug 2022 15:19:51 +0800 +Subject: [PATCH 012/124] LoongArch: Add new code model 'medium'. + +The function jump instruction in normal mode is 'bl', +so the scope of the function jump is +-128MB. + +Now we've added support for 'medium' mode, this mode is +to complete the function jump through two instructions: + pcalau12i + jirl +So in this mode the function jump range is increased to +-2GB. + +Compared with 'normal' mode, 'medium' mode only affects the +jump range of functions. + +gcc/ChangeLog: + + * config/loongarch/genopts/loongarch-strings: Support code model medium. + * config/loongarch/genopts/loongarch.opt.in: Likewise. + * config/loongarch/loongarch-def.c: Likewise. + * config/loongarch/loongarch-def.h (CMODEL_LARGE): Likewise. + (CMODEL_EXTREME): Likewise. + (N_CMODEL_TYPES): Likewise. + (CMODEL_MEDIUM): Likewise. + * config/loongarch/loongarch-opts.cc: Likewise. + * config/loongarch/loongarch-opts.h (TARGET_CMODEL_MEDIUM): Likewise. + * config/loongarch/loongarch-str.h (STR_CMODEL_MEDIUM): Likewise. + * config/loongarch/loongarch.cc (loongarch_call_tls_get_addr): + Tls symbol Loading support medium mode. 
+ (loongarch_legitimize_call_address): When medium mode, make a symbolic + jump with two instructions. + (loongarch_option_override_internal): Support medium. + * config/loongarch/loongarch.md (@pcalau12i): New template. + (@sibcall_internal_1): New function call templates added to support + medium mode. + (@sibcall_value_internal_1): Likewise. + (@sibcall_value_multiple_internal_1): Likewise. + (@call_internal_1): Likewise. + (@call_value_internal_1): Likewise. + (@call_value_multiple_internal_1): Likewise. + * config/loongarch/loongarch.opt: Support medium. + * config/loongarch/predicates.md: Add processing about medium mode. + * doc/invoke.texi: Document for '-mcmodel=medium'. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/func-call-medium-1.c: New test. + * gcc.target/loongarch/func-call-medium-2.c: New test. + * gcc.target/loongarch/func-call-medium-3.c: New test. + * gcc.target/loongarch/func-call-medium-4.c: New test. + * gcc.target/loongarch/func-call-medium-5.c: New test. + * gcc.target/loongarch/func-call-medium-6.c: New test. + * gcc.target/loongarch/func-call-medium-7.c: New test. + * gcc.target/loongarch/func-call-medium-8.c: New test. + * gcc.target/loongarch/tls-gd-noplt.c: Add compile parameter '-mexplicit-relocs'. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/genopts/loongarch-strings | 1 + + gcc/config/loongarch/genopts/loongarch.opt.in | 3 + + gcc/config/loongarch/loongarch-def.c | 1 + + gcc/config/loongarch/loongarch-def.h | 7 +- + gcc/config/loongarch/loongarch-opts.cc | 15 ++- + gcc/config/loongarch/loongarch-opts.h | 1 + + gcc/config/loongarch/loongarch-str.h | 1 + + gcc/config/loongarch/loongarch.cc | 123 +++++++++++++---- + gcc/config/loongarch/loongarch.md | 125 +++++++++++++++++- + gcc/config/loongarch/loongarch.opt | 3 + + gcc/config/loongarch/predicates.md | 15 ++- + gcc/doc/invoke.texi | 3 + + .../gcc.target/loongarch/func-call-medium-1.c | 41 ++++++ + .../gcc.target/loongarch/func-call-medium-2.c | 41 ++++++ + .../gcc.target/loongarch/func-call-medium-3.c | 41 ++++++ + .../gcc.target/loongarch/func-call-medium-4.c | 41 ++++++ + .../gcc.target/loongarch/func-call-medium-5.c | 42 ++++++ + .../gcc.target/loongarch/func-call-medium-6.c | 42 ++++++ + .../gcc.target/loongarch/func-call-medium-7.c | 43 ++++++ + .../gcc.target/loongarch/func-call-medium-8.c | 42 ++++++ + .../gcc.target/loongarch/tls-gd-noplt.c | 4 +- + 21 files changed, 595 insertions(+), 40 deletions(-) + create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-medium-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-medium-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-medium-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-medium-4.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-medium-5.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-medium-6.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-medium-7.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-medium-8.c + +diff --git a/gcc/config/loongarch/genopts/loongarch-strings b/gcc/config/loongarch/genopts/loongarch-strings +index cb88ed56b..44ebb7ab1 100644 +--- 
a/gcc/config/loongarch/genopts/loongarch-strings ++++ b/gcc/config/loongarch/genopts/loongarch-strings +@@ -54,5 +54,6 @@ OPTSTR_CMODEL cmodel + STR_CMODEL_NORMAL normal + STR_CMODEL_TINY tiny + STR_CMODEL_TS tiny-static ++STR_CMODEL_MEDIUM medium + STR_CMODEL_LARGE large + STR_CMODEL_EXTREME extreme +diff --git a/gcc/config/loongarch/genopts/loongarch.opt.in b/gcc/config/loongarch/genopts/loongarch.opt.in +index a571b6b75..ebdd9538d 100644 +--- a/gcc/config/loongarch/genopts/loongarch.opt.in ++++ b/gcc/config/loongarch/genopts/loongarch.opt.in +@@ -172,6 +172,9 @@ Enum(cmodel) String(@@STR_CMODEL_TINY@@) Value(CMODEL_TINY) + EnumValue + Enum(cmodel) String(@@STR_CMODEL_TS@@) Value(CMODEL_TINY_STATIC) + ++EnumValue ++Enum(cmodel) String(@@STR_CMODEL_MEDIUM@@) Value(CMODEL_MEDIUM) ++ + EnumValue + Enum(cmodel) String(@@STR_CMODEL_LARGE@@) Value(CMODEL_LARGE) + +diff --git a/gcc/config/loongarch/loongarch-def.c b/gcc/config/loongarch/loongarch-def.c +index c8769b7d6..cbf995d81 100644 +--- a/gcc/config/loongarch/loongarch-def.c ++++ b/gcc/config/loongarch/loongarch-def.c +@@ -152,6 +152,7 @@ loongarch_cmodel_strings[] = { + [CMODEL_NORMAL] = STR_CMODEL_NORMAL, + [CMODEL_TINY] = STR_CMODEL_TINY, + [CMODEL_TINY_STATIC] = STR_CMODEL_TS, ++ [CMODEL_MEDIUM] = STR_CMODEL_MEDIUM, + [CMODEL_LARGE] = STR_CMODEL_LARGE, + [CMODEL_EXTREME] = STR_CMODEL_EXTREME, + }; +diff --git a/gcc/config/loongarch/loongarch-def.h b/gcc/config/loongarch/loongarch-def.h +index c2c35b6ba..b5985f070 100644 +--- a/gcc/config/loongarch/loongarch-def.h ++++ b/gcc/config/loongarch/loongarch-def.h +@@ -82,9 +82,10 @@ extern const char* loongarch_cmodel_strings[]; + #define CMODEL_NORMAL 0 + #define CMODEL_TINY 1 + #define CMODEL_TINY_STATIC 2 +-#define CMODEL_LARGE 3 +-#define CMODEL_EXTREME 4 +-#define N_CMODEL_TYPES 5 ++#define CMODEL_MEDIUM 3 ++#define CMODEL_LARGE 4 ++#define CMODEL_EXTREME 5 ++#define N_CMODEL_TYPES 6 + + /* enum switches */ + /* The "SW_" codes represent command-line switches 
(options that +diff --git a/gcc/config/loongarch/loongarch-opts.cc b/gcc/config/loongarch/loongarch-opts.cc +index 2ae89f234..e13eafb58 100644 +--- a/gcc/config/loongarch/loongarch-opts.cc ++++ b/gcc/config/loongarch/loongarch-opts.cc +@@ -376,11 +376,24 @@ fallback: + + /* 5. Target code model */ + t.cmodel = constrained.cmodel ? opt_cmodel : CMODEL_NORMAL; +- if (t.cmodel != CMODEL_NORMAL && t.cmodel != CMODEL_EXTREME) ++ ++ switch (t.cmodel) + { ++ case CMODEL_TINY: ++ case CMODEL_TINY_STATIC: ++ case CMODEL_LARGE: + warning (0, "%qs is not supported, now cmodel is set to %qs", + loongarch_cmodel_strings[t.cmodel], "normal"); + t.cmodel = CMODEL_NORMAL; ++ break; ++ ++ case CMODEL_NORMAL: ++ case CMODEL_MEDIUM: ++ case CMODEL_EXTREME: ++ break; ++ ++ default: ++ gcc_unreachable (); + } + + /* Cleanup and return. */ +diff --git a/gcc/config/loongarch/loongarch-opts.h b/gcc/config/loongarch/loongarch-opts.h +index da24ecd2b..3523a4cf7 100644 +--- a/gcc/config/loongarch/loongarch-opts.h ++++ b/gcc/config/loongarch/loongarch-opts.h +@@ -46,6 +46,7 @@ loongarch_config_target (struct loongarch_target *target, + #define TARGET_CMODEL_NORMAL (la_target.cmodel == CMODEL_NORMAL) + #define TARGET_CMODEL_TINY (la_target.cmodel == CMODEL_TINY) + #define TARGET_CMODEL_TINY_STATIC (la_target.cmodel == CMODEL_TINY_STATIC) ++#define TARGET_CMODEL_MEDIUM (la_target.cmodel == CMODEL_MEDIUM) + #define TARGET_CMODEL_LARGE (la_target.cmodel == CMODEL_LARGE) + #define TARGET_CMODEL_EXTREME (la_target.cmodel == CMODEL_EXTREME) + +diff --git a/gcc/config/loongarch/loongarch-str.h b/gcc/config/loongarch/loongarch-str.h +index 0e8889b8c..9f1b0989c 100644 +--- a/gcc/config/loongarch/loongarch-str.h ++++ b/gcc/config/loongarch/loongarch-str.h +@@ -53,6 +53,7 @@ along with GCC; see the file COPYING3. 
If not see + #define STR_CMODEL_NORMAL "normal" + #define STR_CMODEL_TINY "tiny" + #define STR_CMODEL_TS "tiny-static" ++#define STR_CMODEL_MEDIUM "medium" + #define STR_CMODEL_LARGE "large" + #define STR_CMODEL_EXTREME "extreme" + +diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc +index 1a33f668f..04c4ddaed 100644 +--- a/gcc/config/loongarch/loongarch.cc ++++ b/gcc/config/loongarch/loongarch.cc +@@ -2461,44 +2461,96 @@ loongarch_call_tls_get_addr (rtx sym, enum loongarch_symbol_type type, rtx v0) + } + + if (flag_plt) +- insn = emit_call_insn (gen_call_value_internal (v0, +- loongarch_tls_symbol, +- const0_rtx)); +- else + { +- rtx dest = gen_reg_rtx (Pmode); +- +- if (TARGET_CMODEL_EXTREME) ++ switch (la_opt_cmodel) + { +- gcc_assert (TARGET_EXPLICIT_RELOCS); ++ case CMODEL_NORMAL: ++ insn = emit_call_insn (gen_call_value_internal (v0, ++ loongarch_tls_symbol, ++ const0_rtx)); ++ break; + +- rtx tmp1 = gen_reg_rtx (Pmode); +- rtx high = gen_reg_rtx (Pmode); ++ case CMODEL_MEDIUM: ++ { ++ rtx reg = gen_reg_rtx (Pmode); ++ if (TARGET_EXPLICIT_RELOCS) ++ { ++ emit_insn (gen_pcalau12i (Pmode, reg, loongarch_tls_symbol)); ++ rtx call = gen_call_value_internal_1 (Pmode, v0, reg, ++ loongarch_tls_symbol, ++ const0_rtx); ++ insn = emit_call_insn (call); ++ } ++ else ++ { ++ emit_move_insn (reg, loongarch_tls_symbol); ++ insn = emit_call_insn (gen_call_value_internal (v0, ++ reg, ++ const0_rtx)); ++ } ++ break; ++ } + +- loongarch_emit_move (high, +- gen_rtx_HIGH (Pmode, loongarch_tls_symbol)); +- loongarch_emit_move (tmp1, gen_rtx_LO_SUM (Pmode, +- gen_rtx_REG (Pmode, 0), +- loongarch_tls_symbol)); +- emit_insn (gen_lui_h_lo20 (tmp1, tmp1, loongarch_tls_symbol)); +- emit_insn (gen_lui_h_hi12 (tmp1, tmp1, loongarch_tls_symbol)); +- loongarch_emit_move (dest, +- gen_rtx_MEM (Pmode, +- gen_rtx_PLUS (Pmode, high, tmp1))); ++ /* code model extreme not support plt. 
*/ ++ case CMODEL_EXTREME: ++ case CMODEL_LARGE: ++ case CMODEL_TINY: ++ case CMODEL_TINY_STATIC: ++ default: ++ gcc_unreachable (); + } +- else ++ } ++ else ++ { ++ rtx dest = gen_reg_rtx (Pmode); ++ ++ switch (la_opt_cmodel) + { +- if (TARGET_EXPLICIT_RELOCS) ++ case CMODEL_NORMAL: ++ case CMODEL_MEDIUM: ++ { ++ if (TARGET_EXPLICIT_RELOCS) ++ { ++ rtx high = gen_reg_rtx (Pmode); ++ loongarch_emit_move (high, ++ gen_rtx_HIGH (Pmode, ++ loongarch_tls_symbol)); ++ emit_insn (gen_ld_from_got (Pmode, dest, high, ++ loongarch_tls_symbol)); ++ } ++ else ++ loongarch_emit_move (dest, loongarch_tls_symbol); ++ break; ++ } ++ ++ case CMODEL_EXTREME: + { ++ gcc_assert (TARGET_EXPLICIT_RELOCS); ++ ++ rtx tmp1 = gen_reg_rtx (Pmode); + rtx high = gen_reg_rtx (Pmode); ++ + loongarch_emit_move (high, + gen_rtx_HIGH (Pmode, loongarch_tls_symbol)); +- emit_insn (gen_ld_from_got (Pmode, dest, high, +- loongarch_tls_symbol)); ++ loongarch_emit_move (tmp1, gen_rtx_LO_SUM (Pmode, ++ gen_rtx_REG (Pmode, 0), ++ loongarch_tls_symbol)); ++ emit_insn (gen_lui_h_lo20 (tmp1, tmp1, loongarch_tls_symbol)); ++ emit_insn (gen_lui_h_hi12 (tmp1, tmp1, loongarch_tls_symbol)); ++ loongarch_emit_move (dest, ++ gen_rtx_MEM (Pmode, ++ gen_rtx_PLUS (Pmode, ++ high, tmp1))); + } +- else +- loongarch_emit_move (dest, loongarch_tls_symbol); ++ break; ++ ++ case CMODEL_LARGE: ++ case CMODEL_TINY: ++ case CMODEL_TINY_STATIC: ++ default: ++ gcc_unreachable (); + } ++ + insn = emit_call_insn (gen_call_value_internal (v0, dest, const0_rtx)); + } + +@@ -2618,6 +2670,24 @@ loongarch_legitimize_call_address (rtx addr) + loongarch_emit_move (reg, addr); + return reg; + } ++ ++ enum loongarch_symbol_type symbol_type = loongarch_classify_symbol (addr); ++ ++ /* Split function call insn 'bl sym' or 'bl %plt(sym)' to : ++ pcalau12i $rd, %pc_hi20(sym) ++ jr $rd, %pc_lo12(sym). 
*/ ++ ++ if (TARGET_CMODEL_MEDIUM ++ && TARGET_EXPLICIT_RELOCS ++ && (SYMBOL_REF_P (addr) || LABEL_REF_P (addr)) ++ && (symbol_type == SYMBOL_PCREL ++ || (symbol_type == SYMBOL_GOT_DISP && flag_plt))) ++ { ++ rtx reg = gen_reg_rtx (Pmode); ++ emit_insn (gen_pcalau12i (Pmode, reg, addr)); ++ return gen_rtx_LO_SUM (Pmode, reg, addr); ++ } ++ + return addr; + } + +@@ -5996,6 +6066,7 @@ loongarch_option_override_internal (struct gcc_options *opts) + break; + + case CMODEL_TINY_STATIC: ++ case CMODEL_MEDIUM: + case CMODEL_NORMAL: + case CMODEL_TINY: + case CMODEL_LARGE: +diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md +index 8fc10444c..3787fd823 100644 +--- a/gcc/config/loongarch/loongarch.md ++++ b/gcc/config/loongarch/loongarch.md +@@ -59,11 +59,15 @@ + UNSPEC_CRCC + + UNSPEC_LOAD_FROM_GOT ++ UNSPEC_PCALAU12I + UNSPEC_ORI_L_LO12 + UNSPEC_LUI_L_HI20 + UNSPEC_LUI_H_LO20 + UNSPEC_LUI_H_HI12 + UNSPEC_TLS_LOW ++ ++ UNSPEC_SIBCALL_VALUE_MULTIPLE_INTERNAL_1 ++ UNSPEC_CALL_VALUE_MULTIPLE_INTERNAL_1 + ]) + + (define_c_enum "unspecv" [ +@@ -1946,6 +1950,14 @@ + [(set_attr "type" "move")] + ) + ++(define_insn "@pcalau12i" ++ [(set (match_operand:P 0 "register_operand" "=j") ++ (unspec:P [(match_operand:P 1 "symbolic_operand" "")] ++ UNSPEC_PCALAU12I))] ++ "" ++ "pcalau12i\t%0,%%pc_hi20(%1)" ++ [(set_attr "type" "move")]) ++ + (define_insn "@ori_l_lo12" + [(set (match_operand:P 0 "register_operand" "=r") + (unspec:P [(match_operand:P 1 "register_operand" "r") +@@ -2877,7 +2889,12 @@ + { + rtx target = loongarch_legitimize_call_address (XEXP (operands[0], 0)); + +- emit_call_insn (gen_sibcall_internal (target, operands[1])); ++ if (GET_CODE (target) == LO_SUM) ++ emit_call_insn (gen_sibcall_internal_1 (Pmode, XEXP (target, 0), ++ XEXP (target, 1), ++ operands[1])); ++ else ++ emit_call_insn (gen_sibcall_internal (target, operands[1])); + DONE; + }) + +@@ -2891,6 +2908,14 @@ + b\t%%plt(%0)" + [(set_attr "jirl" "indirect,direct,direct")]) + 
++(define_insn "@sibcall_internal_1" ++ [(call (mem:P (lo_sum:P (match_operand:P 0 "register_operand" "j") ++ (match_operand:P 1 "symbolic_operand" ""))) ++ (match_operand 2 "" ""))] ++ "SIBLING_CALL_P (insn) && TARGET_CMODEL_MEDIUM" ++ "jirl\t$r0,%0,%%pc_lo12(%1)" ++ [(set_attr "jirl" "indirect")]) ++ + (define_expand "sibcall_value" + [(parallel [(set (match_operand 0 "") + (call (match_operand 1 "") +@@ -2906,7 +2931,14 @@ + rtx arg1 = XEXP (XVECEXP (operands[0],0, 0), 0); + rtx arg2 = XEXP (XVECEXP (operands[0],0, 1), 0); + +- emit_call_insn (gen_sibcall_value_multiple_internal (arg1, target, ++ if (GET_CODE (target) == LO_SUM) ++ emit_call_insn (gen_sibcall_value_multiple_internal_1 (Pmode, arg1, ++ XEXP (target, 0), ++ XEXP (target, 1), ++ operands[2], ++ arg2)); ++ else ++ emit_call_insn (gen_sibcall_value_multiple_internal (arg1, target, + operands[2], + arg2)); + } +@@ -2916,7 +2948,13 @@ + if (GET_CODE (operands[0]) == PARALLEL && XVECLEN (operands[0], 0) == 1) + operands[0] = XEXP (XVECEXP (operands[0], 0, 0), 0); + +- emit_call_insn (gen_sibcall_value_internal (operands[0], target, ++ if (GET_CODE (target) == LO_SUM) ++ emit_call_insn (gen_sibcall_value_internal_1 (Pmode, operands[0], ++ XEXP (target, 0), ++ XEXP (target, 1), ++ operands[2])); ++ else ++ emit_call_insn (gen_sibcall_value_internal (operands[0], target, + operands[2])); + } + DONE; +@@ -2933,6 +2971,15 @@ + b\t%%plt(%1)" + [(set_attr "jirl" "indirect,direct,direct")]) + ++(define_insn "@sibcall_value_internal_1" ++ [(set (match_operand 0 "register_operand" "") ++ (call (mem:P (lo_sum:P (match_operand:P 1 "register_operand" "j") ++ (match_operand:P 2 "symbolic_operand" ""))) ++ (match_operand 3 "" "")))] ++ "SIBLING_CALL_P (insn) && TARGET_CMODEL_MEDIUM" ++ "jirl\t$r0,%1,%%pc_lo12(%2)" ++ [(set_attr "jirl" "indirect")]) ++ + (define_insn "sibcall_value_multiple_internal" + [(set (match_operand 0 "register_operand" "") + (call (mem:SI (match_operand 1 "call_insn_operand" "j,c,b")) +@@ 
-2947,6 +2994,21 @@ + b\t%%plt(%1)" + [(set_attr "jirl" "indirect,direct,direct")]) + ++(define_insn "@sibcall_value_multiple_internal_1" ++ [(set (match_operand 0 "register_operand" "") ++ (call (mem:P (unspec:P [(match_operand:P 1 "register_operand" "j") ++ (match_operand:P 2 "symbolic_operand" "")] ++ UNSPEC_SIBCALL_VALUE_MULTIPLE_INTERNAL_1)) ++ (match_operand 3 "" ""))) ++ (set (match_operand 4 "register_operand" "") ++ (call (mem:P (unspec:P [(match_dup 1) ++ (match_dup 2)] ++ UNSPEC_SIBCALL_VALUE_MULTIPLE_INTERNAL_1)) ++ (match_dup 3)))] ++ "SIBLING_CALL_P (insn) && TARGET_CMODEL_MEDIUM" ++ "jirl\t$r0,%1,%%pc_lo12(%2)" ++ [(set_attr "jirl" "indirect")]) ++ + (define_expand "call" + [(parallel [(call (match_operand 0 "") + (match_operand 1 "")) +@@ -2956,7 +3018,11 @@ + { + rtx target = loongarch_legitimize_call_address (XEXP (operands[0], 0)); + +- emit_call_insn (gen_call_internal (target, operands[1])); ++ if (GET_CODE (target) == LO_SUM) ++ emit_call_insn (gen_call_internal_1 (Pmode, XEXP (target, 0), ++ XEXP (target, 1), operands[1])); ++ else ++ emit_call_insn (gen_call_internal (target, operands[1])); + DONE; + }) + +@@ -2971,6 +3037,15 @@ + bl\t%%plt(%0)" + [(set_attr "jirl" "indirect,direct,direct")]) + ++(define_insn "@call_internal_1" ++ [(call (mem:P (lo_sum:P (match_operand:P 0 "register_operand" "j") ++ (match_operand:P 1 "symbolic_operand" ""))) ++ (match_operand 2 "" "")) ++ (clobber (reg:SI RETURN_ADDR_REGNUM))] ++ "TARGET_CMODEL_MEDIUM" ++ "jirl\t$r1,%0,%%pc_lo12(%1)" ++ [(set_attr "jirl" "indirect")]) ++ + (define_expand "call_value" + [(parallel [(set (match_operand 0 "") + (call (match_operand 1 "") +@@ -2985,7 +3060,13 @@ + rtx arg1 = XEXP (XVECEXP (operands[0], 0, 0), 0); + rtx arg2 = XEXP (XVECEXP (operands[0], 0, 1), 0); + +- emit_call_insn (gen_call_value_multiple_internal (arg1, target, ++ if (GET_CODE (target) == LO_SUM) ++ emit_call_insn (gen_call_value_multiple_internal_1 (Pmode, arg1, ++ XEXP (target, 0), ++ XEXP (target, 1), ++ 
operands[2], arg2)); ++ else ++ emit_call_insn (gen_call_value_multiple_internal (arg1, target, + operands[2], arg2)); + } + else +@@ -2994,7 +3075,13 @@ + if (GET_CODE (operands[0]) == PARALLEL && XVECLEN (operands[0], 0) == 1) + operands[0] = XEXP (XVECEXP (operands[0], 0, 0), 0); + +- emit_call_insn (gen_call_value_internal (operands[0], target, ++ if (GET_CODE (target) == LO_SUM) ++ emit_call_insn (gen_call_value_internal_1 (Pmode, operands[0], ++ XEXP (target, 0), ++ XEXP (target, 1), ++ operands[2])); ++ else ++ emit_call_insn (gen_call_value_internal (operands[0], target, + operands[2])); + } + DONE; +@@ -3012,6 +3099,16 @@ + bl\t%%plt(%1)" + [(set_attr "jirl" "indirect,direct,direct")]) + ++(define_insn "@call_value_internal_1" ++ [(set (match_operand 0 "register_operand" "") ++ (call (mem:P (lo_sum:P (match_operand:P 1 "register_operand" "j") ++ (match_operand:P 2 "symbolic_operand" ""))) ++ (match_operand 3 "" ""))) ++ (clobber (reg:SI RETURN_ADDR_REGNUM))] ++ "TARGET_CMODEL_MEDIUM" ++ "jirl\t$r1,%1,%%pc_lo12(%2)" ++ [(set_attr "jirl" "indirect")]) ++ + (define_insn "call_value_multiple_internal" + [(set (match_operand 0 "register_operand" "") + (call (mem:SI (match_operand 1 "call_insn_operand" "e,c,b")) +@@ -3027,6 +3124,22 @@ + bl\t%%plt(%1)" + [(set_attr "jirl" "indirect,direct,direct")]) + ++(define_insn "@call_value_multiple_internal_1" ++ [(set (match_operand 0 "register_operand" "") ++ (call (mem:P (unspec:P [(match_operand:P 1 "register_operand" "j") ++ (match_operand:P 2 "symbolic_operand" "")] ++ UNSPEC_CALL_VALUE_MULTIPLE_INTERNAL_1)) ++ (match_operand 3 "" ""))) ++ (set (match_operand 4 "register_operand" "") ++ (call (mem:P (unspec:P [(match_dup 1) ++ (match_dup 2)] ++ UNSPEC_CALL_VALUE_MULTIPLE_INTERNAL_1)) ++ (match_dup 3))) ++ (clobber (reg:SI RETURN_ADDR_REGNUM))] ++ "TARGET_CMODEL_MEDIUM" ++ "jirl\t$r1,%1,%%pc_lo12(%2)" ++ [(set_attr "jirl" "indirect")]) ++ + + ;; Call subroutine returning any type. 
+ (define_expand "untyped_call" +diff --git a/gcc/config/loongarch/loongarch.opt b/gcc/config/loongarch/loongarch.opt +index 9df7e1872..639523421 100644 +--- a/gcc/config/loongarch/loongarch.opt ++++ b/gcc/config/loongarch/loongarch.opt +@@ -179,6 +179,9 @@ Enum(cmodel) String(tiny) Value(CMODEL_TINY) + EnumValue + Enum(cmodel) String(tiny-static) Value(CMODEL_TINY_STATIC) + ++EnumValue ++Enum(cmodel) String(medium) Value(CMODEL_MEDIUM) ++ + EnumValue + Enum(cmodel) String(large) Value(CMODEL_LARGE) + +diff --git a/gcc/config/loongarch/predicates.md b/gcc/config/loongarch/predicates.md +index e38c6fbdd..8bd0c1376 100644 +--- a/gcc/config/loongarch/predicates.md ++++ b/gcc/config/loongarch/predicates.md +@@ -123,16 +123,27 @@ + if (offset != const0_rtx) + return false; + ++ /* When compiling with '-mcmodel=medium -mexplicit-relocs' ++ symbols are splited in loongarch_legitimize_call_address. ++ ++ When compiling with '-mcmodel=medium -mno-explicit-relocs', ++ first obtain the symbolic address or the address of the ++ plt entry, and then perform an indirect jump, so return false. */ ++ + switch (symbol_type) + { + case SYMBOL_PCREL: +- if (TARGET_CMODEL_EXTREME) ++ if (TARGET_CMODEL_EXTREME ++ || (TARGET_CMODEL_MEDIUM && !TARGET_EXPLICIT_RELOCS)) + return false; + else + return 1; + + case SYMBOL_GOT_DISP: +- if (TARGET_CMODEL_EXTREME || !flag_plt) ++ if (TARGET_CMODEL_EXTREME ++ || !flag_plt ++ || (flag_plt && TARGET_CMODEL_MEDIUM ++ && !TARGET_EXPLICIT_RELOCS)) + return false; + else + return 1; +diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi +index c4f83e62a..2a5592516 100644 +--- a/gcc/doc/invoke.texi ++++ b/gcc/doc/invoke.texi +@@ -24625,6 +24625,9 @@ Set the code model to one of: + The text segment must be within 128MB addressing space. The data segment must + be within 2GB addressing space. + ++@item medium ++The text segment and data segment must be within 2GB addressing space. 
++ + @item large (Not implemented yet) + + @item extreme +diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-medium-1.c b/gcc/testsuite/gcc.target/loongarch/func-call-medium-1.c +new file mode 100644 +index 000000000..276d73e5e +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/func-call-medium-1.c +@@ -0,0 +1,41 @@ ++/* { dg-do compile } */ ++/* { dg-options "-mabi=lp64d -O0 -fpic -fplt -mno-explicit-relocs -mcmodel=medium" } */ ++/* { dg-final { scan-assembler "test:.*la\.global\t.*g\n\tjirl" } } */ ++/* { dg-final { scan-assembler "test1:.*la\.global\t.*f\n\tjirl" } } */ ++/* { dg-final { scan-assembler "test2:.*la\.local\t.*l\n\tjirl" } } */ ++/* { dg-final { scan-assembler "test3:.*la\.global\t.*\_\_tls\_get\_addr" } } */ ++ ++extern void g (void); ++void ++f (void) ++{} ++ ++static void ++l (void) ++{} ++ ++void ++test (void) ++{ ++ g (); ++} ++ ++void ++test1 (void) ++{ ++ f (); ++} ++ ++void ++test2 (void) ++{ ++ l (); ++} ++ ++__attribute__ ((tls_model ("global-dynamic"))) __thread int a; ++ ++void ++test3 (void) ++{ ++ a = 10; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-medium-2.c b/gcc/testsuite/gcc.target/loongarch/func-call-medium-2.c +new file mode 100644 +index 000000000..237821c06 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/func-call-medium-2.c +@@ -0,0 +1,41 @@ ++/* { dg-do compile } */ ++/* { dg-options "-mabi=lp64d -O0 -fno-pic -fplt -mno-explicit-relocs -mcmodel=medium" } */ ++/* { dg-final { scan-assembler "test:.*la\.global\t.*g\n\tjirl" } } */ ++/* { dg-final { scan-assembler "test1:.*la\.local\t.*f\n\tjirl" } } */ ++/* { dg-final { scan-assembler "test2:.*la\.local\t.*l\n\tjirl" } } */ ++/* { dg-final { scan-assembler "test3:.*la\.global\t.*\_\_tls\_get\_addr" } } */ ++ ++extern void g (void); ++void ++f (void) ++{} ++ ++static void ++l (void) ++{} ++ ++void ++test (void) ++{ ++ g (); ++} ++ ++void ++test1 (void) ++{ ++ f (); ++} ++ ++void ++test2 (void) ++{ ++ l (); ++} ++ ++__attribute__ 
((tls_model ("global-dynamic"))) __thread int a; ++ ++void ++test3 (void) ++{ ++ a = 10; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-medium-3.c b/gcc/testsuite/gcc.target/loongarch/func-call-medium-3.c +new file mode 100644 +index 000000000..9a6e16103 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/func-call-medium-3.c +@@ -0,0 +1,41 @@ ++/* { dg-do compile } */ ++/* { dg-options "-mabi=lp64d -O0 -fpic -fno-plt -mno-explicit-relocs -mcmodel=medium" } */ ++/* { dg-final { scan-assembler "test:.*la\.global\t.*g\n\tjirl" } } */ ++/* { dg-final { scan-assembler "test1:.*la\.global\t.*f\n\tjirl" } } */ ++/* { dg-final { scan-assembler "test2:.*la\.local\t.*l\n\tjirl" } } */ ++/* { dg-final { scan-assembler "test3:.*la\.global\t.*\_\_tls\_get\_addr" } } */ ++ ++extern void g (void); ++void ++f (void) ++{} ++ ++static void ++l (void) ++{} ++ ++void ++test (void) ++{ ++ g (); ++} ++ ++void ++test1 (void) ++{ ++ f (); ++} ++ ++void ++test2 (void) ++{ ++ l (); ++} ++ ++__attribute__ ((tls_model ("global-dynamic"))) __thread int a; ++ ++void ++test3 (void) ++{ ++ a = 10; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-medium-4.c b/gcc/testsuite/gcc.target/loongarch/func-call-medium-4.c +new file mode 100644 +index 000000000..2577e3452 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/func-call-medium-4.c +@@ -0,0 +1,41 @@ ++/* { dg-do compile } */ ++/* { dg-options "-mabi=lp64d -O0 -fno-pic -fno-plt -mno-explicit-relocs -mcmodel=medium" } */ ++/* { dg-final { scan-assembler "test:.*la\.global\t.*g\n\tjirl" } } */ ++/* { dg-final { scan-assembler "test1:.*la\.local\t.*f\n\tjirl" } } */ ++/* { dg-final { scan-assembler "test2:.*la\.local\t.*l\n\tjirl" } } */ ++/* { dg-final { scan-assembler "test3:.*la\.global\t.*\_\_tls\_get\_addr" } } */ ++ ++extern void g (void); ++void ++f (void) ++{} ++ ++static void ++l (void) ++{} ++ ++void ++test (void) ++{ ++ g (); ++} ++ ++void ++test1 (void) ++{ ++ f (); ++} ++ ++void ++test2 (void) 
++{ ++ l (); ++} ++ ++__attribute__ ((tls_model ("global-dynamic"))) __thread int a; ++ ++void ++test3 (void) ++{ ++ a = 10; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-medium-5.c b/gcc/testsuite/gcc.target/loongarch/func-call-medium-5.c +new file mode 100644 +index 000000000..d70b6ea46 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/func-call-medium-5.c +@@ -0,0 +1,42 @@ ++/* { dg-do compile } */ ++/* { dg-options "-mabi=lp64d -O0 -fpic -fplt -mexplicit-relocs -mcmodel=medium" } */ ++/* { dg-final { scan-assembler "test:.*pcalau12i.*%pc_hi20\\(g\\)\n\tjirl.*pc_lo12\\(g\\)" } } */ ++/* { dg-final { scan-assembler "test1:.*pcalau12i.*%pc_hi20\\(f\\)\n\tjirl.*%pc_lo12\\(f\\)" } } */ ++/* { dg-final { scan-assembler "test2:.*pcalau12i.*%pc_hi20\\(l\\)\n\tjirl.*%pc_lo12\\(l\\)" } } */ ++/* { dg-final { scan-assembler "test3:.*pcalau12i.*%pc_hi20\\(__tls_get_addr\\)\n\t.*\n\tjirl.*%pc_lo12\\(__tls_get_addr\\)" } } */ ++ ++extern void g (void); ++ ++void ++f (void) ++{} ++ ++static void ++l (void) ++{} ++ ++void ++test (void) ++{ ++ g (); ++} ++ ++void ++test1 (void) ++{ ++ f (); ++} ++ ++void ++test2 (void) ++{ ++ l (); ++} ++ ++__attribute__ ((tls_model ("global-dynamic"))) __thread int a; ++ ++void ++test3 (void) ++{ ++ a = 10; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-medium-6.c b/gcc/testsuite/gcc.target/loongarch/func-call-medium-6.c +new file mode 100644 +index 000000000..f963a9944 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/func-call-medium-6.c +@@ -0,0 +1,42 @@ ++/* { dg-do compile } */ ++/* { dg-options "-mabi=lp64d -O0 -fno-pic -fplt -mexplicit-relocs -mcmodel=medium" } */ ++/* { dg-final { scan-assembler "test:.*pcalau12i.*%pc_hi20\\(g\\)\n\tjirl.*pc_lo12\\(g\\)" } } */ ++/* { dg-final { scan-assembler "test1:.*pcalau12i.*%pc_hi20\\(f\\)\n\tjirl.*%pc_lo12\\(f\\)" } } */ ++/* { dg-final { scan-assembler "test2:.*pcalau12i.*%pc_hi20\\(l\\)\n\tjirl.*%pc_lo12\\(l\\)" } } */ ++/* { dg-final { 
scan-assembler "test3:.*pcalau12i.*%pc_hi20\\(__tls_get_addr\\)\n\t.*\n\tjirl.*%pc_lo12\\(__tls_get_addr\\)" } } */ ++ ++extern void g (void); ++ ++void ++f (void) ++{} ++ ++static void ++l (void) ++{} ++ ++void ++test (void) ++{ ++ g (); ++} ++ ++void ++test1 (void) ++{ ++ f (); ++} ++ ++void ++test2 (void) ++{ ++ l (); ++} ++ ++__attribute__ ((tls_model ("global-dynamic"))) __thread int a; ++ ++void ++test3 (void) ++{ ++ a = 10; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-medium-7.c b/gcc/testsuite/gcc.target/loongarch/func-call-medium-7.c +new file mode 100644 +index 000000000..f2818b2da +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/func-call-medium-7.c +@@ -0,0 +1,43 @@ ++/* { dg-do compile } */ ++/* { dg-options "-mabi=lp64d -O0 -fpic -fno-plt -mexplicit-relocs -mcmodel=medium" } */ ++/* { dg-final { scan-assembler "test:.*pcalau12i\t.*%got_pc_hi20\\(g\\)\n\tld\.d\t.*%got_pc_lo12\\(g\\)\n\tjirl" } } */ ++/* { dg-final { scan-assembler "test1:.*pcalau12i\t.*%got_pc_hi20\\(f\\)\n\tld\.d\t.*%got_pc_lo12\\(f\\)\n\tjirl" } } */ ++/* { dg-final { scan-assembler "test2:.*pcalau12i\t.*%pc_hi20\\(l\\)\n\tjirl.*%pc_lo12\\(l\\)" } } */ ++/* { dg-final { scan-assembler "test3:.*pcalau12i.*%got_pc_hi20\\(__tls_get_addr\\)\n\tld\.d.*%got_pc_lo12\\(__tls_get_addr\\)" } } */ ++ ++ ++extern void g (void); ++ ++void ++f (void) ++{} ++ ++static void ++l (void) ++{} ++ ++void ++test (void) ++{ ++ g (); ++} ++ ++void ++test1 (void) ++{ ++ f (); ++} ++ ++void ++test2 (void) ++{ ++ l (); ++} ++ ++__attribute__ ((tls_model ("global-dynamic"))) __thread int a; ++ ++void ++test3 (void) ++{ ++ a = 10; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-medium-8.c b/gcc/testsuite/gcc.target/loongarch/func-call-medium-8.c +new file mode 100644 +index 000000000..7fa873d84 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/func-call-medium-8.c +@@ -0,0 +1,42 @@ ++/* { dg-do compile } */ ++/* { dg-options "-mabi=lp64d -O0 -fno-pic -fno-plt 
-mexplicit-relocs -mcmodel=medium" } */ ++/* { dg-final { scan-assembler "test:.*pcalau12i\t.*%got_pc_hi20\\(g\\)\n\tld\.d\t.*%got_pc_lo12\\(g\\)\n\tjirl" } } */ ++/* { dg-final { scan-assembler "test1:.*pcalau12i\t.*%pc_hi20\\(f\\)\n\tjirl.*%pc_lo12\\(f\\)" } } */ ++/* { dg-final { scan-assembler "test2:.*pcalau12i\t.*%pc_hi20\\(l\\)\n\tjirl.*%pc_lo12\\(l\\)" } } */ ++/* { dg-final { scan-assembler "test3:.*pcalau12i.*%got_pc_hi20\\(__tls_get_addr\\)\n\tld\.d.*%got_pc_lo12\\(__tls_get_addr\\)" } } */ ++ ++extern void g (void); ++ ++void ++f (void) ++{} ++ ++static void ++l (void) ++{} ++ ++void ++test (void) ++{ ++ g (); ++} ++ ++void ++test1 (void) ++{ ++ f (); ++} ++ ++void ++test2 (void) ++{ ++ l (); ++} ++ ++__attribute__ ((tls_model ("global-dynamic"))) __thread int a; ++ ++void ++test3 (void) ++{ ++ a = 10; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/tls-gd-noplt.c b/gcc/testsuite/gcc.target/loongarch/tls-gd-noplt.c +index 32a0acf9b..375663286 100644 +--- a/gcc/testsuite/gcc.target/loongarch/tls-gd-noplt.c ++++ b/gcc/testsuite/gcc.target/loongarch/tls-gd-noplt.c +@@ -1,6 +1,6 @@ + /* { dg-do compile } */ +-/* { dg-options "-O2 -fno-plt -mcmodel=normal" } */ +-/* { dg-final { scan-assembler "pcalau12i\t.*%got_pc_hi20\\(__tls_get_addr\\)" } } */ ++/* { dg-options "-O0 -fno-plt -mcmodel=normal -mexplicit-relocs" } */ ++/* { dg-final { scan-assembler "pcalau12i\t.*%got_pc_hi20\\(__tls_get_addr\\)\n\tld\.d.*%got_pc_lo12\\(__tls_get_addr\\)" } } */ + + __attribute__ ((tls_model ("global-dynamic"))) __thread int a; + +-- +2.33.0 + diff --git a/LoongArch-Add-prefetch-instructions.patch b/LoongArch-Add-prefetch-instructions.patch new file mode 100644 index 0000000000000000000000000000000000000000..a66d87cec5921ed05047d79ce5b4776b010fc473 --- /dev/null +++ b/LoongArch-Add-prefetch-instructions.patch @@ -0,0 +1,158 @@ +From 52a41006c2e8141a42de93ffcc2c040e034244b2 Mon Sep 17 00:00:00 2001 +From: Lulu Cheng +Date: Wed, 16 Nov 2022 09:25:14 +0800 +Subject: 
[PATCH 031/124] LoongArch: Add prefetch instructions. + +Enable sw prefetching at -O3 and higher. + +Co-Authored-By: xujiahao + +gcc/ChangeLog: + + * config/loongarch/constraints.md (ZD): New constraint. + * config/loongarch/loongarch-def.c: Initial number of parallel prefetch. + * config/loongarch/loongarch-tune.h (struct loongarch_cache): + Define number of parallel prefetch. + * config/loongarch/loongarch.cc (loongarch_option_override_internal): + Set up parameters to be used in prefetching algorithm. + * config/loongarch/loongarch.md (prefetch): New template. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/constraints.md | 10 ++++++++++ + gcc/config/loongarch/loongarch-def.c | 2 ++ + gcc/config/loongarch/loongarch-tune.h | 1 + + gcc/config/loongarch/loongarch.cc | 28 +++++++++++++++++++++++++++ + gcc/config/loongarch/loongarch.md | 14 ++++++++++++++ + 5 files changed, 55 insertions(+) + +diff --git a/gcc/config/loongarch/constraints.md b/gcc/config/loongarch/constraints.md +index 43cb7b5f0..46f7f63ae 100644 +--- a/gcc/config/loongarch/constraints.md ++++ b/gcc/config/loongarch/constraints.md +@@ -86,6 +86,10 @@ + ;; "ZB" + ;; "An address that is held in a general-purpose register. + ;; The offset is zero" ++;; "ZD" ++;; "An address operand whose address is formed by a base register ++;; and offset that is suitable for use in instructions with the same ++;; addressing mode as @code{preld}." + ;; "<" "Matches a pre-dec or post-dec operand." (Global non-architectural) + ;; ">" "Matches a pre-inc or post-inc operand." (Global non-architectural) + +@@ -190,3 +194,9 @@ + The offset is zero" + (and (match_code "mem") + (match_test "REG_P (XEXP (op, 0))"))) ++ ++(define_address_constraint "ZD" ++ "An address operand whose address is formed by a base register ++ and offset that is suitable for use in instructions with the same ++ addressing mode as @code{preld}." 
++ (match_test "loongarch_12bit_offset_address_p (op, mode)")) +diff --git a/gcc/config/loongarch/loongarch-def.c b/gcc/config/loongarch/loongarch-def.c +index cbf995d81..80ab10a52 100644 +--- a/gcc/config/loongarch/loongarch-def.c ++++ b/gcc/config/loongarch/loongarch-def.c +@@ -62,11 +62,13 @@ loongarch_cpu_cache[N_TUNE_TYPES] = { + .l1d_line_size = 64, + .l1d_size = 64, + .l2d_size = 256, ++ .simultaneous_prefetches = 4, + }, + [CPU_LA464] = { + .l1d_line_size = 64, + .l1d_size = 64, + .l2d_size = 256, ++ .simultaneous_prefetches = 4, + }, + }; + +diff --git a/gcc/config/loongarch/loongarch-tune.h b/gcc/config/loongarch/loongarch-tune.h +index 6f3530f5c..8e3eb2947 100644 +--- a/gcc/config/loongarch/loongarch-tune.h ++++ b/gcc/config/loongarch/loongarch-tune.h +@@ -45,6 +45,7 @@ struct loongarch_cache { + int l1d_line_size; /* bytes */ + int l1d_size; /* KiB */ + int l2d_size; /* kiB */ ++ int simultaneous_prefetches; /* number of parallel prefetch */ + }; + + #endif /* LOONGARCH_TUNE_H */ +diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc +index d552b162a..622c9435b 100644 +--- a/gcc/config/loongarch/loongarch.cc ++++ b/gcc/config/loongarch/loongarch.cc +@@ -63,6 +63,7 @@ along with GCC; see the file COPYING3. If not see + #include "context.h" + #include "builtins.h" + #include "rtl-iter.h" ++#include "opts.h" + + /* This file should be included last. */ + #include "target-def.h" +@@ -6099,6 +6100,33 @@ loongarch_option_override_internal (struct gcc_options *opts) + if (loongarch_branch_cost == 0) + loongarch_branch_cost = loongarch_cost->branch_cost; + ++ /* Set up parameters to be used in prefetching algorithm. 
*/ ++ int simultaneous_prefetches ++ = loongarch_cpu_cache[LARCH_ACTUAL_TUNE].simultaneous_prefetches; ++ ++ SET_OPTION_IF_UNSET (opts, &global_options_set, ++ param_simultaneous_prefetches, ++ simultaneous_prefetches); ++ ++ SET_OPTION_IF_UNSET (opts, &global_options_set, ++ param_l1_cache_line_size, ++ loongarch_cpu_cache[LARCH_ACTUAL_TUNE].l1d_line_size); ++ ++ SET_OPTION_IF_UNSET (opts, &global_options_set, ++ param_l1_cache_size, ++ loongarch_cpu_cache[LARCH_ACTUAL_TUNE].l1d_size); ++ ++ SET_OPTION_IF_UNSET (opts, &global_options_set, ++ param_l2_cache_size, ++ loongarch_cpu_cache[LARCH_ACTUAL_TUNE].l2d_size); ++ ++ ++ /* Enable sw prefetching at -O3 and higher. */ ++ if (opts->x_flag_prefetch_loop_arrays < 0 ++ && (opts->x_optimize >= 3 || opts->x_flag_profile_use) ++ && !opts->x_optimize_size) ++ opts->x_flag_prefetch_loop_arrays = 1; ++ + if (TARGET_DIRECT_EXTERN_ACCESS && flag_shlib) + error ("%qs cannot be used for compiling a shared library", + "-mdirect-extern-access"); +diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md +index 682ab9617..2fda53819 100644 +--- a/gcc/config/loongarch/loongarch.md ++++ b/gcc/config/loongarch/loongarch.md +@@ -3282,6 +3282,20 @@ + ;; .................... 
+ ;; + ++(define_insn "prefetch" ++ [(prefetch (match_operand 0 "address_operand" "ZD") ++ (match_operand 1 "const_int_operand" "n") ++ (match_operand 2 "const_int_operand" "n"))] ++ "" ++{ ++ switch (INTVAL (operands[1])) ++ { ++ case 0: return "preld\t0,%a0"; ++ case 1: return "preld\t8,%a0"; ++ default: gcc_unreachable (); ++ } ++}) ++ + (define_insn "nop" + [(const_int 0)] + "" +-- +2.33.0 + diff --git a/LoongArch-Add-support-code-model-extreme.patch b/LoongArch-Add-support-code-model-extreme.patch new file mode 100644 index 0000000000000000000000000000000000000000..c444055581defdc1a2a2318980b3ea8bf1ef7ab6 --- /dev/null +++ b/LoongArch-Add-support-code-model-extreme.patch @@ -0,0 +1,794 @@ +From b1c92fb9dab678e4c9c23fa77185011494d145b9 Mon Sep 17 00:00:00 2001 +From: Lulu Cheng +Date: Thu, 18 Aug 2022 17:26:13 +0800 +Subject: [PATCH 011/124] LoongArch: Add support code model extreme. + +Use five instructions to calculate a signed 64-bit offset relative to the pc. + +gcc/ChangeLog: + + * config/loongarch/loongarch-opts.cc: Allow cmodel to be extreme. + * config/loongarch/loongarch.cc (loongarch_call_tls_get_addr): + Add extreme support for TLS GD and LD types. + (loongarch_legitimize_tls_address): Add extreme support for TLS LE + and IE. + (loongarch_split_symbol): When compiling with -mcmodel=extreme, + the symbol address will be obtained through five instructions. + (loongarch_print_operand_reloc): Add support. + (loongarch_print_operand): Add support. + (loongarch_print_operand_address): Add support. + (loongarch_option_override_internal): Set '-mcmodel=extreme' option + incompatible with '-mno-explicit-relocs'. + * config/loongarch/loongarch.md (@lui_l_hi20): + Loads bits 12-31 of data into registers. + (lui_h_lo20): Load bits 32-51 of the data and spell bits 0-31 of + the source register. + (lui_h_hi12): Load bits 52-63 of the data and spell bits 0-51 of + the source register. 
+ * config/loongarch/predicates.md: Symbols need to be decomposed + when defining the macro TARGET_CMODEL_EXTREME + * doc/invoke.texi: Modify the description information of cmodel in the document. + Document -W[no-]extreme-plt. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/func-call-1.c: Add option '-mcmodel=normal'. + * gcc.target/loongarch/func-call-2.c: Likewise. + * gcc.target/loongarch/func-call-3.c: Likewise. + * gcc.target/loongarch/func-call-4.c: Likewise. + * gcc.target/loongarch/func-call-5.c: Likewise. + * gcc.target/loongarch/func-call-6.c: Likewise. + * gcc.target/loongarch/func-call-7.c: Likewise. + * gcc.target/loongarch/func-call-8.c: Likewise. + * gcc.target/loongarch/relocs-symbol-noaddend.c: Likewise. + * gcc.target/loongarch/func-call-extreme-1.c: New test. + * gcc.target/loongarch/func-call-extreme-2.c: New test. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/loongarch-opts.cc | 3 +- + gcc/config/loongarch/loongarch.cc | 222 +++++++++++++++--- + gcc/config/loongarch/loongarch.md | 34 ++- + gcc/config/loongarch/predicates.md | 9 +- + gcc/doc/invoke.texi | 50 +--- + .../gcc.target/loongarch/func-call-1.c | 2 +- + .../gcc.target/loongarch/func-call-2.c | 2 +- + .../gcc.target/loongarch/func-call-3.c | 2 +- + .../gcc.target/loongarch/func-call-4.c | 2 +- + .../gcc.target/loongarch/func-call-5.c | 2 +- + .../gcc.target/loongarch/func-call-6.c | 2 +- + .../gcc.target/loongarch/func-call-7.c | 2 +- + .../gcc.target/loongarch/func-call-8.c | 2 +- + .../loongarch/func-call-extreme-1.c | 32 +++ + .../loongarch/func-call-extreme-2.c | 32 +++ + .../loongarch/relocs-symbol-noaddend.c | 2 +- + 16 files changed, 318 insertions(+), 82 deletions(-) + create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-extreme-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-extreme-2.c + +diff --git a/gcc/config/loongarch/loongarch-opts.cc b/gcc/config/loongarch/loongarch-opts.cc +index 
3f70943de..2ae89f234 100644 +--- a/gcc/config/loongarch/loongarch-opts.cc ++++ b/gcc/config/loongarch/loongarch-opts.cc +@@ -376,14 +376,13 @@ fallback: + + /* 5. Target code model */ + t.cmodel = constrained.cmodel ? opt_cmodel : CMODEL_NORMAL; +- if (t.cmodel != CMODEL_NORMAL) ++ if (t.cmodel != CMODEL_NORMAL && t.cmodel != CMODEL_EXTREME) + { + warning (0, "%qs is not supported, now cmodel is set to %qs", + loongarch_cmodel_strings[t.cmodel], "normal"); + t.cmodel = CMODEL_NORMAL; + } + +- + /* Cleanup and return. */ + obstack_free (&msg_obstack, NULL); + *target = t; +diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc +index 76bf55ea4..1a33f668f 100644 +--- a/gcc/config/loongarch/loongarch.cc ++++ b/gcc/config/loongarch/loongarch.cc +@@ -2436,7 +2436,19 @@ loongarch_call_tls_get_addr (rtx sym, enum loongarch_symbol_type type, rtx v0) + /* Split tls symbol to high and low. */ + rtx high = gen_rtx_HIGH (Pmode, copy_rtx (loc)); + high = loongarch_force_temporary (tmp, high); +- emit_insn (gen_tls_low (Pmode, a0, high, loc)); ++ ++ if (TARGET_CMODEL_EXTREME) ++ { ++ gcc_assert (TARGET_EXPLICIT_RELOCS); ++ ++ rtx tmp1 = gen_reg_rtx (Pmode); ++ emit_insn (gen_tls_low (Pmode, tmp1, gen_rtx_REG (Pmode, 0), loc)); ++ emit_insn (gen_lui_h_lo20 (tmp1, tmp1, loc)); ++ emit_insn (gen_lui_h_hi12 (tmp1, tmp1, loc)); ++ emit_move_insn (a0, gen_rtx_PLUS (Pmode, high, tmp1)); ++ } ++ else ++ emit_insn (gen_tls_low (Pmode, a0, high, loc)); + } + else + { +@@ -2449,14 +2461,44 @@ loongarch_call_tls_get_addr (rtx sym, enum loongarch_symbol_type type, rtx v0) + } + + if (flag_plt) +- insn = emit_call_insn (gen_call_value_internal (v0, loongarch_tls_symbol, ++ insn = emit_call_insn (gen_call_value_internal (v0, ++ loongarch_tls_symbol, + const0_rtx)); + else + { + rtx dest = gen_reg_rtx (Pmode); +- rtx high = gen_reg_rtx (Pmode); +- loongarch_emit_move (high, gen_rtx_HIGH (Pmode, loongarch_tls_symbol)); +- emit_insn (gen_ld_from_got (Pmode, dest, high, 
loongarch_tls_symbol)); ++ ++ if (TARGET_CMODEL_EXTREME) ++ { ++ gcc_assert (TARGET_EXPLICIT_RELOCS); ++ ++ rtx tmp1 = gen_reg_rtx (Pmode); ++ rtx high = gen_reg_rtx (Pmode); ++ ++ loongarch_emit_move (high, ++ gen_rtx_HIGH (Pmode, loongarch_tls_symbol)); ++ loongarch_emit_move (tmp1, gen_rtx_LO_SUM (Pmode, ++ gen_rtx_REG (Pmode, 0), ++ loongarch_tls_symbol)); ++ emit_insn (gen_lui_h_lo20 (tmp1, tmp1, loongarch_tls_symbol)); ++ emit_insn (gen_lui_h_hi12 (tmp1, tmp1, loongarch_tls_symbol)); ++ loongarch_emit_move (dest, ++ gen_rtx_MEM (Pmode, ++ gen_rtx_PLUS (Pmode, high, tmp1))); ++ } ++ else ++ { ++ if (TARGET_EXPLICIT_RELOCS) ++ { ++ rtx high = gen_reg_rtx (Pmode); ++ loongarch_emit_move (high, ++ gen_rtx_HIGH (Pmode, loongarch_tls_symbol)); ++ emit_insn (gen_ld_from_got (Pmode, dest, high, ++ loongarch_tls_symbol)); ++ } ++ else ++ loongarch_emit_move (dest, loongarch_tls_symbol); ++ } + insn = emit_call_insn (gen_call_value_internal (v0, dest, const0_rtx)); + } + +@@ -2508,7 +2550,23 @@ loongarch_legitimize_tls_address (rtx loc) + tmp3 = gen_reg_rtx (Pmode); + rtx high = gen_rtx_HIGH (Pmode, copy_rtx (tmp2)); + high = loongarch_force_temporary (tmp3, high); +- emit_insn (gen_ld_from_got (Pmode, tmp1, high, tmp2)); ++ ++ if (TARGET_CMODEL_EXTREME) ++ { ++ gcc_assert (TARGET_EXPLICIT_RELOCS); ++ ++ rtx tmp3 = gen_reg_rtx (Pmode); ++ emit_insn (gen_tls_low (Pmode, tmp3, ++ gen_rtx_REG (Pmode, 0), tmp2)); ++ emit_insn (gen_lui_h_lo20 (tmp3, tmp3, tmp2)); ++ emit_insn (gen_lui_h_hi12 (tmp3, tmp3, tmp2)); ++ emit_move_insn (tmp1, ++ gen_rtx_MEM (Pmode, ++ gen_rtx_PLUS (Pmode, ++ high, tmp3))); ++ } ++ else ++ emit_insn (gen_ld_from_got (Pmode, tmp1, high, tmp2)); + } + else + emit_insn (loongarch_got_load_tls_ie (tmp1, loc)); +@@ -2530,11 +2588,18 @@ loongarch_legitimize_tls_address (rtx loc) + rtx high = gen_rtx_HIGH (Pmode, copy_rtx (tmp2)); + high = loongarch_force_temporary (tmp3, high); + emit_insn (gen_ori_l_lo12 (Pmode, tmp1, high, tmp2)); ++ ++ if 
(TARGET_CMODEL_EXTREME) ++ { ++ gcc_assert (TARGET_EXPLICIT_RELOCS); ++ ++ emit_insn (gen_lui_h_lo20 (tmp1, tmp1, tmp2)); ++ emit_insn (gen_lui_h_hi12 (tmp1, tmp1, tmp2)); ++ } + } + else + emit_insn (loongarch_got_load_tls_le (tmp1, loc)); + emit_insn (gen_add3_insn (dest, tmp1, tp)); +- + } + break; + +@@ -2603,7 +2668,6 @@ bool + loongarch_split_symbol (rtx temp, rtx addr, machine_mode mode, rtx *low_out) + { + enum loongarch_symbol_type symbol_type; +- rtx high; + + /* If build with '-mno-explicit-relocs', don't split symbol. */ + if (!TARGET_EXPLICIT_RELOCS) +@@ -2615,6 +2679,8 @@ loongarch_split_symbol (rtx temp, rtx addr, machine_mode mode, rtx *low_out) + || !loongarch_split_symbol_type (symbol_type)) + return false; + ++ rtx high, temp1 = NULL; ++ + if (temp == NULL) + temp = gen_reg_rtx (Pmode); + +@@ -2622,20 +2688,42 @@ loongarch_split_symbol (rtx temp, rtx addr, machine_mode mode, rtx *low_out) + high = gen_rtx_HIGH (Pmode, copy_rtx (addr)); + high = loongarch_force_temporary (temp, high); + ++ if (TARGET_CMODEL_EXTREME && can_create_pseudo_p ()) ++ { ++ gcc_assert (TARGET_EXPLICIT_RELOCS); ++ ++ temp1 = gen_reg_rtx (Pmode); ++ emit_move_insn (temp1, gen_rtx_LO_SUM (Pmode, gen_rtx_REG (Pmode, 0), ++ addr)); ++ emit_insn (gen_lui_h_lo20 (temp1, temp1, addr)); ++ emit_insn (gen_lui_h_hi12 (temp1, temp1, addr)); ++ } ++ + if (low_out) + switch (symbol_type) + { + case SYMBOL_PCREL: +- *low_out = gen_rtx_LO_SUM (Pmode, high, addr); +- break; ++ { ++ if (TARGET_CMODEL_EXTREME && can_create_pseudo_p ()) ++ *low_out = gen_rtx_PLUS (Pmode, high, temp1); ++ else ++ *low_out = gen_rtx_LO_SUM (Pmode, high, addr); ++ break; ++ } + + case SYMBOL_GOT_DISP: + /* SYMBOL_GOT_DISP symbols are loaded from the GOT. 
*/ + { +- rtx low = gen_rtx_LO_SUM (Pmode, high, addr); +- rtx mem = gen_rtx_MEM (Pmode, low); +- *low_out = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, mem), +- UNSPEC_LOAD_FROM_GOT); ++ if (TARGET_CMODEL_EXTREME && can_create_pseudo_p ()) ++ *low_out = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, high, temp1)); ++ else ++ { ++ rtx low = gen_rtx_LO_SUM (Pmode, high, addr); ++ rtx mem = gen_rtx_MEM (Pmode, low); ++ *low_out = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, mem), ++ UNSPEC_LOAD_FROM_GOT); ++ } ++ + break; + } + +@@ -4584,34 +4672,86 @@ loongarch_memmodel_needs_release_fence (enum memmodel model) + in context CONTEXT. HI_RELOC indicates a high-part reloc. */ + + static void +-loongarch_print_operand_reloc (FILE *file, rtx op, bool hi_reloc) ++loongarch_print_operand_reloc (FILE *file, rtx op, bool hi64_part, ++ bool hi_reloc) + { + const char *reloc; + ++ if (TARGET_CMODEL_EXTREME) ++ gcc_assert (TARGET_EXPLICIT_RELOCS); ++ + switch (loongarch_classify_symbolic_expression (op)) + { + case SYMBOL_PCREL: +- reloc = hi_reloc ? "%pc_hi20" : "%pc_lo12"; ++ if (hi64_part) ++ { ++ if (TARGET_CMODEL_EXTREME) ++ reloc = hi_reloc ? "%pc64_hi12" : "%pc64_lo20"; ++ else ++ gcc_unreachable (); ++ } ++ else ++ reloc = hi_reloc ? "%pc_hi20" : "%pc_lo12"; + break; + + case SYMBOL_GOT_DISP: +- reloc = hi_reloc ? "%got_pc_hi20" : "%got_pc_lo12"; ++ if (hi64_part) ++ { ++ if (TARGET_CMODEL_EXTREME) ++ reloc = hi_reloc ? "%got64_pc_hi12" : "%got64_pc_lo20"; ++ else ++ gcc_unreachable (); ++ } ++ else ++ reloc = hi_reloc ? "%got_pc_hi20" : "%got_pc_lo12"; + break; + + case SYMBOL_TLS_IE: +- reloc = hi_reloc ? "%ie_pc_hi20" : "%ie_pc_lo12"; ++ if (hi64_part) ++ { ++ if (TARGET_CMODEL_EXTREME) ++ reloc = hi_reloc ? "%ie64_pc_hi12" : "%ie64_pc_lo20"; ++ else ++ gcc_unreachable (); ++ } ++ else ++ reloc = hi_reloc ? "%ie_pc_hi20" : "%ie_pc_lo12"; + break; + + case SYMBOL_TLS_LE: +- reloc = hi_reloc ? 
"%le_hi20" : "%le_lo12"; ++ if (hi64_part) ++ { ++ if (TARGET_CMODEL_EXTREME) ++ reloc = hi_reloc ? "%le64_hi12" : "%le64_lo20"; ++ else ++ gcc_unreachable (); ++ } ++ else ++ reloc = hi_reloc ? "%le_hi20" : "%le_lo12"; + break; + + case SYMBOL_TLSGD: +- reloc = hi_reloc ? "%gd_pc_hi20" : "%got_pc_lo12"; ++ if (hi64_part) ++ { ++ if (TARGET_CMODEL_EXTREME) ++ reloc = hi_reloc ? "%got64_pc_hi12" : "%got64_pc_lo20"; ++ else ++ gcc_unreachable (); ++ } ++ else ++ reloc = hi_reloc ? "%gd_pc_hi20" : "%got_pc_lo12"; + break; + + case SYMBOL_TLSLDM: +- reloc = hi_reloc ? "%ld_pc_hi20" : "%got_pc_lo12"; ++ if (hi64_part) ++ { ++ if (TARGET_CMODEL_EXTREME) ++ reloc = hi_reloc ? "%got64_pc_hi12" : "%got64_pc_lo20"; ++ else ++ gcc_unreachable (); ++ } ++ else ++ reloc = hi_reloc ? "%ld_pc_hi20" : "%got_pc_lo12"; + break; + + default: +@@ -4637,6 +4777,8 @@ loongarch_print_operand_reloc (FILE *file, rtx op, bool hi_reloc) + 'L' Print the low-part relocation associated with OP. + 'm' Print one less than CONST_INT OP in decimal. + 'N' Print the inverse of the integer branch condition for comparison OP. ++ 'r' Print address 12-31bit relocation associated with OP. ++ 'R' Print address 32-51bit relocation associated with OP. + 'T' Print 'f' for (eq:CC ...), 't' for (ne:CC ...), + 'z' for (eq:?I ...), 'n' for (ne:?I ...). 
+ 't' Like 'T', but with the EQ/NE cases reversed +@@ -4694,7 +4836,13 @@ loongarch_print_operand (FILE *file, rtx op, int letter) + case 'h': + if (code == HIGH) + op = XEXP (op, 0); +- loongarch_print_operand_reloc (file, op, true /* hi_reloc */); ++ loongarch_print_operand_reloc (file, op, false /* hi64_part */, ++ true /* hi_reloc */); ++ break; ++ ++ case 'H': ++ loongarch_print_operand_reloc (file, op, true /* hi64_part */, ++ true /* hi_reloc */); + break; + + case 'i': +@@ -4703,7 +4851,8 @@ loongarch_print_operand (FILE *file, rtx op, int letter) + break; + + case 'L': +- loongarch_print_operand_reloc (file, op, false /* lo_reloc */); ++ loongarch_print_operand_reloc (file, op, false /* hi64_part*/, ++ false /* lo_reloc */); + break; + + case 'm': +@@ -4718,6 +4867,16 @@ loongarch_print_operand (FILE *file, rtx op, int letter) + letter); + break; + ++ case 'r': ++ loongarch_print_operand_reloc (file, op, false /* hi64_part */, ++ true /* lo_reloc */); ++ break; ++ ++ case 'R': ++ loongarch_print_operand_reloc (file, op, true /* hi64_part */, ++ false /* lo_reloc */); ++ break; ++ + case 't': + case 'T': + { +@@ -4848,7 +5007,8 @@ loongarch_print_operand_address (FILE *file, machine_mode /* mode */, rtx x) + + case ADDRESS_LO_SUM: + fprintf (file, "%s,", reg_names[REGNO (addr.reg)]); +- loongarch_print_operand_reloc (file, addr.offset, false /* hi_reloc */); ++ loongarch_print_operand_reloc (file, addr.offset, false /* hi64_part */, ++ false /* hi_reloc */); + return; + + case ADDRESS_CONST_INT: +@@ -5821,13 +5981,21 @@ loongarch_option_override_internal (struct gcc_options *opts) + + switch (la_target.cmodel) + { +- case CMODEL_TINY_STATIC: + case CMODEL_EXTREME: ++ if (!TARGET_EXPLICIT_RELOCS) ++ error ("code model %qs needs %s", ++ "extreme", "-mexplicit-relocs"); ++ + if (opts->x_flag_plt) +- error ("code model %qs and %qs not support %s mode", +- "tiny-static", "extreme", "plt"); ++ { ++ if (global_options_set.x_flag_plt) ++ error ("code model %qs is 
not compatible with %s", ++ "extreme", "-fplt"); ++ opts->x_flag_plt = 0; ++ } + break; + ++ case CMODEL_TINY_STATIC: + case CMODEL_NORMAL: + case CMODEL_TINY: + case CMODEL_LARGE: +diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md +index 8e8868de9..8fc10444c 100644 +--- a/gcc/config/loongarch/loongarch.md ++++ b/gcc/config/loongarch/loongarch.md +@@ -60,6 +60,9 @@ + + UNSPEC_LOAD_FROM_GOT + UNSPEC_ORI_L_LO12 ++ UNSPEC_LUI_L_HI20 ++ UNSPEC_LUI_H_LO20 ++ UNSPEC_LUI_H_HI12 + UNSPEC_TLS_LOW + ]) + +@@ -1934,16 +1937,45 @@ + [(set_attr "type" "move")] + ) + ++(define_insn "@lui_l_hi20" ++ [(set (match_operand:P 0 "register_operand" "=r") ++ (unspec:P [(match_operand:P 1 "symbolic_operand")] ++ UNSPEC_LUI_L_HI20))] ++ "" ++ "lu12i.w\t%0,%r1" ++ [(set_attr "type" "move")] ++) ++ + (define_insn "@ori_l_lo12" + [(set (match_operand:P 0 "register_operand" "=r") + (unspec:P [(match_operand:P 1 "register_operand" "r") +- (match_operand:P 2 "symbolic_operand")] ++ (match_operand:P 2 "symbolic_operand")] + UNSPEC_ORI_L_LO12))] + "" + "ori\t%0,%1,%L2" + [(set_attr "type" "move")] + ) + ++(define_insn "lui_h_lo20" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (unspec:DI [(match_operand:DI 1 "register_operand" "0") ++ (match_operand:DI 2 "symbolic_operand")] ++ UNSPEC_LUI_H_LO20))] ++ "TARGET_64BIT" ++ "lu32i.d\t%0,%R2" ++ [(set_attr "type" "move")] ++) ++ ++(define_insn "lui_h_hi12" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (unspec:DI [(match_operand:DI 1 "register_operand" "r") ++ (match_operand:DI 2 "symbolic_operand")] ++ UNSPEC_LUI_H_HI12))] ++ "TARGET_64BIT" ++ "lu52i.d\t%0,%1,%H2" ++ [(set_attr "type" "move")] ++) ++ + ;; Convert floating-point numbers to integers + (define_insn "frint_" + [(set (match_operand:ANYF 0 "register_operand" "=f") +diff --git a/gcc/config/loongarch/predicates.md b/gcc/config/loongarch/predicates.md +index cd3528c7c..e38c6fbdd 100644 +--- a/gcc/config/loongarch/predicates.md ++++ 
b/gcc/config/loongarch/predicates.md +@@ -111,7 +111,7 @@ + (match_code "const,symbol_ref,label_ref") + { + /* Split symbol to high and low if return false. +- If defined TARGET_CMODEL_LARGE, all symbol would be splited, ++ If defined TARGET_CMODEL_EXTREME, all symbol would be splited, + else if offset is not zero, the symbol would be splited. */ + + enum loongarch_symbol_type symbol_type; +@@ -126,10 +126,13 @@ + switch (symbol_type) + { + case SYMBOL_PCREL: +- return 1; ++ if (TARGET_CMODEL_EXTREME) ++ return false; ++ else ++ return 1; + + case SYMBOL_GOT_DISP: +- if (TARGET_CMODEL_LARGE || !flag_plt) ++ if (TARGET_CMODEL_EXTREME || !flag_plt) + return false; + else + return 1; +diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi +index 1de2b2bd4..c4f83e62a 100644 +--- a/gcc/doc/invoke.texi ++++ b/gcc/doc/invoke.texi +@@ -1006,6 +1006,7 @@ Objective-C and Objective-C++ Dialects}. + -mcond-move-float -mno-cond-move-float @gol + -memcpy -mno-memcpy -mstrict-align -mno-strict-align @gol + -mmax-inline-memcpy-size=@var{n} @gol ++-mexplicit-relocs -mno-explicit-relocs @gol + -mcmodel=@var{code-model}} + + @emph{M32R/D Options} +@@ -24617,50 +24618,19 @@ less than or equal to @var{n} bytes. The default value of @var{n} is 1024. + @item -mcmodel=@var{code-model} + Set the code model to one of: + @table @samp +-@item tiny-static +-@itemize @bullet +-@item +-local symbol and global strong symbol: The data section must be within +/-2MiB addressing space. +-The text section must be within +/-128MiB addressing space. +-@item +-global weak symbol: The got table must be within +/-2GiB addressing space. +-@end itemize +- +-@item tiny +-@itemize @bullet +-@item +-local symbol: The data section must be within +/-2MiB addressing space. +-The text section must be within +/-128MiB +-addressing space. +-@item +-global symbol: The got table must be within +/-2GiB addressing space. 
+-@end itemize ++@item tiny-static (Not implemented yet) ++@item tiny (Not implemented yet) + + @item normal +-@itemize @bullet +-@item +-local symbol: The data section must be within +/-2GiB addressing space. +-The text section must be within +/-128MiB addressing space. +-@item +-global symbol: The got table must be within +/-2GiB addressing space. +-@end itemize ++The text segment must be within 128MB addressing space. The data segment must ++be within 2GB addressing space. + +-@item large +-@itemize @bullet +-@item +-local symbol: The data section must be within +/-2GiB addressing space. +-The text section must be within +/-128GiB addressing space. +-@item +-global symbol: The got table must be within +/-2GiB addressing space. +-@end itemize ++@item large (Not implemented yet) + +-@item extreme(Not implemented yet) +-@itemize @bullet +-@item +-local symbol: The data and text section must be within +/-8EiB addressing space. +-@item +-global symbol: The data got table must be within +/-8EiB addressing space. +-@end itemize ++@item extreme ++This mode does not limit the size of the code segment and data segment. ++The @option{-mcmodel=extreme} option is incompatible with @option{-fplt} and ++@option{-mno-explicit-relocs}. + @end table + The default code model is @code{normal}. 
+ +diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-1.c b/gcc/testsuite/gcc.target/loongarch/func-call-1.c +index 01b8ea23f..76bf11b0c 100644 +--- a/gcc/testsuite/gcc.target/loongarch/func-call-1.c ++++ b/gcc/testsuite/gcc.target/loongarch/func-call-1.c +@@ -1,5 +1,5 @@ + /* { dg-do compile } */ +-/* { dg-options "-mabi=lp64d -O0 -fpic -fplt -mno-explicit-relocs" } */ ++/* { dg-options "-mabi=lp64d -O0 -fpic -fplt -mno-explicit-relocs -mcmodel=normal" } */ + /* { dg-final { scan-assembler "test:.*bl\t%plt\\(g\\)\n" } } */ + /* { dg-final { scan-assembler "test1:.*bl\t%plt\\(f\\)\n" } } */ + /* { dg-final { scan-assembler "test2:.*bl\tl\n" } } */ +diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-2.c b/gcc/testsuite/gcc.target/loongarch/func-call-2.c +index 4565baaec..4b468fef8 100644 +--- a/gcc/testsuite/gcc.target/loongarch/func-call-2.c ++++ b/gcc/testsuite/gcc.target/loongarch/func-call-2.c +@@ -1,5 +1,5 @@ + /* { dg-do compile } */ +-/* { dg-options "-mabi=lp64d -O0 -fno-pic -fplt -mno-explicit-relocs" } */ ++/* { dg-options "-mabi=lp64d -O0 -fno-pic -fplt -mno-explicit-relocs -mcmodel=normal" } */ + /* { dg-final { scan-assembler "test:.*bl\t%plt\\(g\\)\n" } } */ + /* { dg-final { scan-assembler "test1:.*bl\tf\n" } } */ + /* { dg-final { scan-assembler "test2:.*bl\tl\n" } } */ +diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-3.c b/gcc/testsuite/gcc.target/loongarch/func-call-3.c +index 4f669a029..dd3a4882d 100644 +--- a/gcc/testsuite/gcc.target/loongarch/func-call-3.c ++++ b/gcc/testsuite/gcc.target/loongarch/func-call-3.c +@@ -1,5 +1,5 @@ + /* { dg-do compile } */ +-/* { dg-options "-mabi=lp64d -O0 -fpic -fno-plt -mno-explicit-relocs" } */ ++/* { dg-options "-mabi=lp64d -O0 -fpic -fno-plt -mno-explicit-relocs -mcmodel=normal" } */ + /* { dg-final { scan-assembler "test:.*la\.global\t.*g\n\tjirl" } } */ + /* { dg-final { scan-assembler "test1:.*la\.global\t.*f\n\tjirl" } } */ + /* { dg-final { scan-assembler "test2:.*bl\tl\n" } } */ 
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-4.c b/gcc/testsuite/gcc.target/loongarch/func-call-4.c +index 943adb640..f8158ec34 100644 +--- a/gcc/testsuite/gcc.target/loongarch/func-call-4.c ++++ b/gcc/testsuite/gcc.target/loongarch/func-call-4.c +@@ -1,5 +1,5 @@ + /* { dg-do compile } */ +-/* { dg-options "-mabi=lp64d -O0 -fno-pic -fno-plt -mno-explicit-relocs" } */ ++/* { dg-options "-mabi=lp64d -O0 -fno-pic -fno-plt -mno-explicit-relocs -mcmodel=normal" } */ + /* { dg-final { scan-assembler "test:.*la\.global\t.*g\n\tjirl" } } */ + /* { dg-final { scan-assembler "test1:.*bl\tf\n" } } */ + /* { dg-final { scan-assembler "test2:.*bl\tl\n" } } */ +diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-5.c b/gcc/testsuite/gcc.target/loongarch/func-call-5.c +index 2c2a1c8a1..37994af43 100644 +--- a/gcc/testsuite/gcc.target/loongarch/func-call-5.c ++++ b/gcc/testsuite/gcc.target/loongarch/func-call-5.c +@@ -1,5 +1,5 @@ + /* { dg-do compile } */ +-/* { dg-options "-mabi=lp64d -O0 -fpic -fplt -mexplicit-relocs" } */ ++/* { dg-options "-mabi=lp64d -O0 -fpic -fplt -mexplicit-relocs -mcmodel=normal" } */ + /* { dg-final { scan-assembler "test:.*bl\t%plt\\(g\\)\n" } } */ + /* { dg-final { scan-assembler "test1:.*bl\t%plt\\(f\\)\n" } } */ + /* { dg-final { scan-assembler "test2:.*bl\tl\n" } } */ +diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-6.c b/gcc/testsuite/gcc.target/loongarch/func-call-6.c +index 4b0e4266e..8e366e376 100644 +--- a/gcc/testsuite/gcc.target/loongarch/func-call-6.c ++++ b/gcc/testsuite/gcc.target/loongarch/func-call-6.c +@@ -1,5 +1,5 @@ + /* { dg-do compile } */ +-/* { dg-options "-mabi=lp64d -O0 -fno-pic -fplt -mexplicit-relocs" } */ ++/* { dg-options "-mabi=lp64d -O0 -fno-pic -fplt -mexplicit-relocs -mcmodel=normal" } */ + /* { dg-final { scan-assembler "test:.*bl\t%plt\\(g\\)\n" } } */ + /* { dg-final { scan-assembler "test1:.*bl\tf\n" } } */ + /* { dg-final { scan-assembler "test2:.*bl\tl\n" } } */ +diff --git 
a/gcc/testsuite/gcc.target/loongarch/func-call-7.c b/gcc/testsuite/gcc.target/loongarch/func-call-7.c +index 51792711f..4177c3d96 100644 +--- a/gcc/testsuite/gcc.target/loongarch/func-call-7.c ++++ b/gcc/testsuite/gcc.target/loongarch/func-call-7.c +@@ -1,5 +1,5 @@ + /* { dg-do compile } */ +-/* { dg-options "-mabi=lp64d -O0 -fpic -fno-plt -mexplicit-relocs" } */ ++/* { dg-options "-mabi=lp64d -O0 -fpic -fno-plt -mexplicit-relocs -mcmodel=normal" } */ + /* { dg-final { scan-assembler "test:.*pcalau12i\t.*%got_pc_hi20\\(g\\)\n\tld\.d\t.*%got_pc_lo12\\(g\\)\n\tjirl" } } */ + /* { dg-final { scan-assembler "test1:.*pcalau12i\t.*%got_pc_hi20\\(f\\)\n\tld\.d\t.*%got_pc_lo12\\(f\\)\n\tjirl" } } */ + /* { dg-final { scan-assembler "test2:.*bl\tl\n" } } */ +diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-8.c b/gcc/testsuite/gcc.target/loongarch/func-call-8.c +index 330140d88..4254eaa16 100644 +--- a/gcc/testsuite/gcc.target/loongarch/func-call-8.c ++++ b/gcc/testsuite/gcc.target/loongarch/func-call-8.c +@@ -1,5 +1,5 @@ + /* { dg-do compile } */ +-/* { dg-options "-mabi=lp64d -O0 -fno-pic -fno-plt -mexplicit-relocs" } */ ++/* { dg-options "-mabi=lp64d -O0 -fno-pic -fno-plt -mexplicit-relocs -mcmodel=normal" } */ + /* { dg-final { scan-assembler "test:.*pcalau12i\t.*%got_pc_hi20\\(g\\)\n\tld\.d\t.*%got_pc_lo12\\(g\\)\n\tjirl" } } */ + /* { dg-final { scan-assembler "test1:.*bl\tf\n" } } */ + /* { dg-final { scan-assembler "test2:.*bl\tl\n" } } */ +diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-extreme-1.c b/gcc/testsuite/gcc.target/loongarch/func-call-extreme-1.c +new file mode 100644 +index 000000000..db1e0f853 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/func-call-extreme-1.c +@@ -0,0 +1,32 @@ ++/* { dg-do compile } */ ++/* { dg-options "-mabi=lp64d -O0 -fno-pic -fno-plt -mexplicit-relocs -mcmodel=extreme" } */ ++/* { dg-final { scan-assembler 
"test:.*pcalau12i.*%got_pc_hi20.*\n\taddi\.d.*%got_pc_lo12.*\n\tlu32i\.d.*%got64_pc_lo20.*\n\tlu52i\.d.*%got64_pc_hi12.*\n\tldx\.d" } } */ ++/* { dg-final { scan-assembler "test1:.*pcalau12i.*%pc_hi20.*\n\taddi\.d.*%pc_lo12.*\n\tlu32i\.d.*%pc64_lo20.*\n\tlu52i\.d.*pc64_hi12.*\n\tadd\.d" } } */ ++/* { dg-final { scan-assembler "test2:.*pcalau12i.*%pc_hi20.*\n\taddi\.d.*%pc_lo12.*\n\tlu32i\.d.*%pc64_lo20.*\n\tlu52i\.d.*pc64_hi12.*\n\tadd\.d" } } */ ++ ++extern void g (void); ++void ++f (void) ++{} ++ ++static void ++l (void) ++{} ++ ++void ++test (void) ++{ ++ g (); ++} ++ ++void ++test1 (void) ++{ ++ f (); ++} ++ ++void ++test2 (void) ++{ ++ l (); ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-extreme-2.c b/gcc/testsuite/gcc.target/loongarch/func-call-extreme-2.c +new file mode 100644 +index 000000000..21bf81ae8 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/func-call-extreme-2.c +@@ -0,0 +1,32 @@ ++/* { dg-do compile } */ ++/* { dg-options "-mabi=lp64d -O0 -fpic -fno-plt -mexplicit-relocs -mcmodel=extreme" } */ ++/* { dg-final { scan-assembler "test:.*pcalau12i.*%got_pc_hi20.*\n\taddi\.d.*%got_pc_lo12.*\n\tlu32i\.d.*%got64_pc_lo20.*\n\tlu52i\.d.*%got64_pc_hi12.*\n\tldx\.d" } } */ ++/* { dg-final { scan-assembler "test1:.*pcalau12i.*%got_pc_hi20.*\n\taddi\.d.*%got_pc_lo12.*\n\tlu32i\.d.*%got64_pc_lo20.*\n\tlu52i\.d.*%got64_pc_hi12.*\n\tldx\.d" } } */ ++/* { dg-final { scan-assembler "test2:.*pcalau12i.*%pc_hi20.*\n\taddi\.d.*%pc_lo12.*\n\tlu32i\.d.*%pc64_lo20.*\n\tlu52i\.d.*pc64_hi12.*\n\tadd\.d" } } */ ++ ++extern void g (void); ++void ++f (void) ++{} ++ ++static void ++l (void) ++{} ++ ++void ++test (void) ++{ ++ g (); ++} ++ ++void ++test1 (void) ++{ ++ f (); ++} ++ ++void ++test2 (void) ++{ ++ l (); ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/relocs-symbol-noaddend.c b/gcc/testsuite/gcc.target/loongarch/relocs-symbol-noaddend.c +index bfcc9bc33..3ec8bd229 100644 +--- a/gcc/testsuite/gcc.target/loongarch/relocs-symbol-noaddend.c 
++++ b/gcc/testsuite/gcc.target/loongarch/relocs-symbol-noaddend.c +@@ -1,5 +1,5 @@ + /* { dg-do compile } */ +-/* { dg-options "-mabi=lp64d -mexplicit-relocs -fno-pic -O2" } */ ++/* { dg-options "-mabi=lp64d -mexplicit-relocs -fno-pic -O2 -mcmodel=normal" } */ + /* { dg-final { scan-assembler "pcalau12i.*%pc_hi20\\(\.LANCHOR0\\)\n" } } */ + /* { dg-final { scan-assembler "addi\.d.*%pc_lo12\\(\.LANCHOR0\\)\n" } } */ + /* { dg-final { scan-assembler "ldptr.d\t\\\$r4,.*,0\n" } } */ +-- +2.33.0 + diff --git a/LoongArch-Add-tests-for-ASX-builtin-functions.patch b/LoongArch-Add-tests-for-ASX-builtin-functions.patch new file mode 100644 index 0000000000000000000000000000000000000000..339904a0ca26b914b3e005cc3ced1503aa2ea15a --- /dev/null +++ b/LoongArch-Add-tests-for-ASX-builtin-functions.patch @@ -0,0 +1,4485 @@ +From fcf63744c4ceaa60cd57ab3c431ec63f690189d4 Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Tue, 12 Sep 2023 15:59:47 +0800 +Subject: [PATCH 109/124] LoongArch: Add tests for ASX builtin functions. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lasx/lasx-builtin.c: New test. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lasx/lasx-builtin.c | 4460 +++++++++++++++++ + 1 file changed, 4460 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-builtin.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-builtin.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-builtin.c +new file mode 100644 +index 000000000..b1a903b4a +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-builtin.c +@@ -0,0 +1,4460 @@ ++/* Test builtins for LOONGARCH LASX ASE instructions */ ++/* { dg-do compile } */ ++/* { dg-options "-mlasx" } */ ++/* { dg-final { scan-assembler-times "lasx_xvsll_b:.*xvsll\\.b.*lasx_xvsll_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsll_h:.*xvsll\\.h.*lasx_xvsll_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsll_w:.*xvsll\\.w.*lasx_xvsll_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsll_d:.*xvsll\\.d.*lasx_xvsll_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvslli_b:.*xvslli\\.b.*lasx_xvslli_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvslli_h:.*xvslli\\.h.*lasx_xvslli_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvslli_w:.*xvslli\\.w.*lasx_xvslli_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvslli_d:.*xvslli\\.d.*lasx_xvslli_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsra_b:.*xvsra\\.b.*lasx_xvsra_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsra_h:.*xvsra\\.h.*lasx_xvsra_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsra_w:.*xvsra\\.w.*lasx_xvsra_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsra_d:.*xvsra\\.d.*lasx_xvsra_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrai_b:.*xvsrai\\.b.*lasx_xvsrai_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrai_h:.*xvsrai\\.h.*lasx_xvsrai_h" 1 } } */ ++/* { dg-final { scan-assembler-times 
"lasx_xvsrai_w:.*xvsrai\\.w.*lasx_xvsrai_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrai_d:.*xvsrai\\.d.*lasx_xvsrai_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrar_b:.*xvsrar\\.b.*lasx_xvsrar_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrar_h:.*xvsrar\\.h.*lasx_xvsrar_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrar_w:.*xvsrar\\.w.*lasx_xvsrar_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrar_d:.*xvsrar\\.d.*lasx_xvsrar_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrari_b:.*xvsrari\\.b.*lasx_xvsrari_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrari_h:.*xvsrari\\.h.*lasx_xvsrari_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrari_w:.*xvsrari\\.w.*lasx_xvsrari_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrari_d:.*xvsrari\\.d.*lasx_xvsrari_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrl_b:.*xvsrl\\.b.*lasx_xvsrl_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrl_h:.*xvsrl\\.h.*lasx_xvsrl_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrl_w:.*xvsrl\\.w.*lasx_xvsrl_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrl_d:.*xvsrl\\.d.*lasx_xvsrl_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrli_b:.*xvsrli\\.b.*lasx_xvsrli_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrli_h:.*xvsrli\\.h.*lasx_xvsrli_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrli_w:.*xvsrli\\.w.*lasx_xvsrli_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrli_d:.*xvsrli\\.d.*lasx_xvsrli_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrlr_b:.*xvsrlr\\.b.*lasx_xvsrlr_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrlr_h:.*xvsrlr\\.h.*lasx_xvsrlr_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrlr_w:.*xvsrlr\\.w.*lasx_xvsrlr_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrlr_d:.*xvsrlr\\.d.*lasx_xvsrlr_d" 1 } } */ ++/* { 
dg-final { scan-assembler-times "lasx_xvsrlri_b:.*xvsrlri\\.b.*lasx_xvsrlri_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrlri_h:.*xvsrlri\\.h.*lasx_xvsrlri_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrlri_w:.*xvsrlri\\.w.*lasx_xvsrlri_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrlri_d:.*xvsrlri\\.d.*lasx_xvsrlri_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvbitclr_b:.*xvbitclr\\.b.*lasx_xvbitclr_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvbitclr_h:.*xvbitclr\\.h.*lasx_xvbitclr_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvbitclr_w:.*xvbitclr\\.w.*lasx_xvbitclr_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvbitclr_d:.*xvbitclr\\.d.*lasx_xvbitclr_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvbitclri_b:.*xvbitclri\\.b.*lasx_xvbitclri_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvbitclri_h:.*xvbitclri\\.h.*lasx_xvbitclri_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvbitclri_w:.*xvbitclri\\.w.*lasx_xvbitclri_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvbitclri_d:.*xvbitclri\\.d.*lasx_xvbitclri_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvbitset_b:.*xvbitset\\.b.*lasx_xvbitset_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvbitset_h:.*xvbitset\\.h.*lasx_xvbitset_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvbitset_w:.*xvbitset\\.w.*lasx_xvbitset_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvbitset_d:.*xvbitset\\.d.*lasx_xvbitset_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvbitseti_b:.*xvbitseti\\.b.*lasx_xvbitseti_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvbitseti_h:.*xvbitseti\\.h.*lasx_xvbitseti_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvbitseti_w:.*xvbitseti\\.w.*lasx_xvbitseti_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvbitseti_d:.*xvbitseti\\.d.*lasx_xvbitseti_d" 1 } } */ ++/* { dg-final { 
scan-assembler-times "lasx_xvbitrev_b:.*xvbitrev\\.b.*lasx_xvbitrev_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvbitrev_h:.*xvbitrev\\.h.*lasx_xvbitrev_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvbitrev_w:.*xvbitrev\\.w.*lasx_xvbitrev_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvbitrev_d:.*xvbitrev\\.d.*lasx_xvbitrev_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvbitrevi_b:.*xvbitrevi\\.b.*lasx_xvbitrevi_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvbitrevi_h:.*xvbitrevi\\.h.*lasx_xvbitrevi_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvbitrevi_w:.*xvbitrevi\\.w.*lasx_xvbitrevi_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvbitrevi_d:.*xvbitrevi\\.d.*lasx_xvbitrevi_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvadd_b:.*xvadd\\.b.*lasx_xvadd_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvadd_h:.*xvadd\\.h.*lasx_xvadd_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvadd_w:.*xvadd\\.w.*lasx_xvadd_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvadd_d:.*xvadd\\.d.*lasx_xvadd_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvaddi_bu:.*xvaddi\\.bu.*lasx_xvaddi_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvaddi_hu:.*xvaddi\\.hu.*lasx_xvaddi_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvaddi_wu:.*xvaddi\\.wu.*lasx_xvaddi_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvaddi_du:.*xvaddi\\.du.*lasx_xvaddi_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsub_b:.*xvsub\\.b.*lasx_xvsub_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsub_h:.*xvsub\\.h.*lasx_xvsub_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsub_w:.*xvsub\\.w.*lasx_xvsub_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsub_d:.*xvsub\\.d.*lasx_xvsub_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsubi_bu:.*xvsubi\\.bu.*lasx_xvsubi_bu" 1 } } */ ++/* { dg-final { 
scan-assembler-times "lasx_xvsubi_hu:.*xvsubi\\.hu.*lasx_xvsubi_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsubi_wu:.*xvsubi\\.wu.*lasx_xvsubi_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsubi_du:.*xvsubi\\.du.*lasx_xvsubi_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmax_b:.*xvmax\\.b.*lasx_xvmax_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmax_h:.*xvmax\\.h.*lasx_xvmax_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmax_w:.*xvmax\\.w.*lasx_xvmax_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmax_d:.*xvmax\\.d.*lasx_xvmax_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaxi_b:.*xvmaxi\\.b.*lasx_xvmaxi_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaxi_h:.*xvmaxi\\.h.*lasx_xvmaxi_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaxi_w:.*xvmaxi\\.w.*lasx_xvmaxi_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaxi_d:.*xvmaxi\\.d.*lasx_xvmaxi_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmax_bu:.*xvmax\\.bu.*lasx_xvmax_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmax_hu:.*xvmax\\.hu.*lasx_xvmax_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmax_wu:.*xvmax\\.wu.*lasx_xvmax_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmax_du:.*xvmax\\.du.*lasx_xvmax_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaxi_bu:.*xvmaxi\\.bu.*lasx_xvmaxi_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaxi_hu:.*xvmaxi\\.hu.*lasx_xvmaxi_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaxi_wu:.*xvmaxi\\.wu.*lasx_xvmaxi_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaxi_du:.*xvmaxi\\.du.*lasx_xvmaxi_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmin_b:.*xvmin\\.b.*lasx_xvmin_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmin_h:.*xvmin\\.h.*lasx_xvmin_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmin_w:.*xvmin\\.w.*lasx_xvmin_w" 1 
} } */ ++/* { dg-final { scan-assembler-times "lasx_xvmin_d:.*xvmin\\.d.*lasx_xvmin_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmini_b:.*xvmini\\.b.*lasx_xvmini_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmini_h:.*xvmini\\.h.*lasx_xvmini_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmini_w:.*xvmini\\.w.*lasx_xvmini_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmini_d:.*xvmini\\.d.*lasx_xvmini_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmin_bu:.*xvmin\\.bu.*lasx_xvmin_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmin_hu:.*xvmin\\.hu.*lasx_xvmin_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmin_wu:.*xvmin\\.wu.*lasx_xvmin_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmin_du:.*xvmin\\.du.*lasx_xvmin_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmini_bu:.*xvmini\\.bu.*lasx_xvmini_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmini_hu:.*xvmini\\.hu.*lasx_xvmini_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmini_wu:.*xvmini\\.wu.*lasx_xvmini_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmini_du:.*xvmini\\.du.*lasx_xvmini_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvseq_b:.*xvseq\\.b.*lasx_xvseq_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvseq_h:.*xvseq\\.h.*lasx_xvseq_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvseq_w:.*xvseq\\.w.*lasx_xvseq_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvseq_d:.*xvseq\\.d.*lasx_xvseq_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvseqi_b:.*xvseqi\\.b.*lasx_xvseqi_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvseqi_h:.*xvseqi\\.h.*lasx_xvseqi_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvseqi_w:.*xvseqi\\.w.*lasx_xvseqi_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvseqi_d:.*xvseqi\\.d.*lasx_xvseqi_d" 1 } } */ ++/* { dg-final { scan-assembler-times 
"lasx_xvslt_b:.*xvslt\\.b.*lasx_xvslt_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvslt_h:.*xvslt\\.h.*lasx_xvslt_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvslt_w:.*xvslt\\.w.*lasx_xvslt_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvslt_d:.*xvslt\\.d.*lasx_xvslt_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvslti_b:.*xvslti\\.b.*lasx_xvslti_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvslti_h:.*xvslti\\.h.*lasx_xvslti_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvslti_w:.*xvslti\\.w.*lasx_xvslti_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvslti_d:.*xvslti\\.d.*lasx_xvslti_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvslt_bu:.*xvslt\\.bu.*lasx_xvslt_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvslt_hu:.*xvslt\\.hu.*lasx_xvslt_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvslt_wu:.*xvslt\\.wu.*lasx_xvslt_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvslt_du:.*xvslt\\.du.*lasx_xvslt_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvslti_bu:.*xvslti\\.bu.*lasx_xvslti_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvslti_hu:.*xvslti\\.hu.*lasx_xvslti_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvslti_wu:.*xvslti\\.wu.*lasx_xvslti_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvslti_du:.*xvslti\\.du.*lasx_xvslti_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsle_b:.*xvsle\\.b.*lasx_xvsle_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsle_h:.*xvsle\\.h.*lasx_xvsle_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsle_w:.*xvsle\\.w.*lasx_xvsle_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsle_d:.*xvsle\\.d.*lasx_xvsle_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvslei_b:.*xvslei\\.b.*lasx_xvslei_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvslei_h:.*xvslei\\.h.*lasx_xvslei_h" 1 } } */ ++/* { dg-final { 
scan-assembler-times "lasx_xvslei_w:.*xvslei\\.w.*lasx_xvslei_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvslei_d:.*xvslei\\.d.*lasx_xvslei_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsle_bu:.*xvsle\\.bu.*lasx_xvsle_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsle_hu:.*xvsle\\.hu.*lasx_xvsle_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsle_wu:.*xvsle\\.wu.*lasx_xvsle_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsle_du:.*xvsle\\.du.*lasx_xvsle_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvslei_bu:.*xvslei\\.bu.*lasx_xvslei_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvslei_hu:.*xvslei\\.hu.*lasx_xvslei_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvslei_wu:.*xvslei\\.wu.*lasx_xvslei_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvslei_du:.*xvslei\\.du.*lasx_xvslei_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsat_b:.*xvsat\\.b.*lasx_xvsat_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsat_h:.*xvsat\\.h.*lasx_xvsat_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsat_w:.*xvsat\\.w.*lasx_xvsat_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsat_d:.*xvsat\\.d.*lasx_xvsat_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsat_bu:.*xvsat\\.bu.*lasx_xvsat_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsat_hu:.*xvsat\\.hu.*lasx_xvsat_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsat_wu:.*xvsat\\.wu.*lasx_xvsat_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsat_du:.*xvsat\\.du.*lasx_xvsat_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvadda_b:.*xvadda\\.b.*lasx_xvadda_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvadda_h:.*xvadda\\.h.*lasx_xvadda_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvadda_w:.*xvadda\\.w.*lasx_xvadda_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvadda_d:.*xvadda\\.d.*lasx_xvadda_d" 1 
} } */ ++/* { dg-final { scan-assembler-times "lasx_xvsadd_b:.*xvsadd\\.b.*lasx_xvsadd_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsadd_h:.*xvsadd\\.h.*lasx_xvsadd_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsadd_w:.*xvsadd\\.w.*lasx_xvsadd_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsadd_d:.*xvsadd\\.d.*lasx_xvsadd_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsadd_bu:.*xvsadd\\.bu.*lasx_xvsadd_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsadd_hu:.*xvsadd\\.hu.*lasx_xvsadd_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsadd_wu:.*xvsadd\\.wu.*lasx_xvsadd_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsadd_du:.*xvsadd\\.du.*lasx_xvsadd_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvavg_b:.*xvavg\\.b.*lasx_xvavg_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvavg_h:.*xvavg\\.h.*lasx_xvavg_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvavg_w:.*xvavg\\.w.*lasx_xvavg_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvavg_d:.*xvavg\\.d.*lasx_xvavg_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvavg_bu:.*xvavg\\.bu.*lasx_xvavg_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvavg_hu:.*xvavg\\.hu.*lasx_xvavg_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvavg_wu:.*xvavg\\.wu.*lasx_xvavg_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvavg_du:.*xvavg\\.du.*lasx_xvavg_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvavgr_b:.*xvavgr\\.b.*lasx_xvavgr_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvavgr_h:.*xvavgr\\.h.*lasx_xvavgr_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvavgr_w:.*xvavgr\\.w.*lasx_xvavgr_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvavgr_d:.*xvavgr\\.d.*lasx_xvavgr_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvavgr_bu:.*xvavgr\\.bu.*lasx_xvavgr_bu" 1 } } */ ++/* { dg-final { scan-assembler-times 
"lasx_xvavgr_hu:.*xvavgr\\.hu.*lasx_xvavgr_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvavgr_wu:.*xvavgr\\.wu.*lasx_xvavgr_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvavgr_du:.*xvavgr\\.du.*lasx_xvavgr_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssub_b:.*xvssub\\.b.*lasx_xvssub_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssub_h:.*xvssub\\.h.*lasx_xvssub_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssub_w:.*xvssub\\.w.*lasx_xvssub_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssub_d:.*xvssub\\.d.*lasx_xvssub_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssub_bu:.*xvssub\\.bu.*lasx_xvssub_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssub_hu:.*xvssub\\.hu.*lasx_xvssub_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssub_wu:.*xvssub\\.wu.*lasx_xvssub_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssub_du:.*xvssub\\.du.*lasx_xvssub_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvabsd_b:.*xvabsd\\.b.*lasx_xvabsd_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvabsd_h:.*xvabsd\\.h.*lasx_xvabsd_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvabsd_w:.*xvabsd\\.w.*lasx_xvabsd_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvabsd_d:.*xvabsd\\.d.*lasx_xvabsd_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvabsd_bu:.*xvabsd\\.bu.*lasx_xvabsd_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvabsd_hu:.*xvabsd\\.hu.*lasx_xvabsd_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvabsd_wu:.*xvabsd\\.wu.*lasx_xvabsd_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvabsd_du:.*xvabsd\\.du.*lasx_xvabsd_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmul_b:.*xvmul\\.b.*lasx_xvmul_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmul_h:.*xvmul\\.h.*lasx_xvmul_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmul_w:.*xvmul\\.w.*lasx_xvmul_w" 
1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmul_d:.*xvmul\\.d.*lasx_xvmul_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmadd_b:.*xvmadd\\.b.*lasx_xvmadd_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmadd_h:.*xvmadd\\.h.*lasx_xvmadd_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmadd_w:.*xvmadd\\.w.*lasx_xvmadd_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmadd_d:.*xvmadd\\.d.*lasx_xvmadd_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmsub_b:.*xvmsub\\.b.*lasx_xvmsub_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmsub_h:.*xvmsub\\.h.*lasx_xvmsub_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmsub_w:.*xvmsub\\.w.*lasx_xvmsub_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmsub_d:.*xvmsub\\.d.*lasx_xvmsub_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvdiv_b:.*xvdiv\\.b.*lasx_xvdiv_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvdiv_h:.*xvdiv\\.h.*lasx_xvdiv_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvdiv_w:.*xvdiv\\.w.*lasx_xvdiv_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvdiv_d:.*xvdiv\\.d.*lasx_xvdiv_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvdiv_bu:.*xvdiv\\.bu.*lasx_xvdiv_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvdiv_hu:.*xvdiv\\.hu.*lasx_xvdiv_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvdiv_wu:.*xvdiv\\.wu.*lasx_xvdiv_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvdiv_du:.*xvdiv\\.du.*lasx_xvdiv_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvhaddw_h_b:.*xvhaddw\\.h\\.b.*lasx_xvhaddw_h_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvhaddw_w_h:.*xvhaddw\\.w\\.h.*lasx_xvhaddw_w_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvhaddw_d_w:.*xvhaddw\\.d\\.w.*lasx_xvhaddw_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvhaddw_hu_bu:.*xvhaddw\\.hu\\.bu.*lasx_xvhaddw_hu_bu" 1 } } */ ++/* { dg-final { 
scan-assembler-times "lasx_xvhaddw_wu_hu:.*xvhaddw\\.wu\\.hu.*lasx_xvhaddw_wu_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvhaddw_du_wu:.*xvhaddw\\.du\\.wu.*lasx_xvhaddw_du_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvhsubw_h_b:.*xvhsubw\\.h\\.b.*lasx_xvhsubw_h_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvhsubw_w_h:.*xvhsubw\\.w\\.h.*lasx_xvhsubw_w_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvhsubw_d_w:.*xvhsubw\\.d\\.w.*lasx_xvhsubw_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvhsubw_hu_bu:.*xvhsubw\\.hu\\.bu.*lasx_xvhsubw_hu_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvhsubw_wu_hu:.*xvhsubw\\.wu\\.hu.*lasx_xvhsubw_wu_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvhsubw_du_wu:.*xvhsubw\\.du\\.wu.*lasx_xvhsubw_du_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmod_b:.*xvmod\\.b.*lasx_xvmod_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmod_h:.*xvmod\\.h.*lasx_xvmod_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmod_w:.*xvmod\\.w.*lasx_xvmod_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmod_d:.*xvmod\\.d.*lasx_xvmod_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmod_bu:.*xvmod\\.bu.*lasx_xvmod_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmod_hu:.*xvmod\\.hu.*lasx_xvmod_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmod_wu:.*xvmod\\.wu.*lasx_xvmod_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmod_du:.*xvmod\\.du.*lasx_xvmod_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvrepl128vei_b:.*xvrepl128vei\\.b.*lasx_xvrepl128vei_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvrepl128vei_h:.*xvrepl128vei\\.h.*lasx_xvrepl128vei_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvrepl128vei_w:.*xvrepl128vei\\.w.*lasx_xvrepl128vei_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvrepl128vei_d:.*xvrepl128vei\\.d.*lasx_xvrepl128vei_d" 1 } } */ 
++/* { dg-final { scan-assembler-times "lasx_xvpickev_b:.*xvpickev\\.b.*lasx_xvpickev_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpickev_h:.*xvpickev\\.h.*lasx_xvpickev_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpickev_w:.*xvpickev\\.w.*lasx_xvpickev_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpickev_d:.*xvilvl\\.d.*lasx_xvpickev_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpickod_b:.*xvpickod\\.b.*lasx_xvpickod_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpickod_h:.*xvpickod\\.h.*lasx_xvpickod_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpickod_w:.*xvpickod\\.w.*lasx_xvpickod_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpickod_d:.*xvilvh\\.d.*lasx_xvpickod_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvilvh_b:.*xvilvh\\.b.*lasx_xvilvh_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvilvh_h:.*xvilvh\\.h.*lasx_xvilvh_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvilvh_w:.*xvilvh\\.w.*lasx_xvilvh_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvilvh_d:.*xvilvh\\.d.*lasx_xvilvh_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvilvl_b:.*xvilvl\\.b.*lasx_xvilvl_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvilvl_h:.*xvilvl\\.h.*lasx_xvilvl_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvilvl_w:.*xvilvl\\.w.*lasx_xvilvl_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvilvl_d:.*xvilvl\\.d.*lasx_xvilvl_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpackev_b:.*xvpackev\\.b.*lasx_xvpackev_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpackev_h:.*xvpackev\\.h.*lasx_xvpackev_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpackev_w:.*xvpackev\\.w.*lasx_xvpackev_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpackev_d:.*xvilvl\\.d.*lasx_xvpackev_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpackod_b:.*xvpackod\\.b.*lasx_xvpackod_b" 1 } 
} */ ++/* { dg-final { scan-assembler-times "lasx_xvpackod_h:.*xvpackod\\.h.*lasx_xvpackod_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpackod_w:.*xvpackod\\.w.*lasx_xvpackod_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpackod_d:.*xvilvh\\.d.*lasx_xvpackod_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvshuf_b:.*xvshuf\\.b.*lasx_xvshuf_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvshuf_h:.*xvshuf\\.h.*lasx_xvshuf_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvshuf_w:.*xvshuf\\.w.*lasx_xvshuf_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvshuf_d:.*xvshuf\\.d.*lasx_xvshuf_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvand_v:.*xvand\\.v.*lasx_xvand_v" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvandi_b:.*xvandi\\.b.*lasx_xvandi_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvor_v:.*xvor\\.v.*lasx_xvor_v" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvori_b:.*xvbitseti\\.b.*lasx_xvori_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvnor_v:.*xvnor\\.v.*lasx_xvnor_v" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvnori_b:.*xvnori\\.b.*lasx_xvnori_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvxor_v:.*xvxor\\.v.*lasx_xvxor_v" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvxori_b:.*xvbitrevi\\.b.*lasx_xvxori_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvbitsel_v:.*xvbitsel\\.v.*lasx_xvbitsel_v" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvbitseli_b:.*xvbitseli\\.b.*lasx_xvbitseli_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvshuf4i_b:.*xvshuf4i\\.b.*lasx_xvshuf4i_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvshuf4i_h:.*xvshuf4i\\.h.*lasx_xvshuf4i_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvshuf4i_w:.*xvshuf4i\\.w.*lasx_xvshuf4i_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvreplgr2vr_b:.*xvreplgr2vr\\.b.*lasx_xvreplgr2vr_b" 1 } } */ ++/* { 
dg-final { scan-assembler-times "lasx_xvreplgr2vr_h:.*xvreplgr2vr\\.h.*lasx_xvreplgr2vr_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvreplgr2vr_w:.*xvreplgr2vr\\.w.*lasx_xvreplgr2vr_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvreplgr2vr_d:.*xvreplgr2vr\\.d.*lasx_xvreplgr2vr_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpcnt_b:.*xvpcnt\\.b.*lasx_xvpcnt_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpcnt_h:.*xvpcnt\\.h.*lasx_xvpcnt_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpcnt_w:.*xvpcnt\\.w.*lasx_xvpcnt_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpcnt_d:.*xvpcnt\\.d.*lasx_xvpcnt_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvclo_b:.*xvclo\\.b.*lasx_xvclo_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvclo_h:.*xvclo\\.h.*lasx_xvclo_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvclo_w:.*xvclo\\.w.*lasx_xvclo_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvclo_d:.*xvclo\\.d.*lasx_xvclo_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvclz_b:.*xvclz\\.b.*lasx_xvclz_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvclz_h:.*xvclz\\.h.*lasx_xvclz_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvclz_w:.*xvclz\\.w.*lasx_xvclz_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvclz_d:.*xvclz\\.d.*lasx_xvclz_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfadd_s:.*xvfadd\\.s.*lasx_xvfadd_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfadd_d:.*xvfadd\\.d.*lasx_xvfadd_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfsub_s:.*xvfsub\\.s.*lasx_xvfsub_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfsub_d:.*xvfsub\\.d.*lasx_xvfsub_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfmul_s:.*xvfmul\\.s.*lasx_xvfmul_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfmul_d:.*xvfmul\\.d.*lasx_xvfmul_d" 1 } } */ ++/* { dg-final { scan-assembler-times 
"lasx_xvfdiv_s:.*xvfdiv\\.s.*lasx_xvfdiv_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfdiv_d:.*xvfdiv\\.d.*lasx_xvfdiv_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcvt_h_s:.*xvfcvt\\.h\\.s.*lasx_xvfcvt_h_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcvt_s_d:.*xvfcvt\\.s\\.d.*lasx_xvfcvt_s_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfmin_s:.*xvfmin\\.s.*lasx_xvfmin_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfmin_d:.*xvfmin\\.d.*lasx_xvfmin_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfmina_s:.*xvfmina\\.s.*lasx_xvfmina_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfmina_d:.*xvfmina\\.d.*lasx_xvfmina_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfmax_s:.*xvfmax\\.s.*lasx_xvfmax_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfmax_d:.*xvfmax\\.d.*lasx_xvfmax_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfmaxa_s:.*xvfmaxa\\.s.*lasx_xvfmaxa_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfmaxa_d:.*xvfmaxa\\.d.*lasx_xvfmaxa_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfclass_s:.*xvfclass\\.s.*lasx_xvfclass_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfclass_d:.*xvfclass\\.d.*lasx_xvfclass_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfsqrt_s:.*xvfsqrt\\.s.*lasx_xvfsqrt_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfsqrt_d:.*xvfsqrt\\.d.*lasx_xvfsqrt_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfrecip_s:.*xvfrecip\\.s.*lasx_xvfrecip_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfrecip_d:.*xvfrecip\\.d.*lasx_xvfrecip_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfrint_s:.*xvfrint\\.s.*lasx_xvfrint_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfrint_d:.*xvfrint\\.d.*lasx_xvfrint_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfrsqrt_s:.*xvfrsqrt\\.s.*lasx_xvfrsqrt_s" 1 } } */ ++/* { dg-final { 
scan-assembler-times "lasx_xvfrsqrt_d:.*xvfrsqrt\\.d.*lasx_xvfrsqrt_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvflogb_s:.*xvflogb\\.s.*lasx_xvflogb_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvflogb_d:.*xvflogb\\.d.*lasx_xvflogb_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcvth_s_h:.*xvfcvth\\.s\\.h.*lasx_xvfcvth_s_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcvth_d_s:.*xvfcvth\\.d\\.s.*lasx_xvfcvth_d_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcvtl_s_h:.*xvfcvtl\\.s\\.h.*lasx_xvfcvtl_s_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcvtl_d_s:.*xvfcvtl\\.d\\.s.*lasx_xvfcvtl_d_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvftint_w_s:.*xvftint\\.w\\.s.*lasx_xvftint_w_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvftint_l_d:.*xvftint\\.l\\.d.*lasx_xvftint_l_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvftint_wu_s:.*xvftint\\.wu\\.s.*lasx_xvftint_wu_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvftint_lu_d:.*xvftint\\.lu\\.d.*lasx_xvftint_lu_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvftintrz_w_s:.*xvftintrz\\.w\\.s.*lasx_xvftintrz_w_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvftintrz_l_d:.*xvftintrz\\.l\\.d.*lasx_xvftintrz_l_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvftintrz_wu_s:.*xvftintrz\\.wu\\.s.*lasx_xvftintrz_wu_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvftintrz_lu_d:.*xvftintrz\\.lu\\.d.*lasx_xvftintrz_lu_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvffint_s_w:.*xvffint\\.s\\.w.*lasx_xvffint_s_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvffint_d_l:.*xvffint\\.d\\.l.*lasx_xvffint_d_l" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvffint_s_wu:.*xvffint\\.s\\.wu.*lasx_xvffint_s_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvffint_d_lu:.*xvffint\\.d\\.lu.*lasx_xvffint_d_lu" 1 } } */ ++/* { dg-final { scan-assembler-times 
"lasx_xvreplve_b:.*xvreplve\\.b.*lasx_xvreplve_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvreplve_h:.*xvreplve\\.h.*lasx_xvreplve_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvreplve_w:.*xvreplve\\.w.*lasx_xvreplve_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvreplve_d:.*xvreplve\\.d.*lasx_xvreplve_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpermi_w:.*xvpermi\\.w.*lasx_xvpermi_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvandn_v:.*xvandn\\.v.*lasx_xvandn_v" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvneg_b:.*xvneg\\.b.*lasx_xvneg_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvneg_h:.*xvneg\\.h.*lasx_xvneg_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvneg_w:.*xvneg\\.w.*lasx_xvneg_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvneg_d:.*xvneg\\.d.*lasx_xvneg_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmuh_b:.*xvmuh\\.b.*lasx_xvmuh_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmuh_h:.*xvmuh\\.h.*lasx_xvmuh_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmuh_w:.*xvmuh\\.w.*lasx_xvmuh_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmuh_d:.*xvmuh\\.d.*lasx_xvmuh_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmuh_bu:.*xvmuh\\.bu.*lasx_xvmuh_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmuh_hu:.*xvmuh\\.hu.*lasx_xvmuh_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmuh_wu:.*xvmuh\\.wu.*lasx_xvmuh_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmuh_du:.*xvmuh\\.du.*lasx_xvmuh_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsllwil_h_b:.*xvsllwil\\.h\\.b.*lasx_xvsllwil_h_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsllwil_w_h:.*xvsllwil\\.w\\.h.*lasx_xvsllwil_w_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsllwil_d_w:.*xvsllwil\\.d\\.w.*lasx_xvsllwil_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times 
"lasx_xvsllwil_hu_bu:.*xvsllwil\\.hu\\.bu.*lasx_xvsllwil_hu_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsllwil_wu_hu:.*xvsllwil\\.wu\\.hu.*lasx_xvsllwil_wu_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsllwil_du_wu:.*xvsllwil\\.du\\.wu.*lasx_xvsllwil_du_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsran_b_h:.*xvsran\\.b\\.h.*lasx_xvsran_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsran_h_w:.*xvsran\\.h\\.w.*lasx_xvsran_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsran_w_d:.*xvsran\\.w\\.d.*lasx_xvsran_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssran_b_h:.*xvssran\\.b\\.h.*lasx_xvssran_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssran_h_w:.*xvssran\\.h\\.w.*lasx_xvssran_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssran_w_d:.*xvssran\\.w\\.d.*lasx_xvssran_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssran_bu_h:.*xvssran\\.bu\\.h.*lasx_xvssran_bu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssran_hu_w:.*xvssran\\.hu\\.w.*lasx_xvssran_hu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssran_wu_d:.*xvssran\\.wu\\.d.*lasx_xvssran_wu_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrarn_b_h:.*xvsrarn\\.b\\.h.*lasx_xvsrarn_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrarn_h_w:.*xvsrarn\\.h\\.w.*lasx_xvsrarn_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrarn_w_d:.*xvsrarn\\.w\\.d.*lasx_xvsrarn_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrarn_b_h:.*xvssrarn\\.b\\.h.*lasx_xvssrarn_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrarn_h_w:.*xvssrarn\\.h\\.w.*lasx_xvssrarn_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrarn_w_d:.*xvssrarn\\.w\\.d.*lasx_xvssrarn_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrarn_bu_h:.*xvssrarn\\.bu\\.h.*lasx_xvssrarn_bu_h" 1 } } */ ++/* { dg-final { scan-assembler-times 
"lasx_xvssrarn_hu_w:.*xvssrarn\\.hu\\.w.*lasx_xvssrarn_hu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrarn_wu_d:.*xvssrarn\\.wu\\.d.*lasx_xvssrarn_wu_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrln_b_h:.*xvsrln\\.b\\.h.*lasx_xvsrln_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrln_h_w:.*xvsrln\\.h\\.w.*lasx_xvsrln_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrln_w_d:.*xvsrln\\.w\\.d.*lasx_xvsrln_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrln_bu_h:.*xvssrln\\.bu\\.h.*lasx_xvssrln_bu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrln_hu_w:.*xvssrln\\.hu\\.w.*lasx_xvssrln_hu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrln_wu_d:.*xvssrln\\.wu\\.d.*lasx_xvssrln_wu_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrlrn_b_h:.*xvsrlrn\\.b\\.h.*lasx_xvsrlrn_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrlrn_h_w:.*xvsrlrn\\.h\\.w.*lasx_xvsrlrn_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrlrn_w_d:.*xvsrlrn\\.w\\.d.*lasx_xvsrlrn_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrlrn_bu_h:.*xvssrlrn\\.bu\\.h.*lasx_xvssrlrn_bu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrlrn_hu_w:.*xvssrlrn\\.hu\\.w.*lasx_xvssrlrn_hu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrlrn_wu_d:.*xvssrlrn\\.wu\\.d.*lasx_xvssrlrn_wu_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfrstpi_b:.*xvfrstpi\\.b.*lasx_xvfrstpi_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfrstpi_h:.*xvfrstpi\\.h.*lasx_xvfrstpi_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfrstp_b:.*xvfrstp\\.b.*lasx_xvfrstp_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfrstp_h:.*xvfrstp\\.h.*lasx_xvfrstp_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvshuf4i_d:.*xvshuf4i\\.d.*lasx_xvshuf4i_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvbsrl_v:.*xvbsrl\\.v.*lasx_xvbsrl_v" 
1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvbsll_v:.*xvbsll\\.v.*lasx_xvbsll_v" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvextrins_b:.*xvextrins\\.b.*lasx_xvextrins_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvextrins_h:.*xvextrins\\.h.*lasx_xvextrins_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvextrins_w:.*xvextrins\\.w.*lasx_xvextrins_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvextrins_d:.*xvextrins\\.d.*lasx_xvextrins_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmskltz_b:.*xvmskltz\\.b.*lasx_xvmskltz_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmskltz_h:.*xvmskltz\\.h.*lasx_xvmskltz_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmskltz_w:.*xvmskltz\\.w.*lasx_xvmskltz_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmskltz_d:.*xvmskltz\\.d.*lasx_xvmskltz_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsigncov_b:.*xvsigncov\\.b.*lasx_xvsigncov_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsigncov_h:.*xvsigncov\\.h.*lasx_xvsigncov_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsigncov_w:.*xvsigncov\\.w.*lasx_xvsigncov_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsigncov_d:.*xvsigncov\\.d.*lasx_xvsigncov_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfmadd_s:.*xvfmadd\\.s.*lasx_xvfmadd_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfmadd_d:.*xvfmadd\\.d.*lasx_xvfmadd_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfmsub_s:.*xvfmsub\\.s.*lasx_xvfmsub_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfmsub_d:.*xvfmsub\\.d.*lasx_xvfmsub_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfnmadd_s:.*xvfnmadd\\.s.*lasx_xvfnmadd_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfnmadd_d:.*xvfnmadd\\.d.*lasx_xvfnmadd_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfnmsub_s:.*xvfnmsub\\.s.*lasx_xvfnmsub_s" 1 } } */ ++/* { dg-final { 
scan-assembler-times "lasx_xvfnmsub_d:.*xvfnmsub\\.d.*lasx_xvfnmsub_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvftintrne_w_s:.*xvftintrne\\.w\\.s.*lasx_xvftintrne_w_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvftintrne_l_d:.*xvftintrne\\.l\\.d.*lasx_xvftintrne_l_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvftintrp_w_s:.*xvftintrp\\.w\\.s.*lasx_xvftintrp_w_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvftintrp_l_d:.*xvftintrp\\.l\\.d.*lasx_xvftintrp_l_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvftintrm_w_s:.*xvftintrm\\.w\\.s.*lasx_xvftintrm_w_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvftintrm_l_d:.*xvftintrm\\.l\\.d.*lasx_xvftintrm_l_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvftint_w_d:.*xvftint\\.w\\.d.*lasx_xvftint_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvffint_s_l:.*xvffint\\.s\\.l.*lasx_xvffint_s_l" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvftintrz_w_d:.*xvftintrz\\.w\\.d.*lasx_xvftintrz_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvftintrp_w_d:.*xvftintrp\\.w\\.d.*lasx_xvftintrp_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvftintrm_w_d:.*xvftintrm\\.w\\.d.*lasx_xvftintrm_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvftintrne_w_d:.*xvftintrne\\.w\\.d.*lasx_xvftintrne_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvftinth_l_s:.*xvftinth\\.l\\.s.*lasx_xvftinth_l_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvftintl_l_s:.*xvftintl\\.l\\.s.*lasx_xvftintl_l_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvffinth_d_w:.*xvffinth\\.d\\.w.*lasx_xvffinth_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvffintl_d_w:.*xvffintl\\.d\\.w.*lasx_xvffintl_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvftintrzh_l_s:.*xvftintrzh\\.l\\.s.*lasx_xvftintrzh_l_s" 1 } } */ ++/* { dg-final { scan-assembler-times 
"lasx_xvftintrzl_l_s:.*xvftintrzl\\.l\\.s.*lasx_xvftintrzl_l_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvftintrph_l_s:.*xvftintrph\\.l\\.s.*lasx_xvftintrph_l_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvftintrpl_l_s:.*xvftintrpl\\.l\\.s.*lasx_xvftintrpl_l_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvftintrmh_l_s:.*xvftintrmh\\.l\\.s.*lasx_xvftintrmh_l_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvftintrml_l_s:.*xvftintrml\\.l\\.s.*lasx_xvftintrml_l_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvftintrneh_l_s:.*xvftintrneh\\.l\\.s.*lasx_xvftintrneh_l_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvftintrnel_l_s:.*xvftintrnel\\.l\\.s.*lasx_xvftintrnel_l_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfrintrne_s:.*xvfrintrne\\.s.*lasx_xvfrintrne_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfrintrne_d:.*xvfrintrne\\.d.*lasx_xvfrintrne_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfrintrz_s:.*xvfrintrz\\.s.*lasx_xvfrintrz_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfrintrz_d:.*xvfrintrz\\.d.*lasx_xvfrintrz_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfrintrp_s:.*xvfrintrp\\.s.*lasx_xvfrintrp_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfrintrp_d:.*xvfrintrp\\.d.*lasx_xvfrintrp_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfrintrm_s:.*xvfrintrm\\.s.*lasx_xvfrintrm_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfrintrm_d:.*xvfrintrm\\.d.*lasx_xvfrintrm_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvld:.*xvld.*lasx_xvld" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvst:.*xvst.*lasx_xvst" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvstelm_b:.*xvstelm\\.b.*lasx_xvstelm_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvstelm_h:.*xvstelm\\.h.*lasx_xvstelm_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvstelm_w:.*xvstelm\\.w.*lasx_xvstelm_w" 1 
} } */ ++/* { dg-final { scan-assembler-times "lasx_xvstelm_d:.*xvstelm\\.d.*lasx_xvstelm_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvinsve0_w:.*xvinsve0\\.w.*lasx_xvinsve0_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvinsve0_d:.*xvinsve0\\.d.*lasx_xvinsve0_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpickve_w:.*xvpickve\\.w.*lasx_xvpickve_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpickve_d:.*xvpickve\\.d.*lasx_xvpickve_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrlrn_b_h:.*xvssrlrn\\.b\\.h.*lasx_xvssrlrn_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrlrn_h_w:.*xvssrlrn\\.h\\.w.*lasx_xvssrlrn_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrlrn_w_d:.*xvssrlrn\\.w\\.d.*lasx_xvssrlrn_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrln_b_h:.*xvssrln\\.b\\.h.*lasx_xvssrln_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrln_h_w:.*xvssrln\\.h\\.w.*lasx_xvssrln_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrln_w_d:.*xvssrln\\.w\\.d.*lasx_xvssrln_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvorn_v:.*xvorn\\.v.*lasx_xvorn_v" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvldi:.*xvldi.*lasx_xvldi" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvldx:.*xvldx.*lasx_xvldx" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvstx:.*xvstx.*lasx_xvstx" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvextl_qu_du:.*xvextl\\.qu\\.du.*lasx_xvextl_qu_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvinsgr2vr_w:.*xvinsgr2vr\\.w.*lasx_xvinsgr2vr_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvinsgr2vr_d:.*xvinsgr2vr\\.d.*lasx_xvinsgr2vr_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvreplve0_b:.*xvreplve0\\.b.*lasx_xvreplve0_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvreplve0_h:.*xvreplve0\\.h.*lasx_xvreplve0_h" 1 } } */ ++/* { dg-final { 
scan-assembler-times "lasx_xvreplve0_w:.*xvreplve0\\.w.*lasx_xvreplve0_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvreplve0_d:.*xvreplve0\\.d.*lasx_xvreplve0_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvreplve0_q:.*xvreplve0\\.q.*lasx_xvreplve0_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_vext2xv_h_b:.*vext2xv\\.h\\.b.*lasx_vext2xv_h_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_vext2xv_w_h:.*vext2xv\\.w\\.h.*lasx_vext2xv_w_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_vext2xv_d_w:.*vext2xv\\.d\\.w.*lasx_vext2xv_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_vext2xv_w_b:.*vext2xv\\.w\\.b.*lasx_vext2xv_w_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_vext2xv_d_h:.*vext2xv\\.d\\.h.*lasx_vext2xv_d_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_vext2xv_d_b:.*vext2xv\\.d\\.b.*lasx_vext2xv_d_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_vext2xv_hu_bu:.*vext2xv\\.hu\\.bu.*lasx_vext2xv_hu_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_vext2xv_wu_hu:.*vext2xv\\.wu\\.hu.*lasx_vext2xv_wu_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_vext2xv_du_wu:.*vext2xv\\.du\\.wu.*lasx_vext2xv_du_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_vext2xv_wu_bu:.*vext2xv\\.wu\\.bu.*lasx_vext2xv_wu_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_vext2xv_du_hu:.*vext2xv\\.du\\.hu.*lasx_vext2xv_du_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_vext2xv_du_bu:.*vext2xv\\.du\\.bu.*lasx_vext2xv_du_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpermi_q:.*xvpermi\\.q.*lasx_xvpermi_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpermi_d:.*xvpermi\\.d.*lasx_xvpermi_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvperm_w:.*xvperm\\.w.*lasx_xvperm_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvldrepl_b:.*xvldrepl\\.b.*lasx_xvldrepl_b" 1 } } */ ++/* { dg-final { scan-assembler-times 
"lasx_xvldrepl_h:.*xvldrepl\\.h.*lasx_xvldrepl_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvldrepl_w:.*xvldrepl\\.w.*lasx_xvldrepl_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvldrepl_d:.*xvldrepl\\.d.*lasx_xvldrepl_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpickve2gr_w:.*xvpickve2gr\\.w.*lasx_xvpickve2gr_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpickve2gr_wu:.*xvpickve2gr\\.wu.*lasx_xvpickve2gr_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpickve2gr_d:.*xvpickve2gr\\.d.*lasx_xvpickve2gr_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpickve2gr_du:.*xvpickve2gr\\.du.*lasx_xvpickve2gr_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvaddwev_q_d:.*xvaddwev\\.q\\.d.*lasx_xvaddwev_q_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvaddwev_d_w:.*xvaddwev\\.d\\.w.*lasx_xvaddwev_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvaddwev_w_h:.*xvaddwev\\.w\\.h.*lasx_xvaddwev_w_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvaddwev_h_b:.*xvaddwev\\.h\\.b.*lasx_xvaddwev_h_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvaddwev_q_du:.*xvaddwev\\.q\\.du.*lasx_xvaddwev_q_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvaddwev_d_wu:.*xvaddwev\\.d\\.wu.*lasx_xvaddwev_d_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvaddwev_w_hu:.*xvaddwev\\.w\\.hu.*lasx_xvaddwev_w_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvaddwev_h_bu:.*xvaddwev\\.h\\.bu.*lasx_xvaddwev_h_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsubwev_q_d:.*xvsubwev\\.q\\.d.*lasx_xvsubwev_q_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsubwev_d_w:.*xvsubwev\\.d\\.w.*lasx_xvsubwev_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsubwev_w_h:.*xvsubwev\\.w\\.h.*lasx_xvsubwev_w_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsubwev_h_b:.*xvsubwev\\.h\\.b.*lasx_xvsubwev_h_b" 1 } } */ ++/* { dg-final { 
scan-assembler-times "lasx_xvsubwev_q_du:.*xvsubwev\\.q\\.du.*lasx_xvsubwev_q_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsubwev_d_wu:.*xvsubwev\\.d\\.wu.*lasx_xvsubwev_d_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsubwev_w_hu:.*xvsubwev\\.w\\.hu.*lasx_xvsubwev_w_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsubwev_h_bu:.*xvsubwev\\.h\\.bu.*lasx_xvsubwev_h_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmulwev_q_d:.*xvmulwev\\.q\\.d.*lasx_xvmulwev_q_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmulwev_d_w:.*xvmulwev\\.d\\.w.*lasx_xvmulwev_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmulwev_w_h:.*xvmulwev\\.w\\.h.*lasx_xvmulwev_w_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmulwev_h_b:.*xvmulwev\\.h\\.b.*lasx_xvmulwev_h_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmulwev_q_du:.*xvmulwev\\.q\\.du.*lasx_xvmulwev_q_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmulwev_d_wu:.*xvmulwev\\.d\\.wu.*lasx_xvmulwev_d_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmulwev_w_hu:.*xvmulwev\\.w\\.hu.*lasx_xvmulwev_w_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmulwev_h_bu:.*xvmulwev\\.h\\.bu.*lasx_xvmulwev_h_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvaddwod_q_d:.*xvaddwod\\.q\\.d.*lasx_xvaddwod_q_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvaddwod_d_w:.*xvaddwod\\.d\\.w.*lasx_xvaddwod_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvaddwod_w_h:.*xvaddwod\\.w\\.h.*lasx_xvaddwod_w_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvaddwod_h_b:.*xvaddwod\\.h\\.b.*lasx_xvaddwod_h_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvaddwod_q_du:.*xvaddwod\\.q\\.du.*lasx_xvaddwod_q_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvaddwod_d_wu:.*xvaddwod\\.d\\.wu.*lasx_xvaddwod_d_wu" 1 } } */ ++/* { dg-final { scan-assembler-times 
"lasx_xvaddwod_w_hu:.*xvaddwod\\.w\\.hu.*lasx_xvaddwod_w_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvaddwod_h_bu:.*xvaddwod\\.h\\.bu.*lasx_xvaddwod_h_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsubwod_q_d:.*xvsubwod\\.q\\.d.*lasx_xvsubwod_q_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsubwod_d_w:.*xvsubwod\\.d\\.w.*lasx_xvsubwod_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsubwod_w_h:.*xvsubwod\\.w\\.h.*lasx_xvsubwod_w_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsubwod_h_b:.*xvsubwod\\.h\\.b.*lasx_xvsubwod_h_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsubwod_q_du:.*xvsubwod\\.q\\.du.*lasx_xvsubwod_q_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsubwod_d_wu:.*xvsubwod\\.d\\.wu.*lasx_xvsubwod_d_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsubwod_w_hu:.*xvsubwod\\.w\\.hu.*lasx_xvsubwod_w_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsubwod_h_bu:.*xvsubwod\\.h\\.bu.*lasx_xvsubwod_h_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmulwod_q_d:.*xvmulwod\\.q\\.d.*lasx_xvmulwod_q_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmulwod_d_w:.*xvmulwod\\.d\\.w.*lasx_xvmulwod_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmulwod_w_h:.*xvmulwod\\.w\\.h.*lasx_xvmulwod_w_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmulwod_h_b:.*xvmulwod\\.h\\.b.*lasx_xvmulwod_h_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmulwod_q_du:.*xvmulwod\\.q\\.du.*lasx_xvmulwod_q_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmulwod_d_wu:.*xvmulwod\\.d\\.wu.*lasx_xvmulwod_d_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmulwod_w_hu:.*xvmulwod\\.w\\.hu.*lasx_xvmulwod_w_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmulwod_h_bu:.*xvmulwod\\.h\\.bu.*lasx_xvmulwod_h_bu" 1 } } */ ++/* { dg-final { scan-assembler-times 
"lasx_xvaddwev_d_wu_w:.*xvaddwev\\.d\\.wu\\.w.*lasx_xvaddwev_d_wu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvaddwev_w_hu_h:.*xvaddwev\\.w\\.hu\\.h.*lasx_xvaddwev_w_hu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvaddwev_h_bu_b:.*xvaddwev\\.h\\.bu\\.b.*lasx_xvaddwev_h_bu_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmulwev_d_wu_w:.*xvmulwev\\.d\\.wu\\.w.*lasx_xvmulwev_d_wu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmulwev_w_hu_h:.*xvmulwev\\.w\\.hu\\.h.*lasx_xvmulwev_w_hu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmulwev_h_bu_b:.*xvmulwev\\.h\\.bu\\.b.*lasx_xvmulwev_h_bu_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvaddwod_d_wu_w:.*xvaddwod\\.d\\.wu\\.w.*lasx_xvaddwod_d_wu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvaddwod_w_hu_h:.*xvaddwod\\.w\\.hu\\.h.*lasx_xvaddwod_w_hu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvaddwod_h_bu_b:.*xvaddwod\\.h\\.bu\\.b.*lasx_xvaddwod_h_bu_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmulwod_d_wu_w:.*xvmulwod\\.d\\.wu\\.w.*lasx_xvmulwod_d_wu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmulwod_w_hu_h:.*xvmulwod\\.w\\.hu\\.h.*lasx_xvmulwod_w_hu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmulwod_h_bu_b:.*xvmulwod\\.h\\.bu\\.b.*lasx_xvmulwod_h_bu_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvhaddw_q_d:.*xvhaddw\\.q\\.d.*lasx_xvhaddw_q_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvhaddw_qu_du:.*xvhaddw\\.qu\\.du.*lasx_xvhaddw_qu_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvhsubw_q_d:.*xvhsubw\\.q\\.d.*lasx_xvhsubw_q_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvhsubw_qu_du:.*xvhsubw\\.qu\\.du.*lasx_xvhsubw_qu_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwev_q_d:.*xvmaddwev\\.q\\.d.*lasx_xvmaddwev_q_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwev_d_w:.*xvmaddwev\\.d\\.w.*lasx_xvmaddwev_d_w" 1 
} } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwev_w_h:.*xvmaddwev\\.w\\.h.*lasx_xvmaddwev_w_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwev_h_b:.*xvmaddwev\\.h\\.b.*lasx_xvmaddwev_h_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwev_q_du:.*xvmaddwev\\.q\\.du.*lasx_xvmaddwev_q_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwev_d_wu:.*xvmaddwev\\.d\\.wu.*lasx_xvmaddwev_d_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwev_w_hu:.*xvmaddwev\\.w\\.hu.*lasx_xvmaddwev_w_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwev_h_bu:.*xvmaddwev\\.h\\.bu.*lasx_xvmaddwev_h_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwod_q_d:.*xvmaddwod\\.q\\.d.*lasx_xvmaddwod_q_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwod_d_w:.*xvmaddwod\\.d\\.w.*lasx_xvmaddwod_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwod_w_h:.*xvmaddwod\\.w\\.h.*lasx_xvmaddwod_w_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwod_h_b:.*xvmaddwod\\.h\\.b.*lasx_xvmaddwod_h_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwod_q_du:.*xvmaddwod\\.q\\.du.*lasx_xvmaddwod_q_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwod_d_wu:.*xvmaddwod\\.d\\.wu.*lasx_xvmaddwod_d_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwod_w_hu:.*xvmaddwod\\.w\\.hu.*lasx_xvmaddwod_w_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwod_h_bu:.*xvmaddwod\\.h\\.bu.*lasx_xvmaddwod_h_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwev_q_du_d:.*xvmaddwev\\.q\\.du\\.d.*lasx_xvmaddwev_q_du_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwev_d_wu_w:.*xvmaddwev\\.d\\.wu\\.w.*lasx_xvmaddwev_d_wu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwev_w_hu_h:.*xvmaddwev\\.w\\.hu\\.h.*lasx_xvmaddwev_w_hu_h" 1 } } */ ++/* { dg-final { scan-assembler-times 
"lasx_xvmaddwev_h_bu_b:.*xvmaddwev\\.h\\.bu\\.b.*lasx_xvmaddwev_h_bu_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwod_q_du_d:.*xvmaddwod\\.q\\.du\\.d.*lasx_xvmaddwod_q_du_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwod_d_wu_w:.*xvmaddwod\\.d\\.wu\\.w.*lasx_xvmaddwod_d_wu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwod_w_hu_h:.*xvmaddwod\\.w\\.hu\\.h.*lasx_xvmaddwod_w_hu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwod_h_bu_b:.*xvmaddwod\\.h\\.bu\\.b.*lasx_xvmaddwod_h_bu_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvrotr_b:.*xvrotr\\.b.*lasx_xvrotr_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvrotr_h:.*xvrotr\\.h.*lasx_xvrotr_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvrotr_w:.*xvrotr\\.w.*lasx_xvrotr_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvrotr_d:.*xvrotr\\.d.*lasx_xvrotr_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvadd_q:.*xvadd\\.q.*lasx_xvadd_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsub_q:.*xvsub\\.q.*lasx_xvsub_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvaddwev_q_du_d:.*xvaddwev\\.q\\.du\\.d.*lasx_xvaddwev_q_du_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvaddwod_q_du_d:.*xvaddwod\\.q\\.du\\.d.*lasx_xvaddwod_q_du_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmulwev_q_du_d:.*xvmulwev\\.q\\.du\\.d.*lasx_xvmulwev_q_du_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmulwod_q_du_d:.*xvmulwod\\.q\\.du\\.d.*lasx_xvmulwod_q_du_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmskgez_b:.*xvmskgez\\.b.*lasx_xvmskgez_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmsknz_b:.*xvmsknz\\.b.*lasx_xvmsknz_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvexth_h_b:.*xvexth\\.h\\.b.*lasx_xvexth_h_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvexth_w_h:.*xvexth\\.w\\.h.*lasx_xvexth_w_h" 1 } } */ ++/* { dg-final { 
scan-assembler-times "lasx_xvexth_d_w:.*xvexth\\.d\\.w.*lasx_xvexth_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvexth_q_d:.*xvexth\\.q\\.d.*lasx_xvexth_q_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvexth_hu_bu:.*xvexth\\.hu\\.bu.*lasx_xvexth_hu_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvexth_wu_hu:.*xvexth\\.wu\\.hu.*lasx_xvexth_wu_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvexth_du_wu:.*xvexth\\.du\\.wu.*lasx_xvexth_du_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvexth_qu_du:.*xvexth\\.qu\\.du.*lasx_xvexth_qu_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvrotri_b:.*xvrotri\\.b.*lasx_xvrotri_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvrotri_h:.*xvrotri\\.h.*lasx_xvrotri_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvrotri_w:.*xvrotri\\.w.*lasx_xvrotri_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvrotri_d:.*xvrotri\\.d.*lasx_xvrotri_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvextl_q_d:.*xvextl\\.q\\.d.*lasx_xvextl_q_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrlni_b_h:.*xvsrlni\\.b\\.h.*lasx_xvsrlni_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrlni_h_w:.*xvsrlni\\.h\\.w.*lasx_xvsrlni_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrlni_w_d:.*xvsrlni\\.w\\.d.*lasx_xvsrlni_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrlni_d_q:.*xvsrlni\\.d\\.q.*lasx_xvsrlni_d_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrlrni_b_h:.*xvsrlrni\\.b\\.h.*lasx_xvsrlrni_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrlrni_h_w:.*xvsrlrni\\.h\\.w.*lasx_xvsrlrni_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrlrni_w_d:.*xvsrlrni\\.w\\.d.*lasx_xvsrlrni_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrlrni_d_q:.*xvsrlrni\\.d\\.q.*lasx_xvsrlrni_d_q" 1 } } */ ++/* { dg-final { scan-assembler-times 
"lasx_xvssrlni_b_h:.*xvssrlni\\.b\\.h.*lasx_xvssrlni_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrlni_h_w:.*xvssrlni\\.h\\.w.*lasx_xvssrlni_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrlni_w_d:.*xvssrlni\\.w\\.d.*lasx_xvssrlni_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrlni_d_q:.*xvssrlni\\.d\\.q.*lasx_xvssrlni_d_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrlni_bu_h:.*xvssrlni\\.bu\\.h.*lasx_xvssrlni_bu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrlni_hu_w:.*xvssrlni\\.hu\\.w.*lasx_xvssrlni_hu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrlni_wu_d:.*xvssrlni\\.wu\\.d.*lasx_xvssrlni_wu_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrlni_du_q:.*xvssrlni\\.du\\.q.*lasx_xvssrlni_du_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrlrni_b_h:.*xvssrlrni\\.b\\.h.*lasx_xvssrlrni_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrlrni_h_w:.*xvssrlrni\\.h\\.w.*lasx_xvssrlrni_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrlrni_w_d:.*xvssrlrni\\.w\\.d.*lasx_xvssrlrni_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrlrni_d_q:.*xvssrlrni\\.d\\.q.*lasx_xvssrlrni_d_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrlrni_bu_h:.*xvssrlrni\\.bu\\.h.*lasx_xvssrlrni_bu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrlrni_hu_w:.*xvssrlrni\\.hu\\.w.*lasx_xvssrlrni_hu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrlrni_wu_d:.*xvssrlrni\\.wu\\.d.*lasx_xvssrlrni_wu_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrlrni_du_q:.*xvssrlrni\\.du\\.q.*lasx_xvssrlrni_du_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrani_b_h:.*xvsrani\\.b\\.h.*lasx_xvsrani_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrani_h_w:.*xvsrani\\.h\\.w.*lasx_xvsrani_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times 
"lasx_xvsrani_w_d:.*xvsrani\\.w\\.d.*lasx_xvsrani_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrani_d_q:.*xvsrani\\.d\\.q.*lasx_xvsrani_d_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrarni_b_h:.*xvsrarni\\.b\\.h.*lasx_xvsrarni_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrarni_h_w:.*xvsrarni\\.h\\.w.*lasx_xvsrarni_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrarni_w_d:.*xvsrarni\\.w\\.d.*lasx_xvsrarni_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrarni_d_q:.*xvsrarni\\.d\\.q.*lasx_xvsrarni_d_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrani_b_h:.*xvssrani\\.b\\.h.*lasx_xvssrani_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrani_h_w:.*xvssrani\\.h\\.w.*lasx_xvssrani_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrani_w_d:.*xvssrani\\.w\\.d.*lasx_xvssrani_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrani_d_q:.*xvssrani\\.d\\.q.*lasx_xvssrani_d_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrani_bu_h:.*xvssrani\\.bu\\.h.*lasx_xvssrani_bu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrani_hu_w:.*xvssrani\\.hu\\.w.*lasx_xvssrani_hu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrani_wu_d:.*xvssrani\\.wu\\.d.*lasx_xvssrani_wu_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrani_du_q:.*xvssrani\\.du\\.q.*lasx_xvssrani_du_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrarni_b_h:.*xvssrarni\\.b\\.h.*lasx_xvssrarni_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrarni_h_w:.*xvssrarni\\.h\\.w.*lasx_xvssrarni_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrarni_w_d:.*xvssrarni\\.w\\.d.*lasx_xvssrarni_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrarni_d_q:.*xvssrarni\\.d\\.q.*lasx_xvssrarni_d_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrarni_bu_h:.*xvssrarni\\.bu\\.h.*lasx_xvssrarni_bu_h" 1 } } */ 
++/* { dg-final { scan-assembler-times "lasx_xvssrarni_hu_w:.*xvssrarni\\.hu\\.w.*lasx_xvssrarni_hu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrarni_wu_d:.*xvssrarni\\.wu\\.d.*lasx_xvssrarni_wu_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrarni_du_q:.*xvssrarni\\.du\\.q.*lasx_xvssrarni_du_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xbnz_b:.*xvsetanyeqz\\.b.*lasx_xbnz_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xbnz_d:.*xvsetanyeqz\\.d.*lasx_xbnz_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xbnz_h:.*xvsetanyeqz\\.h.*lasx_xbnz_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xbnz_v:.*xvseteqz\\.v.*lasx_xbnz_v" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xbnz_w:.*xvsetanyeqz\\.w.*lasx_xbnz_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xbz_b:.*xvsetallnez\\.b.*lasx_xbz_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xbz_d:.*xvsetallnez\\.d.*lasx_xbz_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xbz_h:.*xvsetallnez\\.h.*lasx_xbz_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xbz_v:.*xvsetnez\\.v.*lasx_xbz_v" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xbz_w:.*xvsetallnez\\.w.*lasx_xbz_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_caf_d:.*xvfcmp\\.caf\\.d.*lasx_xvfcmp_caf_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_caf_s:.*xvfcmp\\.caf\\.s.*lasx_xvfcmp_caf_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_ceq_d:.*xvfcmp\\.ceq\\.d.*lasx_xvfcmp_ceq_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_ceq_s:.*xvfcmp\\.ceq\\.s.*lasx_xvfcmp_ceq_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cle_d:.*xvfcmp\\.cle\\.d.*lasx_xvfcmp_cle_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cle_s:.*xvfcmp\\.cle\\.s.*lasx_xvfcmp_cle_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_clt_d:.*xvfcmp\\.clt\\.d.*lasx_xvfcmp_clt_d" 1 } } */ ++/* { 
dg-final { scan-assembler-times "lasx_xvfcmp_clt_s:.*xvfcmp\\.clt\\.s.*lasx_xvfcmp_clt_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cne_d:.*xvfcmp\\.cne\\.d.*lasx_xvfcmp_cne_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cne_s:.*xvfcmp\\.cne\\.s.*lasx_xvfcmp_cne_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cor_d:.*xvfcmp\\.cor\\.d.*lasx_xvfcmp_cor_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cor_s:.*xvfcmp\\.cor\\.s.*lasx_xvfcmp_cor_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cueq_d:.*xvfcmp\\.cueq\\.d.*lasx_xvfcmp_cueq_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cueq_s:.*xvfcmp\\.cueq\\.s.*lasx_xvfcmp_cueq_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cule_d:.*xvfcmp\\.cule\\.d.*lasx_xvfcmp_cule_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cule_s:.*xvfcmp\\.cule\\.s.*lasx_xvfcmp_cule_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cult_d:.*xvfcmp\\.cult\\.d.*lasx_xvfcmp_cult_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cult_s:.*xvfcmp\\.cult\\.s.*lasx_xvfcmp_cult_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cun_d:.*xvfcmp\\.cun\\.d.*lasx_xvfcmp_cun_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cune_d:.*xvfcmp\\.cune\\.d.*lasx_xvfcmp_cune_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cune_s:.*xvfcmp\\.cune\\.s.*lasx_xvfcmp_cune_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cun_s:.*xvfcmp\\.cun\\.s.*lasx_xvfcmp_cun_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_saf_d:.*xvfcmp\\.saf\\.d.*lasx_xvfcmp_saf_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_saf_s:.*xvfcmp\\.saf\\.s.*lasx_xvfcmp_saf_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_seq_d:.*xvfcmp\\.seq\\.d.*lasx_xvfcmp_seq_d" 1 } } */ ++/* { dg-final { scan-assembler-times 
"lasx_xvfcmp_seq_s:.*xvfcmp\\.seq\\.s.*lasx_xvfcmp_seq_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sle_d:.*xvfcmp\\.sle\\.d.*lasx_xvfcmp_sle_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sle_s:.*xvfcmp\\.sle\\.s.*lasx_xvfcmp_sle_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_slt_d:.*xvfcmp\\.slt\\.d.*lasx_xvfcmp_slt_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_slt_s:.*xvfcmp\\.slt\\.s.*lasx_xvfcmp_slt_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sne_d:.*xvfcmp\\.sne\\.d.*lasx_xvfcmp_sne_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sne_s:.*xvfcmp\\.sne\\.s.*lasx_xvfcmp_sne_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sor_d:.*xvfcmp\\.sor\\.d.*lasx_xvfcmp_sor_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sor_s:.*xvfcmp\\.sor\\.s.*lasx_xvfcmp_sor_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sueq_d:.*xvfcmp\\.sueq\\.d.*lasx_xvfcmp_sueq_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sueq_s:.*xvfcmp\\.sueq\\.s.*lasx_xvfcmp_sueq_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sule_d:.*xvfcmp\\.sule\\.d.*lasx_xvfcmp_sule_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sule_s:.*xvfcmp\\.sule\\.s.*lasx_xvfcmp_sule_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sult_d:.*xvfcmp\\.sult\\.d.*lasx_xvfcmp_sult_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sult_s:.*xvfcmp\\.sult\\.s.*lasx_xvfcmp_sult_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sun_d:.*xvfcmp\\.sun\\.d.*lasx_xvfcmp_sun_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sune_d:.*xvfcmp\\.sune\\.d.*lasx_xvfcmp_sune_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sune_s:.*xvfcmp\\.sune\\.s.*lasx_xvfcmp_sune_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sun_s:.*xvfcmp\\.sun\\.s.*lasx_xvfcmp_sun_s" 1 } } */ 
++/* { dg-final { scan-assembler-times "lasx_xvpickve_d_f:.*xvpickve\\.d.*lasx_xvpickve_d_f" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpickve_w_f:.*xvpickve\\.w.*lasx_xvpickve_w_f" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvrepli_b:.*xvrepli\\.b.*lasx_xvrepli_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvrepli_d:.*xvrepli\\.d.*lasx_xvrepli_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvrepli_h:.*xvrepli\\.h.*lasx_xvrepli_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvrepli_w:.*xvrepli\\.w.*lasx_xvrepli_w" 1 } } */ ++ ++typedef signed char v32i8 __attribute__ ((vector_size (32), aligned (32))); ++typedef signed char v32i8_b __attribute__ ((vector_size (32), aligned (1))); ++typedef unsigned char v32u8 __attribute__ ((vector_size (32), aligned (32))); ++typedef unsigned char v32u8_b __attribute__ ((vector_size (32), aligned (1))); ++typedef short v16i16 __attribute__ ((vector_size (32), aligned (32))); ++typedef short v16i16_h __attribute__ ((vector_size (32), aligned (2))); ++typedef unsigned short v16u16 __attribute__ ((vector_size (32), aligned (32))); ++typedef unsigned short v16u16_h ++ __attribute__ ((vector_size (32), aligned (2))); ++typedef int v8i32 __attribute__ ((vector_size (32), aligned (32))); ++typedef int v8i32_w __attribute__ ((vector_size (32), aligned (4))); ++typedef unsigned int v8u32 __attribute__ ((vector_size (32), aligned (32))); ++typedef unsigned int v8u32_w __attribute__ ((vector_size (32), aligned (4))); ++typedef long long v4i64 __attribute__ ((vector_size (32), aligned (32))); ++typedef long long v4i64_d __attribute__ ((vector_size (32), aligned (8))); ++typedef unsigned long long v4u64 ++ __attribute__ ((vector_size (32), aligned (32))); ++typedef unsigned long long v4u64_d ++ __attribute__ ((vector_size (32), aligned (8))); ++typedef float v8f32 __attribute__ ((vector_size (32), aligned (32))); ++typedef float v8f32_w __attribute__ ((vector_size (32), aligned (4))); 
++typedef double v4f64 __attribute__ ((vector_size (32), aligned (32))); ++typedef double v4f64_d __attribute__ ((vector_size (32), aligned (8))); ++ ++typedef double v4f64 __attribute__ ((vector_size (32), aligned (32))); ++typedef double v4f64_d __attribute__ ((vector_size (32), aligned (8))); ++ ++typedef float __m256 __attribute__ ((__vector_size__ (32), __may_alias__)); ++typedef long long __m256i ++ __attribute__ ((__vector_size__ (32), __may_alias__)); ++typedef double __m256d __attribute__ ((__vector_size__ (32), __may_alias__)); ++ ++/* Unaligned version of the same types. */ ++typedef float __m256_u ++ __attribute__ ((__vector_size__ (32), __may_alias__, __aligned__ (1))); ++typedef long long __m256i_u ++ __attribute__ ((__vector_size__ (32), __may_alias__, __aligned__ (1))); ++typedef double __m256d_u ++ __attribute__ ((__vector_size__ (32), __may_alias__, __aligned__ (1))); ++ ++v32i8 ++__lasx_xvsll_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvsll_b (_1, _2); ++} ++v16i16 ++__lasx_xvsll_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvsll_h (_1, _2); ++} ++v8i32 ++__lasx_xvsll_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvsll_w (_1, _2); ++} ++v4i64 ++__lasx_xvsll_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvsll_d (_1, _2); ++} ++v32i8 ++__lasx_xvslli_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvslli_b (_1, 1); ++} ++v16i16 ++__lasx_xvslli_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvslli_h (_1, 1); ++} ++v8i32 ++__lasx_xvslli_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvslli_w (_1, 1); ++} ++v4i64 ++__lasx_xvslli_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvslli_d (_1, 1); ++} ++v32i8 ++__lasx_xvsra_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvsra_b (_1, _2); ++} ++v16i16 ++__lasx_xvsra_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvsra_h (_1, _2); ++} ++v8i32 ++__lasx_xvsra_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvsra_w (_1, _2); ++} ++v4i64 ++__lasx_xvsra_d (v4i64 _1, v4i64 _2) ++{ ++ 
return __builtin_lasx_xvsra_d (_1, _2); ++} ++v32i8 ++__lasx_xvsrai_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvsrai_b (_1, 1); ++} ++v16i16 ++__lasx_xvsrai_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvsrai_h (_1, 1); ++} ++v8i32 ++__lasx_xvsrai_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvsrai_w (_1, 1); ++} ++v4i64 ++__lasx_xvsrai_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvsrai_d (_1, 1); ++} ++v32i8 ++__lasx_xvsrar_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvsrar_b (_1, _2); ++} ++v16i16 ++__lasx_xvsrar_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvsrar_h (_1, _2); ++} ++v8i32 ++__lasx_xvsrar_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvsrar_w (_1, _2); ++} ++v4i64 ++__lasx_xvsrar_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvsrar_d (_1, _2); ++} ++v32i8 ++__lasx_xvsrari_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvsrari_b (_1, 1); ++} ++v16i16 ++__lasx_xvsrari_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvsrari_h (_1, 1); ++} ++v8i32 ++__lasx_xvsrari_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvsrari_w (_1, 1); ++} ++v4i64 ++__lasx_xvsrari_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvsrari_d (_1, 1); ++} ++v32i8 ++__lasx_xvsrl_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvsrl_b (_1, _2); ++} ++v16i16 ++__lasx_xvsrl_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvsrl_h (_1, _2); ++} ++v8i32 ++__lasx_xvsrl_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvsrl_w (_1, _2); ++} ++v4i64 ++__lasx_xvsrl_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvsrl_d (_1, _2); ++} ++v32i8 ++__lasx_xvsrli_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvsrli_b (_1, 1); ++} ++v16i16 ++__lasx_xvsrli_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvsrli_h (_1, 1); ++} ++v8i32 ++__lasx_xvsrli_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvsrli_w (_1, 1); ++} ++v4i64 ++__lasx_xvsrli_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvsrli_d (_1, 1); ++} ++v32i8 ++__lasx_xvsrlr_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvsrlr_b (_1, _2); 
++} ++v16i16 ++__lasx_xvsrlr_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvsrlr_h (_1, _2); ++} ++v8i32 ++__lasx_xvsrlr_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvsrlr_w (_1, _2); ++} ++v4i64 ++__lasx_xvsrlr_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvsrlr_d (_1, _2); ++} ++v32i8 ++__lasx_xvsrlri_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvsrlri_b (_1, 1); ++} ++v16i16 ++__lasx_xvsrlri_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvsrlri_h (_1, 1); ++} ++v8i32 ++__lasx_xvsrlri_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvsrlri_w (_1, 1); ++} ++v4i64 ++__lasx_xvsrlri_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvsrlri_d (_1, 1); ++} ++v32u8 ++__lasx_xvbitclr_b (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvbitclr_b (_1, _2); ++} ++v16u16 ++__lasx_xvbitclr_h (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvbitclr_h (_1, _2); ++} ++v8u32 ++__lasx_xvbitclr_w (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvbitclr_w (_1, _2); ++} ++v4u64 ++__lasx_xvbitclr_d (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvbitclr_d (_1, _2); ++} ++v32u8 ++__lasx_xvbitclri_b (v32u8 _1) ++{ ++ return __builtin_lasx_xvbitclri_b (_1, 1); ++} ++v16u16 ++__lasx_xvbitclri_h (v16u16 _1) ++{ ++ return __builtin_lasx_xvbitclri_h (_1, 1); ++} ++v8u32 ++__lasx_xvbitclri_w (v8u32 _1) ++{ ++ return __builtin_lasx_xvbitclri_w (_1, 1); ++} ++v4u64 ++__lasx_xvbitclri_d (v4u64 _1) ++{ ++ return __builtin_lasx_xvbitclri_d (_1, 1); ++} ++v32u8 ++__lasx_xvbitset_b (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvbitset_b (_1, _2); ++} ++v16u16 ++__lasx_xvbitset_h (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvbitset_h (_1, _2); ++} ++v8u32 ++__lasx_xvbitset_w (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvbitset_w (_1, _2); ++} ++v4u64 ++__lasx_xvbitset_d (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvbitset_d (_1, _2); ++} ++v32u8 ++__lasx_xvbitseti_b (v32u8 _1) ++{ ++ return __builtin_lasx_xvbitseti_b (_1, 1); ++} ++v16u16 ++__lasx_xvbitseti_h 
(v16u16 _1) ++{ ++ return __builtin_lasx_xvbitseti_h (_1, 1); ++} ++v8u32 ++__lasx_xvbitseti_w (v8u32 _1) ++{ ++ return __builtin_lasx_xvbitseti_w (_1, 1); ++} ++v4u64 ++__lasx_xvbitseti_d (v4u64 _1) ++{ ++ return __builtin_lasx_xvbitseti_d (_1, 1); ++} ++v32u8 ++__lasx_xvbitrev_b (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvbitrev_b (_1, _2); ++} ++v16u16 ++__lasx_xvbitrev_h (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvbitrev_h (_1, _2); ++} ++v8u32 ++__lasx_xvbitrev_w (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvbitrev_w (_1, _2); ++} ++v4u64 ++__lasx_xvbitrev_d (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvbitrev_d (_1, _2); ++} ++v32u8 ++__lasx_xvbitrevi_b (v32u8 _1) ++{ ++ return __builtin_lasx_xvbitrevi_b (_1, 1); ++} ++v16u16 ++__lasx_xvbitrevi_h (v16u16 _1) ++{ ++ return __builtin_lasx_xvbitrevi_h (_1, 1); ++} ++v8u32 ++__lasx_xvbitrevi_w (v8u32 _1) ++{ ++ return __builtin_lasx_xvbitrevi_w (_1, 1); ++} ++v4u64 ++__lasx_xvbitrevi_d (v4u64 _1) ++{ ++ return __builtin_lasx_xvbitrevi_d (_1, 1); ++} ++v32i8 ++__lasx_xvadd_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvadd_b (_1, _2); ++} ++v16i16 ++__lasx_xvadd_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvadd_h (_1, _2); ++} ++v8i32 ++__lasx_xvadd_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvadd_w (_1, _2); ++} ++v4i64 ++__lasx_xvadd_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvadd_d (_1, _2); ++} ++v32i8 ++__lasx_xvaddi_bu (v32i8 _1) ++{ ++ return __builtin_lasx_xvaddi_bu (_1, 1); ++} ++v16i16 ++__lasx_xvaddi_hu (v16i16 _1) ++{ ++ return __builtin_lasx_xvaddi_hu (_1, 1); ++} ++v8i32 ++__lasx_xvaddi_wu (v8i32 _1) ++{ ++ return __builtin_lasx_xvaddi_wu (_1, 1); ++} ++v4i64 ++__lasx_xvaddi_du (v4i64 _1) ++{ ++ return __builtin_lasx_xvaddi_du (_1, 1); ++} ++v32i8 ++__lasx_xvsub_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvsub_b (_1, _2); ++} ++v16i16 ++__lasx_xvsub_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvsub_h (_1, _2); ++} 
++v8i32 ++__lasx_xvsub_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvsub_w (_1, _2); ++} ++v4i64 ++__lasx_xvsub_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvsub_d (_1, _2); ++} ++v32i8 ++__lasx_xvsubi_bu (v32i8 _1) ++{ ++ return __builtin_lasx_xvsubi_bu (_1, 1); ++} ++v16i16 ++__lasx_xvsubi_hu (v16i16 _1) ++{ ++ return __builtin_lasx_xvsubi_hu (_1, 1); ++} ++v8i32 ++__lasx_xvsubi_wu (v8i32 _1) ++{ ++ return __builtin_lasx_xvsubi_wu (_1, 1); ++} ++v4i64 ++__lasx_xvsubi_du (v4i64 _1) ++{ ++ return __builtin_lasx_xvsubi_du (_1, 1); ++} ++v32i8 ++__lasx_xvmax_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvmax_b (_1, _2); ++} ++v16i16 ++__lasx_xvmax_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvmax_h (_1, _2); ++} ++v8i32 ++__lasx_xvmax_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvmax_w (_1, _2); ++} ++v4i64 ++__lasx_xvmax_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvmax_d (_1, _2); ++} ++v32i8 ++__lasx_xvmaxi_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvmaxi_b (_1, 1); ++} ++v16i16 ++__lasx_xvmaxi_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvmaxi_h (_1, 1); ++} ++v8i32 ++__lasx_xvmaxi_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvmaxi_w (_1, 1); ++} ++v4i64 ++__lasx_xvmaxi_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvmaxi_d (_1, 1); ++} ++v32u8 ++__lasx_xvmax_bu (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvmax_bu (_1, _2); ++} ++v16u16 ++__lasx_xvmax_hu (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvmax_hu (_1, _2); ++} ++v8u32 ++__lasx_xvmax_wu (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvmax_wu (_1, _2); ++} ++v4u64 ++__lasx_xvmax_du (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvmax_du (_1, _2); ++} ++v32u8 ++__lasx_xvmaxi_bu (v32u8 _1) ++{ ++ return __builtin_lasx_xvmaxi_bu (_1, 1); ++} ++v16u16 ++__lasx_xvmaxi_hu (v16u16 _1) ++{ ++ return __builtin_lasx_xvmaxi_hu (_1, 1); ++} ++v8u32 ++__lasx_xvmaxi_wu (v8u32 _1) ++{ ++ return __builtin_lasx_xvmaxi_wu (_1, 1); ++} ++v4u64 ++__lasx_xvmaxi_du 
(v4u64 _1) ++{ ++ return __builtin_lasx_xvmaxi_du (_1, 1); ++} ++v32i8 ++__lasx_xvmin_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvmin_b (_1, _2); ++} ++v16i16 ++__lasx_xvmin_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvmin_h (_1, _2); ++} ++v8i32 ++__lasx_xvmin_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvmin_w (_1, _2); ++} ++v4i64 ++__lasx_xvmin_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvmin_d (_1, _2); ++} ++v32i8 ++__lasx_xvmini_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvmini_b (_1, 1); ++} ++v16i16 ++__lasx_xvmini_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvmini_h (_1, 1); ++} ++v8i32 ++__lasx_xvmini_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvmini_w (_1, 1); ++} ++v4i64 ++__lasx_xvmini_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvmini_d (_1, 1); ++} ++v32u8 ++__lasx_xvmin_bu (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvmin_bu (_1, _2); ++} ++v16u16 ++__lasx_xvmin_hu (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvmin_hu (_1, _2); ++} ++v8u32 ++__lasx_xvmin_wu (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvmin_wu (_1, _2); ++} ++v4u64 ++__lasx_xvmin_du (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvmin_du (_1, _2); ++} ++v32u8 ++__lasx_xvmini_bu (v32u8 _1) ++{ ++ return __builtin_lasx_xvmini_bu (_1, 1); ++} ++v16u16 ++__lasx_xvmini_hu (v16u16 _1) ++{ ++ return __builtin_lasx_xvmini_hu (_1, 1); ++} ++v8u32 ++__lasx_xvmini_wu (v8u32 _1) ++{ ++ return __builtin_lasx_xvmini_wu (_1, 1); ++} ++v4u64 ++__lasx_xvmini_du (v4u64 _1) ++{ ++ return __builtin_lasx_xvmini_du (_1, 1); ++} ++v32i8 ++__lasx_xvseq_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvseq_b (_1, _2); ++} ++v16i16 ++__lasx_xvseq_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvseq_h (_1, _2); ++} ++v8i32 ++__lasx_xvseq_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvseq_w (_1, _2); ++} ++v4i64 ++__lasx_xvseq_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvseq_d (_1, _2); ++} ++v32i8 ++__lasx_xvseqi_b (v32i8 _1) 
++{ ++ return __builtin_lasx_xvseqi_b (_1, 1); ++} ++v16i16 ++__lasx_xvseqi_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvseqi_h (_1, 1); ++} ++v8i32 ++__lasx_xvseqi_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvseqi_w (_1, 1); ++} ++v4i64 ++__lasx_xvseqi_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvseqi_d (_1, 1); ++} ++v32i8 ++__lasx_xvslt_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvslt_b (_1, _2); ++} ++v16i16 ++__lasx_xvslt_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvslt_h (_1, _2); ++} ++v8i32 ++__lasx_xvslt_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvslt_w (_1, _2); ++} ++v4i64 ++__lasx_xvslt_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvslt_d (_1, _2); ++} ++v32i8 ++__lasx_xvslti_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvslti_b (_1, 1); ++} ++v16i16 ++__lasx_xvslti_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvslti_h (_1, 1); ++} ++v8i32 ++__lasx_xvslti_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvslti_w (_1, 1); ++} ++v4i64 ++__lasx_xvslti_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvslti_d (_1, 1); ++} ++v32i8 ++__lasx_xvslt_bu (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvslt_bu (_1, _2); ++} ++v16i16 ++__lasx_xvslt_hu (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvslt_hu (_1, _2); ++} ++v8i32 ++__lasx_xvslt_wu (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvslt_wu (_1, _2); ++} ++v4i64 ++__lasx_xvslt_du (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvslt_du (_1, _2); ++} ++v32i8 ++__lasx_xvslti_bu (v32u8 _1) ++{ ++ return __builtin_lasx_xvslti_bu (_1, 1); ++} ++v16i16 ++__lasx_xvslti_hu (v16u16 _1) ++{ ++ return __builtin_lasx_xvslti_hu (_1, 1); ++} ++v8i32 ++__lasx_xvslti_wu (v8u32 _1) ++{ ++ return __builtin_lasx_xvslti_wu (_1, 1); ++} ++v4i64 ++__lasx_xvslti_du (v4u64 _1) ++{ ++ return __builtin_lasx_xvslti_du (_1, 1); ++} ++v32i8 ++__lasx_xvsle_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvsle_b (_1, _2); ++} ++v16i16 ++__lasx_xvsle_h (v16i16 _1, v16i16 _2) ++{ ++ return 
__builtin_lasx_xvsle_h (_1, _2); ++} ++v8i32 ++__lasx_xvsle_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvsle_w (_1, _2); ++} ++v4i64 ++__lasx_xvsle_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvsle_d (_1, _2); ++} ++v32i8 ++__lasx_xvslei_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvslei_b (_1, 1); ++} ++v16i16 ++__lasx_xvslei_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvslei_h (_1, 1); ++} ++v8i32 ++__lasx_xvslei_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvslei_w (_1, 1); ++} ++v4i64 ++__lasx_xvslei_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvslei_d (_1, 1); ++} ++v32i8 ++__lasx_xvsle_bu (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvsle_bu (_1, _2); ++} ++v16i16 ++__lasx_xvsle_hu (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvsle_hu (_1, _2); ++} ++v8i32 ++__lasx_xvsle_wu (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvsle_wu (_1, _2); ++} ++v4i64 ++__lasx_xvsle_du (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvsle_du (_1, _2); ++} ++v32i8 ++__lasx_xvslei_bu (v32u8 _1) ++{ ++ return __builtin_lasx_xvslei_bu (_1, 1); ++} ++v16i16 ++__lasx_xvslei_hu (v16u16 _1) ++{ ++ return __builtin_lasx_xvslei_hu (_1, 1); ++} ++v8i32 ++__lasx_xvslei_wu (v8u32 _1) ++{ ++ return __builtin_lasx_xvslei_wu (_1, 1); ++} ++v4i64 ++__lasx_xvslei_du (v4u64 _1) ++{ ++ return __builtin_lasx_xvslei_du (_1, 1); ++} ++v32i8 ++__lasx_xvsat_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvsat_b (_1, 1); ++} ++v16i16 ++__lasx_xvsat_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvsat_h (_1, 1); ++} ++v8i32 ++__lasx_xvsat_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvsat_w (_1, 1); ++} ++v4i64 ++__lasx_xvsat_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvsat_d (_1, 1); ++} ++v32u8 ++__lasx_xvsat_bu (v32u8 _1) ++{ ++ return __builtin_lasx_xvsat_bu (_1, 1); ++} ++v16u16 ++__lasx_xvsat_hu (v16u16 _1) ++{ ++ return __builtin_lasx_xvsat_hu (_1, 1); ++} ++v8u32 ++__lasx_xvsat_wu (v8u32 _1) ++{ ++ return __builtin_lasx_xvsat_wu (_1, 1); ++} ++v4u64 ++__lasx_xvsat_du (v4u64 _1) ++{ 
++ return __builtin_lasx_xvsat_du (_1, 1); ++} ++v32i8 ++__lasx_xvadda_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvadda_b (_1, _2); ++} ++v16i16 ++__lasx_xvadda_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvadda_h (_1, _2); ++} ++v8i32 ++__lasx_xvadda_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvadda_w (_1, _2); ++} ++v4i64 ++__lasx_xvadda_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvadda_d (_1, _2); ++} ++v32i8 ++__lasx_xvsadd_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvsadd_b (_1, _2); ++} ++v16i16 ++__lasx_xvsadd_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvsadd_h (_1, _2); ++} ++v8i32 ++__lasx_xvsadd_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvsadd_w (_1, _2); ++} ++v4i64 ++__lasx_xvsadd_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvsadd_d (_1, _2); ++} ++v32u8 ++__lasx_xvsadd_bu (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvsadd_bu (_1, _2); ++} ++v16u16 ++__lasx_xvsadd_hu (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvsadd_hu (_1, _2); ++} ++v8u32 ++__lasx_xvsadd_wu (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvsadd_wu (_1, _2); ++} ++v4u64 ++__lasx_xvsadd_du (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvsadd_du (_1, _2); ++} ++v32i8 ++__lasx_xvavg_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvavg_b (_1, _2); ++} ++v16i16 ++__lasx_xvavg_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvavg_h (_1, _2); ++} ++v8i32 ++__lasx_xvavg_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvavg_w (_1, _2); ++} ++v4i64 ++__lasx_xvavg_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvavg_d (_1, _2); ++} ++v32u8 ++__lasx_xvavg_bu (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvavg_bu (_1, _2); ++} ++v16u16 ++__lasx_xvavg_hu (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvavg_hu (_1, _2); ++} ++v8u32 ++__lasx_xvavg_wu (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvavg_wu (_1, _2); ++} ++v4u64 ++__lasx_xvavg_du (v4u64 _1, v4u64 _2) ++{ ++ 
return __builtin_lasx_xvavg_du (_1, _2); ++} ++v32i8 ++__lasx_xvavgr_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvavgr_b (_1, _2); ++} ++v16i16 ++__lasx_xvavgr_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvavgr_h (_1, _2); ++} ++v8i32 ++__lasx_xvavgr_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvavgr_w (_1, _2); ++} ++v4i64 ++__lasx_xvavgr_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvavgr_d (_1, _2); ++} ++v32u8 ++__lasx_xvavgr_bu (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvavgr_bu (_1, _2); ++} ++v16u16 ++__lasx_xvavgr_hu (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvavgr_hu (_1, _2); ++} ++v8u32 ++__lasx_xvavgr_wu (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvavgr_wu (_1, _2); ++} ++v4u64 ++__lasx_xvavgr_du (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvavgr_du (_1, _2); ++} ++v32i8 ++__lasx_xvssub_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvssub_b (_1, _2); ++} ++v16i16 ++__lasx_xvssub_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvssub_h (_1, _2); ++} ++v8i32 ++__lasx_xvssub_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvssub_w (_1, _2); ++} ++v4i64 ++__lasx_xvssub_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvssub_d (_1, _2); ++} ++v32u8 ++__lasx_xvssub_bu (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvssub_bu (_1, _2); ++} ++v16u16 ++__lasx_xvssub_hu (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvssub_hu (_1, _2); ++} ++v8u32 ++__lasx_xvssub_wu (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvssub_wu (_1, _2); ++} ++v4u64 ++__lasx_xvssub_du (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvssub_du (_1, _2); ++} ++v32i8 ++__lasx_xvabsd_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvabsd_b (_1, _2); ++} ++v16i16 ++__lasx_xvabsd_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvabsd_h (_1, _2); ++} ++v8i32 ++__lasx_xvabsd_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvabsd_w (_1, _2); ++} ++v4i64 ++__lasx_xvabsd_d (v4i64 _1, v4i64 
_2) ++{ ++ return __builtin_lasx_xvabsd_d (_1, _2); ++} ++v32u8 ++__lasx_xvabsd_bu (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvabsd_bu (_1, _2); ++} ++v16u16 ++__lasx_xvabsd_hu (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvabsd_hu (_1, _2); ++} ++v8u32 ++__lasx_xvabsd_wu (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvabsd_wu (_1, _2); ++} ++v4u64 ++__lasx_xvabsd_du (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvabsd_du (_1, _2); ++} ++v32i8 ++__lasx_xvmul_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvmul_b (_1, _2); ++} ++v16i16 ++__lasx_xvmul_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvmul_h (_1, _2); ++} ++v8i32 ++__lasx_xvmul_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvmul_w (_1, _2); ++} ++v4i64 ++__lasx_xvmul_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvmul_d (_1, _2); ++} ++v32i8 ++__lasx_xvmadd_b (v32i8 _1, v32i8 _2, v32i8 _3) ++{ ++ return __builtin_lasx_xvmadd_b (_1, _2, _3); ++} ++v16i16 ++__lasx_xvmadd_h (v16i16 _1, v16i16 _2, v16i16 _3) ++{ ++ return __builtin_lasx_xvmadd_h (_1, _2, _3); ++} ++v8i32 ++__lasx_xvmadd_w (v8i32 _1, v8i32 _2, v8i32 _3) ++{ ++ return __builtin_lasx_xvmadd_w (_1, _2, _3); ++} ++v4i64 ++__lasx_xvmadd_d (v4i64 _1, v4i64 _2, v4i64 _3) ++{ ++ return __builtin_lasx_xvmadd_d (_1, _2, _3); ++} ++v32i8 ++__lasx_xvmsub_b (v32i8 _1, v32i8 _2, v32i8 _3) ++{ ++ return __builtin_lasx_xvmsub_b (_1, _2, _3); ++} ++v16i16 ++__lasx_xvmsub_h (v16i16 _1, v16i16 _2, v16i16 _3) ++{ ++ return __builtin_lasx_xvmsub_h (_1, _2, _3); ++} ++v8i32 ++__lasx_xvmsub_w (v8i32 _1, v8i32 _2, v8i32 _3) ++{ ++ return __builtin_lasx_xvmsub_w (_1, _2, _3); ++} ++v4i64 ++__lasx_xvmsub_d (v4i64 _1, v4i64 _2, v4i64 _3) ++{ ++ return __builtin_lasx_xvmsub_d (_1, _2, _3); ++} ++v32i8 ++__lasx_xvdiv_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvdiv_b (_1, _2); ++} ++v16i16 ++__lasx_xvdiv_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvdiv_h (_1, _2); ++} ++v8i32 ++__lasx_xvdiv_w (v8i32 _1, 
v8i32 _2) ++{ ++ return __builtin_lasx_xvdiv_w (_1, _2); ++} ++v4i64 ++__lasx_xvdiv_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvdiv_d (_1, _2); ++} ++v32u8 ++__lasx_xvdiv_bu (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvdiv_bu (_1, _2); ++} ++v16u16 ++__lasx_xvdiv_hu (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvdiv_hu (_1, _2); ++} ++v8u32 ++__lasx_xvdiv_wu (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvdiv_wu (_1, _2); ++} ++v4u64 ++__lasx_xvdiv_du (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvdiv_du (_1, _2); ++} ++v16i16 ++__lasx_xvhaddw_h_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvhaddw_h_b (_1, _2); ++} ++v8i32 ++__lasx_xvhaddw_w_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvhaddw_w_h (_1, _2); ++} ++v4i64 ++__lasx_xvhaddw_d_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvhaddw_d_w (_1, _2); ++} ++v16u16 ++__lasx_xvhaddw_hu_bu (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvhaddw_hu_bu (_1, _2); ++} ++v8u32 ++__lasx_xvhaddw_wu_hu (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvhaddw_wu_hu (_1, _2); ++} ++v4u64 ++__lasx_xvhaddw_du_wu (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvhaddw_du_wu (_1, _2); ++} ++v16i16 ++__lasx_xvhsubw_h_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvhsubw_h_b (_1, _2); ++} ++v8i32 ++__lasx_xvhsubw_w_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvhsubw_w_h (_1, _2); ++} ++v4i64 ++__lasx_xvhsubw_d_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvhsubw_d_w (_1, _2); ++} ++v16i16 ++__lasx_xvhsubw_hu_bu (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvhsubw_hu_bu (_1, _2); ++} ++v8i32 ++__lasx_xvhsubw_wu_hu (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvhsubw_wu_hu (_1, _2); ++} ++v4i64 ++__lasx_xvhsubw_du_wu (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvhsubw_du_wu (_1, _2); ++} ++v32i8 ++__lasx_xvmod_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvmod_b (_1, _2); ++} ++v16i16 ++__lasx_xvmod_h (v16i16 _1, v16i16 _2) 
++{ ++ return __builtin_lasx_xvmod_h (_1, _2); ++} ++v8i32 ++__lasx_xvmod_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvmod_w (_1, _2); ++} ++v4i64 ++__lasx_xvmod_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvmod_d (_1, _2); ++} ++v32u8 ++__lasx_xvmod_bu (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvmod_bu (_1, _2); ++} ++v16u16 ++__lasx_xvmod_hu (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvmod_hu (_1, _2); ++} ++v8u32 ++__lasx_xvmod_wu (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvmod_wu (_1, _2); ++} ++v4u64 ++__lasx_xvmod_du (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvmod_du (_1, _2); ++} ++v32i8 ++__lasx_xvrepl128vei_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvrepl128vei_b (_1, 1); ++} ++v16i16 ++__lasx_xvrepl128vei_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvrepl128vei_h (_1, 1); ++} ++v8i32 ++__lasx_xvrepl128vei_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvrepl128vei_w (_1, 1); ++} ++v4i64 ++__lasx_xvrepl128vei_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvrepl128vei_d (_1, 1); ++} ++v32i8 ++__lasx_xvpickev_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvpickev_b (_1, _2); ++} ++v16i16 ++__lasx_xvpickev_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvpickev_h (_1, _2); ++} ++v8i32 ++__lasx_xvpickev_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvpickev_w (_1, _2); ++} ++v4i64 ++__lasx_xvpickev_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvpickev_d (_1, _2); ++} ++v32i8 ++__lasx_xvpickod_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvpickod_b (_1, _2); ++} ++v16i16 ++__lasx_xvpickod_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvpickod_h (_1, _2); ++} ++v8i32 ++__lasx_xvpickod_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvpickod_w (_1, _2); ++} ++v4i64 ++__lasx_xvpickod_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvpickod_d (_1, _2); ++} ++v32i8 ++__lasx_xvilvh_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvilvh_b (_1, _2); ++} ++v16i16 
++__lasx_xvilvh_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvilvh_h (_1, _2); ++} ++v8i32 ++__lasx_xvilvh_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvilvh_w (_1, _2); ++} ++v4i64 ++__lasx_xvilvh_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvilvh_d (_1, _2); ++} ++v32i8 ++__lasx_xvilvl_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvilvl_b (_1, _2); ++} ++v16i16 ++__lasx_xvilvl_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvilvl_h (_1, _2); ++} ++v8i32 ++__lasx_xvilvl_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvilvl_w (_1, _2); ++} ++v4i64 ++__lasx_xvilvl_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvilvl_d (_1, _2); ++} ++v32i8 ++__lasx_xvpackev_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvpackev_b (_1, _2); ++} ++v16i16 ++__lasx_xvpackev_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvpackev_h (_1, _2); ++} ++v8i32 ++__lasx_xvpackev_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvpackev_w (_1, _2); ++} ++v4i64 ++__lasx_xvpackev_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvpackev_d (_1, _2); ++} ++v32i8 ++__lasx_xvpackod_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvpackod_b (_1, _2); ++} ++v16i16 ++__lasx_xvpackod_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvpackod_h (_1, _2); ++} ++v8i32 ++__lasx_xvpackod_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvpackod_w (_1, _2); ++} ++v4i64 ++__lasx_xvpackod_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvpackod_d (_1, _2); ++} ++v32i8 ++__lasx_xvshuf_b (v32i8 _1, v32i8 _2, v32i8 _3) ++{ ++ return __builtin_lasx_xvshuf_b (_1, _2, _3); ++} ++v16i16 ++__lasx_xvshuf_h (v16i16 _1, v16i16 _2, v16i16 _3) ++{ ++ return __builtin_lasx_xvshuf_h (_1, _2, _3); ++} ++v8i32 ++__lasx_xvshuf_w (v8i32 _1, v8i32 _2, v8i32 _3) ++{ ++ return __builtin_lasx_xvshuf_w (_1, _2, _3); ++} ++v4i64 ++__lasx_xvshuf_d (v4i64 _1, v4i64 _2, v4i64 _3) ++{ ++ return __builtin_lasx_xvshuf_d (_1, _2, _3); ++} ++v32u8 
++__lasx_xvand_v (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvand_v (_1, _2); ++} ++v32u8 ++__lasx_xvandi_b (v32u8 _1) ++{ ++ return __builtin_lasx_xvandi_b (_1, 1); ++} ++v32u8 ++__lasx_xvor_v (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvor_v (_1, _2); ++} ++v32u8 ++__lasx_xvori_b (v32u8 _1) ++{ ++ return __builtin_lasx_xvori_b (_1, 1); ++} ++v32u8 ++__lasx_xvnor_v (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvnor_v (_1, _2); ++} ++v32u8 ++__lasx_xvnori_b (v32u8 _1) ++{ ++ return __builtin_lasx_xvnori_b (_1, 1); ++} ++v32u8 ++__lasx_xvxor_v (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvxor_v (_1, _2); ++} ++v32u8 ++__lasx_xvxori_b (v32u8 _1) ++{ ++ return __builtin_lasx_xvxori_b (_1, 1); ++} ++v32u8 ++__lasx_xvbitsel_v (v32u8 _1, v32u8 _2, v32u8 _3) ++{ ++ return __builtin_lasx_xvbitsel_v (_1, _2, _3); ++} ++v32u8 ++__lasx_xvbitseli_b (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvbitseli_b (_1, _2, 1); ++} ++v32i8 ++__lasx_xvshuf4i_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvshuf4i_b (_1, 1); ++} ++v16i16 ++__lasx_xvshuf4i_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvshuf4i_h (_1, 1); ++} ++v8i32 ++__lasx_xvshuf4i_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvshuf4i_w (_1, 1); ++} ++v32i8 ++__lasx_xvreplgr2vr_b (int _1) ++{ ++ return __builtin_lasx_xvreplgr2vr_b (_1); ++} ++v16i16 ++__lasx_xvreplgr2vr_h (int _1) ++{ ++ return __builtin_lasx_xvreplgr2vr_h (_1); ++} ++v8i32 ++__lasx_xvreplgr2vr_w (int _1) ++{ ++ return __builtin_lasx_xvreplgr2vr_w (_1); ++} ++v4i64 ++__lasx_xvreplgr2vr_d (int _1) ++{ ++ return __builtin_lasx_xvreplgr2vr_d (_1); ++} ++v32i8 ++__lasx_xvpcnt_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvpcnt_b (_1); ++} ++v16i16 ++__lasx_xvpcnt_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvpcnt_h (_1); ++} ++v8i32 ++__lasx_xvpcnt_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvpcnt_w (_1); ++} ++v4i64 ++__lasx_xvpcnt_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvpcnt_d (_1); ++} ++v32i8 ++__lasx_xvclo_b (v32i8 _1) ++{ ++ return 
__builtin_lasx_xvclo_b (_1); ++} ++v16i16 ++__lasx_xvclo_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvclo_h (_1); ++} ++v8i32 ++__lasx_xvclo_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvclo_w (_1); ++} ++v4i64 ++__lasx_xvclo_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvclo_d (_1); ++} ++v32i8 ++__lasx_xvclz_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvclz_b (_1); ++} ++v16i16 ++__lasx_xvclz_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvclz_h (_1); ++} ++v8i32 ++__lasx_xvclz_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvclz_w (_1); ++} ++v4i64 ++__lasx_xvclz_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvclz_d (_1); ++} ++v8f32 ++__lasx_xvfadd_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfadd_s (_1, _2); ++} ++v4f64 ++__lasx_xvfadd_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfadd_d (_1, _2); ++} ++v8f32 ++__lasx_xvfsub_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfsub_s (_1, _2); ++} ++v4f64 ++__lasx_xvfsub_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfsub_d (_1, _2); ++} ++v8f32 ++__lasx_xvfmul_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfmul_s (_1, _2); ++} ++v4f64 ++__lasx_xvfmul_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfmul_d (_1, _2); ++} ++v8f32 ++__lasx_xvfdiv_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfdiv_s (_1, _2); ++} ++v4f64 ++__lasx_xvfdiv_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfdiv_d (_1, _2); ++} ++v16i16 ++__lasx_xvfcvt_h_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcvt_h_s (_1, _2); ++} ++v8f32 ++__lasx_xvfcvt_s_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcvt_s_d (_1, _2); ++} ++v8f32 ++__lasx_xvfmin_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfmin_s (_1, _2); ++} ++v4f64 ++__lasx_xvfmin_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfmin_d (_1, _2); ++} ++v8f32 ++__lasx_xvfmina_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfmina_s (_1, _2); ++} ++v4f64 ++__lasx_xvfmina_d (v4f64 _1, v4f64 _2) ++{ ++ return 
__builtin_lasx_xvfmina_d (_1, _2); ++} ++v8f32 ++__lasx_xvfmax_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfmax_s (_1, _2); ++} ++v4f64 ++__lasx_xvfmax_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfmax_d (_1, _2); ++} ++v8f32 ++__lasx_xvfmaxa_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfmaxa_s (_1, _2); ++} ++v4f64 ++__lasx_xvfmaxa_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfmaxa_d (_1, _2); ++} ++v8i32 ++__lasx_xvfclass_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvfclass_s (_1); ++} ++v4i64 ++__lasx_xvfclass_d (v4f64 _1) ++{ ++ return __builtin_lasx_xvfclass_d (_1); ++} ++v8f32 ++__lasx_xvfsqrt_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvfsqrt_s (_1); ++} ++v4f64 ++__lasx_xvfsqrt_d (v4f64 _1) ++{ ++ return __builtin_lasx_xvfsqrt_d (_1); ++} ++v8f32 ++__lasx_xvfrecip_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvfrecip_s (_1); ++} ++v4f64 ++__lasx_xvfrecip_d (v4f64 _1) ++{ ++ return __builtin_lasx_xvfrecip_d (_1); ++} ++v8f32 ++__lasx_xvfrint_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvfrint_s (_1); ++} ++v4f64 ++__lasx_xvfrint_d (v4f64 _1) ++{ ++ return __builtin_lasx_xvfrint_d (_1); ++} ++v8f32 ++__lasx_xvfrsqrt_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvfrsqrt_s (_1); ++} ++v4f64 ++__lasx_xvfrsqrt_d (v4f64 _1) ++{ ++ return __builtin_lasx_xvfrsqrt_d (_1); ++} ++v8f32 ++__lasx_xvflogb_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvflogb_s (_1); ++} ++v4f64 ++__lasx_xvflogb_d (v4f64 _1) ++{ ++ return __builtin_lasx_xvflogb_d (_1); ++} ++v8f32 ++__lasx_xvfcvth_s_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvfcvth_s_h (_1); ++} ++v4f64 ++__lasx_xvfcvth_d_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvfcvth_d_s (_1); ++} ++v8f32 ++__lasx_xvfcvtl_s_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvfcvtl_s_h (_1); ++} ++v4f64 ++__lasx_xvfcvtl_d_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvfcvtl_d_s (_1); ++} ++v8i32 ++__lasx_xvftint_w_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvftint_w_s (_1); ++} ++v4i64 ++__lasx_xvftint_l_d (v4f64 _1) ++{ ++ 
return __builtin_lasx_xvftint_l_d (_1); ++} ++v8u32 ++__lasx_xvftint_wu_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvftint_wu_s (_1); ++} ++v4u64 ++__lasx_xvftint_lu_d (v4f64 _1) ++{ ++ return __builtin_lasx_xvftint_lu_d (_1); ++} ++v8i32 ++__lasx_xvftintrz_w_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvftintrz_w_s (_1); ++} ++v4i64 ++__lasx_xvftintrz_l_d (v4f64 _1) ++{ ++ return __builtin_lasx_xvftintrz_l_d (_1); ++} ++v8u32 ++__lasx_xvftintrz_wu_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvftintrz_wu_s (_1); ++} ++v4u64 ++__lasx_xvftintrz_lu_d (v4f64 _1) ++{ ++ return __builtin_lasx_xvftintrz_lu_d (_1); ++} ++v8f32 ++__lasx_xvffint_s_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvffint_s_w (_1); ++} ++v4f64 ++__lasx_xvffint_d_l (v4i64 _1) ++{ ++ return __builtin_lasx_xvffint_d_l (_1); ++} ++v8f32 ++__lasx_xvffint_s_wu (v8u32 _1) ++{ ++ return __builtin_lasx_xvffint_s_wu (_1); ++} ++v4f64 ++__lasx_xvffint_d_lu (v4u64 _1) ++{ ++ return __builtin_lasx_xvffint_d_lu (_1); ++} ++v32i8 ++__lasx_xvreplve_b (v32i8 _1, int _2) ++{ ++ return __builtin_lasx_xvreplve_b (_1, _2); ++} ++v16i16 ++__lasx_xvreplve_h (v16i16 _1, int _2) ++{ ++ return __builtin_lasx_xvreplve_h (_1, _2); ++} ++v8i32 ++__lasx_xvreplve_w (v8i32 _1, int _2) ++{ ++ return __builtin_lasx_xvreplve_w (_1, _2); ++} ++v4i64 ++__lasx_xvreplve_d (v4i64 _1, int _2) ++{ ++ return __builtin_lasx_xvreplve_d (_1, _2); ++} ++v8i32 ++__lasx_xvpermi_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvpermi_w (_1, _2, 1); ++} ++v32u8 ++__lasx_xvandn_v (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvandn_v (_1, _2); ++} ++v32i8 ++__lasx_xvneg_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvneg_b (_1); ++} ++v16i16 ++__lasx_xvneg_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvneg_h (_1); ++} ++v8i32 ++__lasx_xvneg_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvneg_w (_1); ++} ++v4i64 ++__lasx_xvneg_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvneg_d (_1); ++} ++v32i8 ++__lasx_xvmuh_b (v32i8 _1, v32i8 _2) ++{ ++ return 
__builtin_lasx_xvmuh_b (_1, _2); ++} ++v16i16 ++__lasx_xvmuh_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvmuh_h (_1, _2); ++} ++v8i32 ++__lasx_xvmuh_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvmuh_w (_1, _2); ++} ++v4i64 ++__lasx_xvmuh_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvmuh_d (_1, _2); ++} ++v32u8 ++__lasx_xvmuh_bu (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvmuh_bu (_1, _2); ++} ++v16u16 ++__lasx_xvmuh_hu (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvmuh_hu (_1, _2); ++} ++v8u32 ++__lasx_xvmuh_wu (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvmuh_wu (_1, _2); ++} ++v4u64 ++__lasx_xvmuh_du (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvmuh_du (_1, _2); ++} ++v16i16 ++__lasx_xvsllwil_h_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvsllwil_h_b (_1, 1); ++} ++v8i32 ++__lasx_xvsllwil_w_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvsllwil_w_h (_1, 1); ++} ++v4i64 ++__lasx_xvsllwil_d_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvsllwil_d_w (_1, 1); ++} ++v16u16 ++__lasx_xvsllwil_hu_bu (v32u8 _1) ++{ ++ return __builtin_lasx_xvsllwil_hu_bu (_1, 1); ++} ++v8u32 ++__lasx_xvsllwil_wu_hu (v16u16 _1) ++{ ++ return __builtin_lasx_xvsllwil_wu_hu (_1, 1); ++} ++v4u64 ++__lasx_xvsllwil_du_wu (v8u32 _1) ++{ ++ return __builtin_lasx_xvsllwil_du_wu (_1, 1); ++} ++v32i8 ++__lasx_xvsran_b_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvsran_b_h (_1, _2); ++} ++v16i16 ++__lasx_xvsran_h_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvsran_h_w (_1, _2); ++} ++v8i32 ++__lasx_xvsran_w_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvsran_w_d (_1, _2); ++} ++v32i8 ++__lasx_xvssran_b_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvssran_b_h (_1, _2); ++} ++v16i16 ++__lasx_xvssran_h_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvssran_h_w (_1, _2); ++} ++v8i32 ++__lasx_xvssran_w_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvssran_w_d (_1, _2); ++} ++v32u8 ++__lasx_xvssran_bu_h (v16u16 _1, 
v16u16 _2) ++{ ++ return __builtin_lasx_xvssran_bu_h (_1, _2); ++} ++v16u16 ++__lasx_xvssran_hu_w (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvssran_hu_w (_1, _2); ++} ++v8u32 ++__lasx_xvssran_wu_d (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvssran_wu_d (_1, _2); ++} ++v32i8 ++__lasx_xvsrarn_b_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvsrarn_b_h (_1, _2); ++} ++v16i16 ++__lasx_xvsrarn_h_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvsrarn_h_w (_1, _2); ++} ++v8i32 ++__lasx_xvsrarn_w_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvsrarn_w_d (_1, _2); ++} ++v32i8 ++__lasx_xvssrarn_b_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvssrarn_b_h (_1, _2); ++} ++v16i16 ++__lasx_xvssrarn_h_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvssrarn_h_w (_1, _2); ++} ++v8i32 ++__lasx_xvssrarn_w_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvssrarn_w_d (_1, _2); ++} ++v32u8 ++__lasx_xvssrarn_bu_h (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvssrarn_bu_h (_1, _2); ++} ++v16u16 ++__lasx_xvssrarn_hu_w (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvssrarn_hu_w (_1, _2); ++} ++v8u32 ++__lasx_xvssrarn_wu_d (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvssrarn_wu_d (_1, _2); ++} ++v32i8 ++__lasx_xvsrln_b_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvsrln_b_h (_1, _2); ++} ++v16i16 ++__lasx_xvsrln_h_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvsrln_h_w (_1, _2); ++} ++v8i32 ++__lasx_xvsrln_w_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvsrln_w_d (_1, _2); ++} ++v32u8 ++__lasx_xvssrln_bu_h (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvssrln_bu_h (_1, _2); ++} ++v16u16 ++__lasx_xvssrln_hu_w (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvssrln_hu_w (_1, _2); ++} ++v8u32 ++__lasx_xvssrln_wu_d (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvssrln_wu_d (_1, _2); ++} ++v32i8 ++__lasx_xvsrlrn_b_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvsrlrn_b_h (_1, _2); ++} 
++v16i16 ++__lasx_xvsrlrn_h_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvsrlrn_h_w (_1, _2); ++} ++v8i32 ++__lasx_xvsrlrn_w_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvsrlrn_w_d (_1, _2); ++} ++v32u8 ++__lasx_xvssrlrn_bu_h (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvssrlrn_bu_h (_1, _2); ++} ++v16u16 ++__lasx_xvssrlrn_hu_w (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvssrlrn_hu_w (_1, _2); ++} ++v8u32 ++__lasx_xvssrlrn_wu_d (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvssrlrn_wu_d (_1, _2); ++} ++v32i8 ++__lasx_xvfrstpi_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvfrstpi_b (_1, _2, 1); ++} ++v16i16 ++__lasx_xvfrstpi_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvfrstpi_h (_1, _2, 1); ++} ++v32i8 ++__lasx_xvfrstp_b (v32i8 _1, v32i8 _2, v32i8 _3) ++{ ++ return __builtin_lasx_xvfrstp_b (_1, _2, _3); ++} ++v16i16 ++__lasx_xvfrstp_h (v16i16 _1, v16i16 _2, v16i16 _3) ++{ ++ return __builtin_lasx_xvfrstp_h (_1, _2, _3); ++} ++v4i64 ++__lasx_xvshuf4i_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvshuf4i_d (_1, _2, 1); ++} ++v32i8 ++__lasx_xvbsrl_v (v32i8 _1) ++{ ++ return __builtin_lasx_xvbsrl_v (_1, 1); ++} ++v32i8 ++__lasx_xvbsll_v (v32i8 _1) ++{ ++ return __builtin_lasx_xvbsll_v (_1, 1); ++} ++v32i8 ++__lasx_xvextrins_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvextrins_b (_1, _2, 1); ++} ++v16i16 ++__lasx_xvextrins_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvextrins_h (_1, _2, 1); ++} ++v8i32 ++__lasx_xvextrins_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvextrins_w (_1, _2, 1); ++} ++v4i64 ++__lasx_xvextrins_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvextrins_d (_1, _2, 1); ++} ++v32i8 ++__lasx_xvmskltz_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvmskltz_b (_1); ++} ++v16i16 ++__lasx_xvmskltz_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvmskltz_h (_1); ++} ++v8i32 ++__lasx_xvmskltz_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvmskltz_w (_1); ++} ++v4i64 
++__lasx_xvmskltz_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvmskltz_d (_1); ++} ++v32i8 ++__lasx_xvsigncov_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvsigncov_b (_1, _2); ++} ++v16i16 ++__lasx_xvsigncov_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvsigncov_h (_1, _2); ++} ++v8i32 ++__lasx_xvsigncov_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvsigncov_w (_1, _2); ++} ++v4i64 ++__lasx_xvsigncov_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvsigncov_d (_1, _2); ++} ++v8f32 ++__lasx_xvfmadd_s (v8f32 _1, v8f32 _2, v8f32 _3) ++{ ++ return __builtin_lasx_xvfmadd_s (_1, _2, _3); ++} ++v4f64 ++__lasx_xvfmadd_d (v4f64 _1, v4f64 _2, v4f64 _3) ++{ ++ return __builtin_lasx_xvfmadd_d (_1, _2, _3); ++} ++v8f32 ++__lasx_xvfmsub_s (v8f32 _1, v8f32 _2, v8f32 _3) ++{ ++ return __builtin_lasx_xvfmsub_s (_1, _2, _3); ++} ++v4f64 ++__lasx_xvfmsub_d (v4f64 _1, v4f64 _2, v4f64 _3) ++{ ++ return __builtin_lasx_xvfmsub_d (_1, _2, _3); ++} ++v8f32 ++__lasx_xvfnmadd_s (v8f32 _1, v8f32 _2, v8f32 _3) ++{ ++ return __builtin_lasx_xvfnmadd_s (_1, _2, _3); ++} ++v4f64 ++__lasx_xvfnmadd_d (v4f64 _1, v4f64 _2, v4f64 _3) ++{ ++ return __builtin_lasx_xvfnmadd_d (_1, _2, _3); ++} ++v8f32 ++__lasx_xvfnmsub_s (v8f32 _1, v8f32 _2, v8f32 _3) ++{ ++ return __builtin_lasx_xvfnmsub_s (_1, _2, _3); ++} ++v4f64 ++__lasx_xvfnmsub_d (v4f64 _1, v4f64 _2, v4f64 _3) ++{ ++ return __builtin_lasx_xvfnmsub_d (_1, _2, _3); ++} ++v8i32 ++__lasx_xvftintrne_w_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvftintrne_w_s (_1); ++} ++v4i64 ++__lasx_xvftintrne_l_d (v4f64 _1) ++{ ++ return __builtin_lasx_xvftintrne_l_d (_1); ++} ++v8i32 ++__lasx_xvftintrp_w_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvftintrp_w_s (_1); ++} ++v4i64 ++__lasx_xvftintrp_l_d (v4f64 _1) ++{ ++ return __builtin_lasx_xvftintrp_l_d (_1); ++} ++v8i32 ++__lasx_xvftintrm_w_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvftintrm_w_s (_1); ++} ++v4i64 ++__lasx_xvftintrm_l_d (v4f64 _1) ++{ ++ return __builtin_lasx_xvftintrm_l_d 
(_1); ++} ++v8i32 ++__lasx_xvftint_w_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvftint_w_d (_1, _2); ++} ++v8f32 ++__lasx_xvffint_s_l (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvffint_s_l (_1, _2); ++} ++v8i32 ++__lasx_xvftintrz_w_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvftintrz_w_d (_1, _2); ++} ++v8i32 ++__lasx_xvftintrp_w_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvftintrp_w_d (_1, _2); ++} ++v8i32 ++__lasx_xvftintrm_w_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvftintrm_w_d (_1, _2); ++} ++v8i32 ++__lasx_xvftintrne_w_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvftintrne_w_d (_1, _2); ++} ++v4i64 ++__lasx_xvftinth_l_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvftinth_l_s (_1); ++} ++v4i64 ++__lasx_xvftintl_l_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvftintl_l_s (_1); ++} ++v4f64 ++__lasx_xvffinth_d_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvffinth_d_w (_1); ++} ++v4f64 ++__lasx_xvffintl_d_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvffintl_d_w (_1); ++} ++v4i64 ++__lasx_xvftintrzh_l_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvftintrzh_l_s (_1); ++} ++v4i64 ++__lasx_xvftintrzl_l_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvftintrzl_l_s (_1); ++} ++v4i64 ++__lasx_xvftintrph_l_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvftintrph_l_s (_1); ++} ++v4i64 ++__lasx_xvftintrpl_l_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvftintrpl_l_s (_1); ++} ++v4i64 ++__lasx_xvftintrmh_l_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvftintrmh_l_s (_1); ++} ++v4i64 ++__lasx_xvftintrml_l_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvftintrml_l_s (_1); ++} ++v4i64 ++__lasx_xvftintrneh_l_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvftintrneh_l_s (_1); ++} ++v4i64 ++__lasx_xvftintrnel_l_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvftintrnel_l_s (_1); ++} ++v8f32 ++__lasx_xvfrintrne_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvfrintrne_s (_1); ++} ++v4f64 ++__lasx_xvfrintrne_d (v4f64 _1) ++{ ++ return __builtin_lasx_xvfrintrne_d (_1); ++} 
++v8f32 ++__lasx_xvfrintrz_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvfrintrz_s (_1); ++} ++v4f64 ++__lasx_xvfrintrz_d (v4f64 _1) ++{ ++ return __builtin_lasx_xvfrintrz_d (_1); ++} ++v8f32 ++__lasx_xvfrintrp_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvfrintrp_s (_1); ++} ++v4f64 ++__lasx_xvfrintrp_d (v4f64 _1) ++{ ++ return __builtin_lasx_xvfrintrp_d (_1); ++} ++v8f32 ++__lasx_xvfrintrm_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvfrintrm_s (_1); ++} ++v4f64 ++__lasx_xvfrintrm_d (v4f64 _1) ++{ ++ return __builtin_lasx_xvfrintrm_d (_1); ++} ++v32i8 ++__lasx_xvld (void *_1) ++{ ++ return __builtin_lasx_xvld (_1, 1); ++} ++void ++__lasx_xvst (v32i8 _1, void *_2) ++{ ++ return __builtin_lasx_xvst (_1, _2, 1); ++} ++void ++__lasx_xvstelm_b (v32i8 _1, void *_2) ++{ ++ return __builtin_lasx_xvstelm_b (_1, _2, 1, 1); ++} ++void ++__lasx_xvstelm_h (v16i16 _1, void *_2) ++{ ++ return __builtin_lasx_xvstelm_h (_1, _2, 2, 1); ++} ++void ++__lasx_xvstelm_w (v8i32 _1, void *_2) ++{ ++ return __builtin_lasx_xvstelm_w (_1, _2, 4, 1); ++} ++void ++__lasx_xvstelm_d (v4i64 _1, void *_2) ++{ ++ return __builtin_lasx_xvstelm_d (_1, _2, 8, 1); ++} ++v8i32 ++__lasx_xvinsve0_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvinsve0_w (_1, _2, 1); ++} ++v4i64 ++__lasx_xvinsve0_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvinsve0_d (_1, _2, 1); ++} ++v8i32 ++__lasx_xvpickve_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvpickve_w (_1, 1); ++} ++v4i64 ++__lasx_xvpickve_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvpickve_d (_1, 1); ++} ++v32i8 ++__lasx_xvssrlrn_b_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvssrlrn_b_h (_1, _2); ++} ++v16i16 ++__lasx_xvssrlrn_h_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvssrlrn_h_w (_1, _2); ++} ++v8i32 ++__lasx_xvssrlrn_w_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvssrlrn_w_d (_1, _2); ++} ++v32i8 ++__lasx_xvssrln_b_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvssrln_b_h (_1, _2); ++} ++v16i16 
++__lasx_xvssrln_h_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvssrln_h_w (_1, _2); ++} ++v8i32 ++__lasx_xvssrln_w_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvssrln_w_d (_1, _2); ++} ++v32i8 ++__lasx_xvorn_v (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvorn_v (_1, _2); ++} ++v4i64 ++__lasx_xvldi () ++{ ++ return __builtin_lasx_xvldi (1); ++} ++v32i8 ++__lasx_xvldx (void *_1) ++{ ++ return __builtin_lasx_xvldx (_1, 1); ++} ++void ++__lasx_xvstx (v32i8 _1, void *_2) ++{ ++ return __builtin_lasx_xvstx (_1, _2, 1); ++} ++v4u64 ++__lasx_xvextl_qu_du (v4u64 _1) ++{ ++ return __builtin_lasx_xvextl_qu_du (_1); ++} ++v8i32 ++__lasx_xvinsgr2vr_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvinsgr2vr_w (_1, 1, 1); ++} ++v4i64 ++__lasx_xvinsgr2vr_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvinsgr2vr_d (_1, 1, 1); ++} ++v32i8 ++__lasx_xvreplve0_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvreplve0_b (_1); ++} ++v16i16 ++__lasx_xvreplve0_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvreplve0_h (_1); ++} ++v8i32 ++__lasx_xvreplve0_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvreplve0_w (_1); ++} ++v4i64 ++__lasx_xvreplve0_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvreplve0_d (_1); ++} ++v32i8 ++__lasx_xvreplve0_q (v32i8 _1) ++{ ++ return __builtin_lasx_xvreplve0_q (_1); ++} ++v16i16 ++__lasx_vext2xv_h_b (v32i8 _1) ++{ ++ return __builtin_lasx_vext2xv_h_b (_1); ++} ++v8i32 ++__lasx_vext2xv_w_h (v16i16 _1) ++{ ++ return __builtin_lasx_vext2xv_w_h (_1); ++} ++v4i64 ++__lasx_vext2xv_d_w (v8i32 _1) ++{ ++ return __builtin_lasx_vext2xv_d_w (_1); ++} ++v8i32 ++__lasx_vext2xv_w_b (v32i8 _1) ++{ ++ return __builtin_lasx_vext2xv_w_b (_1); ++} ++v4i64 ++__lasx_vext2xv_d_h (v16i16 _1) ++{ ++ return __builtin_lasx_vext2xv_d_h (_1); ++} ++v4i64 ++__lasx_vext2xv_d_b (v32i8 _1) ++{ ++ return __builtin_lasx_vext2xv_d_b (_1); ++} ++v16i16 ++__lasx_vext2xv_hu_bu (v32i8 _1) ++{ ++ return __builtin_lasx_vext2xv_hu_bu (_1); ++} ++v8i32 ++__lasx_vext2xv_wu_hu (v16i16 _1) ++{ ++ return 
__builtin_lasx_vext2xv_wu_hu (_1); ++} ++v4i64 ++__lasx_vext2xv_du_wu (v8i32 _1) ++{ ++ return __builtin_lasx_vext2xv_du_wu (_1); ++} ++v8i32 ++__lasx_vext2xv_wu_bu (v32i8 _1) ++{ ++ return __builtin_lasx_vext2xv_wu_bu (_1); ++} ++v4i64 ++__lasx_vext2xv_du_hu (v16i16 _1) ++{ ++ return __builtin_lasx_vext2xv_du_hu (_1); ++} ++v4i64 ++__lasx_vext2xv_du_bu (v32i8 _1) ++{ ++ return __builtin_lasx_vext2xv_du_bu (_1); ++} ++v32i8 ++__lasx_xvpermi_q (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvpermi_q (_1, _2, 1); ++} ++v4i64 ++__lasx_xvpermi_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvpermi_d (_1, 1); ++} ++v8i32 ++__lasx_xvperm_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvperm_w (_1, _2); ++} ++v32i8 ++__lasx_xvldrepl_b (void *_1) ++{ ++ return __builtin_lasx_xvldrepl_b (_1, 1); ++} ++v16i16 ++__lasx_xvldrepl_h (void *_1) ++{ ++ return __builtin_lasx_xvldrepl_h (_1, 2); ++} ++v8i32 ++__lasx_xvldrepl_w (void *_1) ++{ ++ return __builtin_lasx_xvldrepl_w (_1, 4); ++} ++v4i64 ++__lasx_xvldrepl_d (void *_1) ++{ ++ return __builtin_lasx_xvldrepl_d (_1, 8); ++} ++int ++__lasx_xvpickve2gr_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvpickve2gr_w (_1, 1); ++} ++unsigned int ++__lasx_xvpickve2gr_wu (v8i32 _1) ++{ ++ return __builtin_lasx_xvpickve2gr_wu (_1, 1); ++} ++long ++__lasx_xvpickve2gr_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvpickve2gr_d (_1, 1); ++} ++unsigned long int ++__lasx_xvpickve2gr_du (v4i64 _1) ++{ ++ return __builtin_lasx_xvpickve2gr_du (_1, 1); ++} ++v4i64 ++__lasx_xvaddwev_q_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvaddwev_q_d (_1, _2); ++} ++v4i64 ++__lasx_xvaddwev_d_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvaddwev_d_w (_1, _2); ++} ++v8i32 ++__lasx_xvaddwev_w_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvaddwev_w_h (_1, _2); ++} ++v16i16 ++__lasx_xvaddwev_h_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvaddwev_h_b (_1, _2); ++} ++v4i64 ++__lasx_xvaddwev_q_du (v4u64 _1, v4u64 _2) ++{ ++ return 
__builtin_lasx_xvaddwev_q_du (_1, _2); ++} ++v4i64 ++__lasx_xvaddwev_d_wu (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvaddwev_d_wu (_1, _2); ++} ++v8i32 ++__lasx_xvaddwev_w_hu (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvaddwev_w_hu (_1, _2); ++} ++v16i16 ++__lasx_xvaddwev_h_bu (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvaddwev_h_bu (_1, _2); ++} ++v4i64 ++__lasx_xvsubwev_q_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvsubwev_q_d (_1, _2); ++} ++v4i64 ++__lasx_xvsubwev_d_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvsubwev_d_w (_1, _2); ++} ++v8i32 ++__lasx_xvsubwev_w_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvsubwev_w_h (_1, _2); ++} ++v16i16 ++__lasx_xvsubwev_h_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvsubwev_h_b (_1, _2); ++} ++v4i64 ++__lasx_xvsubwev_q_du (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvsubwev_q_du (_1, _2); ++} ++v4i64 ++__lasx_xvsubwev_d_wu (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvsubwev_d_wu (_1, _2); ++} ++v8i32 ++__lasx_xvsubwev_w_hu (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvsubwev_w_hu (_1, _2); ++} ++v16i16 ++__lasx_xvsubwev_h_bu (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvsubwev_h_bu (_1, _2); ++} ++v4i64 ++__lasx_xvmulwev_q_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvmulwev_q_d (_1, _2); ++} ++v4i64 ++__lasx_xvmulwev_d_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvmulwev_d_w (_1, _2); ++} ++v8i32 ++__lasx_xvmulwev_w_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvmulwev_w_h (_1, _2); ++} ++v16i16 ++__lasx_xvmulwev_h_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvmulwev_h_b (_1, _2); ++} ++v4i64 ++__lasx_xvmulwev_q_du (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvmulwev_q_du (_1, _2); ++} ++v4i64 ++__lasx_xvmulwev_d_wu (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvmulwev_d_wu (_1, _2); ++} ++v8i32 ++__lasx_xvmulwev_w_hu (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvmulwev_w_hu (_1, _2); 
++} ++v16i16 ++__lasx_xvmulwev_h_bu (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvmulwev_h_bu (_1, _2); ++} ++v4i64 ++__lasx_xvaddwod_q_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvaddwod_q_d (_1, _2); ++} ++v4i64 ++__lasx_xvaddwod_d_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvaddwod_d_w (_1, _2); ++} ++v8i32 ++__lasx_xvaddwod_w_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvaddwod_w_h (_1, _2); ++} ++v16i16 ++__lasx_xvaddwod_h_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvaddwod_h_b (_1, _2); ++} ++v4i64 ++__lasx_xvaddwod_q_du (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvaddwod_q_du (_1, _2); ++} ++v4i64 ++__lasx_xvaddwod_d_wu (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvaddwod_d_wu (_1, _2); ++} ++v8i32 ++__lasx_xvaddwod_w_hu (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvaddwod_w_hu (_1, _2); ++} ++v16i16 ++__lasx_xvaddwod_h_bu (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvaddwod_h_bu (_1, _2); ++} ++v4i64 ++__lasx_xvsubwod_q_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvsubwod_q_d (_1, _2); ++} ++v4i64 ++__lasx_xvsubwod_d_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvsubwod_d_w (_1, _2); ++} ++v8i32 ++__lasx_xvsubwod_w_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvsubwod_w_h (_1, _2); ++} ++v16i16 ++__lasx_xvsubwod_h_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvsubwod_h_b (_1, _2); ++} ++v4i64 ++__lasx_xvsubwod_q_du (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvsubwod_q_du (_1, _2); ++} ++v4i64 ++__lasx_xvsubwod_d_wu (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvsubwod_d_wu (_1, _2); ++} ++v8i32 ++__lasx_xvsubwod_w_hu (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvsubwod_w_hu (_1, _2); ++} ++v16i16 ++__lasx_xvsubwod_h_bu (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvsubwod_h_bu (_1, _2); ++} ++v4i64 ++__lasx_xvmulwod_q_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvmulwod_q_d (_1, _2); ++} ++v4i64 ++__lasx_xvmulwod_d_w (v8i32 
_1, v8i32 _2) ++{ ++ return __builtin_lasx_xvmulwod_d_w (_1, _2); ++} ++v8i32 ++__lasx_xvmulwod_w_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvmulwod_w_h (_1, _2); ++} ++v16i16 ++__lasx_xvmulwod_h_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvmulwod_h_b (_1, _2); ++} ++v4i64 ++__lasx_xvmulwod_q_du (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvmulwod_q_du (_1, _2); ++} ++v4i64 ++__lasx_xvmulwod_d_wu (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvmulwod_d_wu (_1, _2); ++} ++v8i32 ++__lasx_xvmulwod_w_hu (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvmulwod_w_hu (_1, _2); ++} ++v16i16 ++__lasx_xvmulwod_h_bu (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvmulwod_h_bu (_1, _2); ++} ++v4i64 ++__lasx_xvaddwev_d_wu_w (v8u32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvaddwev_d_wu_w (_1, _2); ++} ++v8i32 ++__lasx_xvaddwev_w_hu_h (v16u16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvaddwev_w_hu_h (_1, _2); ++} ++v16i16 ++__lasx_xvaddwev_h_bu_b (v32u8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvaddwev_h_bu_b (_1, _2); ++} ++v4i64 ++__lasx_xvmulwev_d_wu_w (v8u32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvmulwev_d_wu_w (_1, _2); ++} ++v8i32 ++__lasx_xvmulwev_w_hu_h (v16u16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvmulwev_w_hu_h (_1, _2); ++} ++v16i16 ++__lasx_xvmulwev_h_bu_b (v32u8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvmulwev_h_bu_b (_1, _2); ++} ++v4i64 ++__lasx_xvaddwod_d_wu_w (v8u32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvaddwod_d_wu_w (_1, _2); ++} ++v8i32 ++__lasx_xvaddwod_w_hu_h (v16u16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvaddwod_w_hu_h (_1, _2); ++} ++v16i16 ++__lasx_xvaddwod_h_bu_b (v32u8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvaddwod_h_bu_b (_1, _2); ++} ++v4i64 ++__lasx_xvmulwod_d_wu_w (v8u32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvmulwod_d_wu_w (_1, _2); ++} ++v8i32 ++__lasx_xvmulwod_w_hu_h (v16u16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvmulwod_w_hu_h (_1, _2); ++} ++v16i16 
++__lasx_xvmulwod_h_bu_b (v32u8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvmulwod_h_bu_b (_1, _2); ++} ++v4i64 ++__lasx_xvhaddw_q_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvhaddw_q_d (_1, _2); ++} ++v4u64 ++__lasx_xvhaddw_qu_du (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvhaddw_qu_du (_1, _2); ++} ++v4i64 ++__lasx_xvhsubw_q_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvhsubw_q_d (_1, _2); ++} ++v4u64 ++__lasx_xvhsubw_qu_du (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvhsubw_qu_du (_1, _2); ++} ++v4i64 ++__lasx_xvmaddwev_q_d (v4i64 _1, v4i64 _2, v4i64 _3) ++{ ++ return __builtin_lasx_xvmaddwev_q_d (_1, _2, _3); ++} ++v4i64 ++__lasx_xvmaddwev_d_w (v4i64 _1, v8i32 _2, v8i32 _3) ++{ ++ return __builtin_lasx_xvmaddwev_d_w (_1, _2, _3); ++} ++v8i32 ++__lasx_xvmaddwev_w_h (v8i32 _1, v16i16 _2, v16i16 _3) ++{ ++ return __builtin_lasx_xvmaddwev_w_h (_1, _2, _3); ++} ++v16i16 ++__lasx_xvmaddwev_h_b (v16i16 _1, v32i8 _2, v32i8 _3) ++{ ++ return __builtin_lasx_xvmaddwev_h_b (_1, _2, _3); ++} ++v4u64 ++__lasx_xvmaddwev_q_du (v4u64 _1, v4u64 _2, v4u64 _3) ++{ ++ return __builtin_lasx_xvmaddwev_q_du (_1, _2, _3); ++} ++v4u64 ++__lasx_xvmaddwev_d_wu (v4u64 _1, v8u32 _2, v8u32 _3) ++{ ++ return __builtin_lasx_xvmaddwev_d_wu (_1, _2, _3); ++} ++v8u32 ++__lasx_xvmaddwev_w_hu (v8u32 _1, v16u16 _2, v16u16 _3) ++{ ++ return __builtin_lasx_xvmaddwev_w_hu (_1, _2, _3); ++} ++v16u16 ++__lasx_xvmaddwev_h_bu (v16u16 _1, v32u8 _2, v32u8 _3) ++{ ++ return __builtin_lasx_xvmaddwev_h_bu (_1, _2, _3); ++} ++v4i64 ++__lasx_xvmaddwod_q_d (v4i64 _1, v4i64 _2, v4i64 _3) ++{ ++ return __builtin_lasx_xvmaddwod_q_d (_1, _2, _3); ++} ++v4i64 ++__lasx_xvmaddwod_d_w (v4i64 _1, v8i32 _2, v8i32 _3) ++{ ++ return __builtin_lasx_xvmaddwod_d_w (_1, _2, _3); ++} ++v8i32 ++__lasx_xvmaddwod_w_h (v8i32 _1, v16i16 _2, v16i16 _3) ++{ ++ return __builtin_lasx_xvmaddwod_w_h (_1, _2, _3); ++} ++v16i16 ++__lasx_xvmaddwod_h_b (v16i16 _1, v32i8 _2, v32i8 _3) ++{ ++ return 
__builtin_lasx_xvmaddwod_h_b (_1, _2, _3); ++} ++v4u64 ++__lasx_xvmaddwod_q_du (v4u64 _1, v4u64 _2, v4u64 _3) ++{ ++ return __builtin_lasx_xvmaddwod_q_du (_1, _2, _3); ++} ++v4u64 ++__lasx_xvmaddwod_d_wu (v4u64 _1, v8u32 _2, v8u32 _3) ++{ ++ return __builtin_lasx_xvmaddwod_d_wu (_1, _2, _3); ++} ++v8u32 ++__lasx_xvmaddwod_w_hu (v8u32 _1, v16u16 _2, v16u16 _3) ++{ ++ return __builtin_lasx_xvmaddwod_w_hu (_1, _2, _3); ++} ++v16u16 ++__lasx_xvmaddwod_h_bu (v16u16 _1, v32u8 _2, v32u8 _3) ++{ ++ return __builtin_lasx_xvmaddwod_h_bu (_1, _2, _3); ++} ++v4i64 ++__lasx_xvmaddwev_q_du_d (v4i64 _1, v4u64 _2, v4i64 _3) ++{ ++ return __builtin_lasx_xvmaddwev_q_du_d (_1, _2, _3); ++} ++v4i64 ++__lasx_xvmaddwev_d_wu_w (v4i64 _1, v8u32 _2, v8i32 _3) ++{ ++ return __builtin_lasx_xvmaddwev_d_wu_w (_1, _2, _3); ++} ++v8i32 ++__lasx_xvmaddwev_w_hu_h (v8i32 _1, v16u16 _2, v16i16 _3) ++{ ++ return __builtin_lasx_xvmaddwev_w_hu_h (_1, _2, _3); ++} ++v16i16 ++__lasx_xvmaddwev_h_bu_b (v16i16 _1, v32u8 _2, v32i8 _3) ++{ ++ return __builtin_lasx_xvmaddwev_h_bu_b (_1, _2, _3); ++} ++v4i64 ++__lasx_xvmaddwod_q_du_d (v4i64 _1, v4u64 _2, v4i64 _3) ++{ ++ return __builtin_lasx_xvmaddwod_q_du_d (_1, _2, _3); ++} ++v4i64 ++__lasx_xvmaddwod_d_wu_w (v4i64 _1, v8u32 _2, v8i32 _3) ++{ ++ return __builtin_lasx_xvmaddwod_d_wu_w (_1, _2, _3); ++} ++v8i32 ++__lasx_xvmaddwod_w_hu_h (v8i32 _1, v16u16 _2, v16i16 _3) ++{ ++ return __builtin_lasx_xvmaddwod_w_hu_h (_1, _2, _3); ++} ++v16i16 ++__lasx_xvmaddwod_h_bu_b (v16i16 _1, v32u8 _2, v32i8 _3) ++{ ++ return __builtin_lasx_xvmaddwod_h_bu_b (_1, _2, _3); ++} ++v32i8 ++__lasx_xvrotr_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvrotr_b (_1, _2); ++} ++v16i16 ++__lasx_xvrotr_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvrotr_h (_1, _2); ++} ++v8i32 ++__lasx_xvrotr_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvrotr_w (_1, _2); ++} ++v4i64 ++__lasx_xvrotr_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvrotr_d (_1, _2); ++} ++v4i64 
++__lasx_xvadd_q (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvadd_q (_1, _2); ++} ++v4i64 ++__lasx_xvsub_q (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvsub_q (_1, _2); ++} ++v4i64 ++__lasx_xvaddwev_q_du_d (v4u64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvaddwev_q_du_d (_1, _2); ++} ++v4i64 ++__lasx_xvaddwod_q_du_d (v4u64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvaddwod_q_du_d (_1, _2); ++} ++v4i64 ++__lasx_xvmulwev_q_du_d (v4u64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvmulwev_q_du_d (_1, _2); ++} ++v4i64 ++__lasx_xvmulwod_q_du_d (v4u64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvmulwod_q_du_d (_1, _2); ++} ++v32i8 ++__lasx_xvmskgez_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvmskgez_b (_1); ++} ++v32i8 ++__lasx_xvmsknz_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvmsknz_b (_1); ++} ++v16i16 ++__lasx_xvexth_h_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvexth_h_b (_1); ++} ++v8i32 ++__lasx_xvexth_w_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvexth_w_h (_1); ++} ++v4i64 ++__lasx_xvexth_d_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvexth_d_w (_1); ++} ++v4i64 ++__lasx_xvexth_q_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvexth_q_d (_1); ++} ++v16u16 ++__lasx_xvexth_hu_bu (v32u8 _1) ++{ ++ return __builtin_lasx_xvexth_hu_bu (_1); ++} ++v8u32 ++__lasx_xvexth_wu_hu (v16u16 _1) ++{ ++ return __builtin_lasx_xvexth_wu_hu (_1); ++} ++v4u64 ++__lasx_xvexth_du_wu (v8u32 _1) ++{ ++ return __builtin_lasx_xvexth_du_wu (_1); ++} ++v4u64 ++__lasx_xvexth_qu_du (v4u64 _1) ++{ ++ return __builtin_lasx_xvexth_qu_du (_1); ++} ++v32i8 ++__lasx_xvrotri_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvrotri_b (_1, 1); ++} ++v16i16 ++__lasx_xvrotri_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvrotri_h (_1, 1); ++} ++v8i32 ++__lasx_xvrotri_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvrotri_w (_1, 1); ++} ++v4i64 ++__lasx_xvrotri_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvrotri_d (_1, 1); ++} ++v4i64 ++__lasx_xvextl_q_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvextl_q_d (_1); ++} 
++v32i8 ++__lasx_xvsrlni_b_h (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvsrlni_b_h (_1, _2, 1); ++} ++v16i16 ++__lasx_xvsrlni_h_w (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvsrlni_h_w (_1, _2, 1); ++} ++v8i32 ++__lasx_xvsrlni_w_d (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvsrlni_w_d (_1, _2, 1); ++} ++v4i64 ++__lasx_xvsrlni_d_q (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvsrlni_d_q (_1, _2, 1); ++} ++v32i8 ++__lasx_xvsrlrni_b_h (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvsrlrni_b_h (_1, _2, 1); ++} ++v16i16 ++__lasx_xvsrlrni_h_w (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvsrlrni_h_w (_1, _2, 1); ++} ++v8i32 ++__lasx_xvsrlrni_w_d (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvsrlrni_w_d (_1, _2, 1); ++} ++v4i64 ++__lasx_xvsrlrni_d_q (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvsrlrni_d_q (_1, _2, 1); ++} ++v32i8 ++__lasx_xvssrlni_b_h (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvssrlni_b_h (_1, _2, 1); ++} ++v16i16 ++__lasx_xvssrlni_h_w (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvssrlni_h_w (_1, _2, 1); ++} ++v8i32 ++__lasx_xvssrlni_w_d (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvssrlni_w_d (_1, _2, 1); ++} ++v4i64 ++__lasx_xvssrlni_d_q (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvssrlni_d_q (_1, _2, 1); ++} ++v32u8 ++__lasx_xvssrlni_bu_h (v32u8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvssrlni_bu_h (_1, _2, 1); ++} ++v16u16 ++__lasx_xvssrlni_hu_w (v16u16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvssrlni_hu_w (_1, _2, 1); ++} ++v8u32 ++__lasx_xvssrlni_wu_d (v8u32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvssrlni_wu_d (_1, _2, 1); ++} ++v4u64 ++__lasx_xvssrlni_du_q (v4u64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvssrlni_du_q (_1, _2, 1); ++} ++v32i8 ++__lasx_xvssrlrni_b_h (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvssrlrni_b_h (_1, _2, 1); ++} ++v16i16 ++__lasx_xvssrlrni_h_w (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvssrlrni_h_w (_1, _2, 1); ++} 
++v8i32 ++__lasx_xvssrlrni_w_d (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvssrlrni_w_d (_1, _2, 1); ++} ++v4i64 ++__lasx_xvssrlrni_d_q (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvssrlrni_d_q (_1, _2, 1); ++} ++v32u8 ++__lasx_xvssrlrni_bu_h (v32u8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvssrlrni_bu_h (_1, _2, 1); ++} ++v16u16 ++__lasx_xvssrlrni_hu_w (v16u16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvssrlrni_hu_w (_1, _2, 1); ++} ++v8u32 ++__lasx_xvssrlrni_wu_d (v8u32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvssrlrni_wu_d (_1, _2, 1); ++} ++v4u64 ++__lasx_xvssrlrni_du_q (v4u64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvssrlrni_du_q (_1, _2, 1); ++} ++v32i8 ++__lasx_xvsrani_b_h (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvsrani_b_h (_1, _2, 1); ++} ++v16i16 ++__lasx_xvsrani_h_w (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvsrani_h_w (_1, _2, 1); ++} ++v8i32 ++__lasx_xvsrani_w_d (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvsrani_w_d (_1, _2, 1); ++} ++v4i64 ++__lasx_xvsrani_d_q (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvsrani_d_q (_1, _2, 1); ++} ++v32i8 ++__lasx_xvsrarni_b_h (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvsrarni_b_h (_1, _2, 1); ++} ++v16i16 ++__lasx_xvsrarni_h_w (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvsrarni_h_w (_1, _2, 1); ++} ++v8i32 ++__lasx_xvsrarni_w_d (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvsrarni_w_d (_1, _2, 1); ++} ++v4i64 ++__lasx_xvsrarni_d_q (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvsrarni_d_q (_1, _2, 1); ++} ++v32i8 ++__lasx_xvssrani_b_h (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvssrani_b_h (_1, _2, 1); ++} ++v16i16 ++__lasx_xvssrani_h_w (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvssrani_h_w (_1, _2, 1); ++} ++v8i32 ++__lasx_xvssrani_w_d (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvssrani_w_d (_1, _2, 1); ++} ++v4i64 ++__lasx_xvssrani_d_q (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvssrani_d_q (_1, _2, 1); 
++} ++v32u8 ++__lasx_xvssrani_bu_h (v32u8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvssrani_bu_h (_1, _2, 1); ++} ++v16u16 ++__lasx_xvssrani_hu_w (v16u16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvssrani_hu_w (_1, _2, 1); ++} ++v8u32 ++__lasx_xvssrani_wu_d (v8u32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvssrani_wu_d (_1, _2, 1); ++} ++v4u64 ++__lasx_xvssrani_du_q (v4u64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvssrani_du_q (_1, _2, 1); ++} ++v32i8 ++__lasx_xvssrarni_b_h (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvssrarni_b_h (_1, _2, 1); ++} ++v16i16 ++__lasx_xvssrarni_h_w (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvssrarni_h_w (_1, _2, 1); ++} ++v8i32 ++__lasx_xvssrarni_w_d (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvssrarni_w_d (_1, _2, 1); ++} ++v4i64 ++__lasx_xvssrarni_d_q (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvssrarni_d_q (_1, _2, 1); ++} ++v32u8 ++__lasx_xvssrarni_bu_h (v32u8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvssrarni_bu_h (_1, _2, 1); ++} ++v16u16 ++__lasx_xvssrarni_hu_w (v16u16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvssrarni_hu_w (_1, _2, 1); ++} ++v8u32 ++__lasx_xvssrarni_wu_d (v8u32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvssrarni_wu_d (_1, _2, 1); ++} ++v4u64 ++__lasx_xvssrarni_du_q (v4u64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvssrarni_du_q (_1, _2, 1); ++} ++int ++__lasx_xbnz_b (v32u8 _1) ++{ ++ return __builtin_lasx_xbnz_b (_1); ++} ++int ++__lasx_xbnz_d (v4u64 _1) ++{ ++ return __builtin_lasx_xbnz_d (_1); ++} ++int ++__lasx_xbnz_h (v16u16 _1) ++{ ++ return __builtin_lasx_xbnz_h (_1); ++} ++int ++__lasx_xbnz_v (v32u8 _1) ++{ ++ return __builtin_lasx_xbnz_v (_1); ++} ++int ++__lasx_xbnz_w (v8u32 _1) ++{ ++ return __builtin_lasx_xbnz_w (_1); ++} ++int ++__lasx_xbz_b (v32u8 _1) ++{ ++ return __builtin_lasx_xbz_b (_1); ++} ++int ++__lasx_xbz_d (v4u64 _1) ++{ ++ return __builtin_lasx_xbz_d (_1); ++} ++int ++__lasx_xbz_h (v16u16 _1) ++{ ++ return __builtin_lasx_xbz_h (_1); ++} ++int 
++__lasx_xbz_v (v32u8 _1) ++{ ++ return __builtin_lasx_xbz_v (_1); ++} ++int ++__lasx_xbz_w (v8u32 _1) ++{ ++ return __builtin_lasx_xbz_w (_1); ++} ++v4i64 ++__lasx_xvfcmp_caf_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcmp_caf_d (_1, _2); ++} ++v8i32 ++__lasx_xvfcmp_caf_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcmp_caf_s (_1, _2); ++} ++v4i64 ++__lasx_xvfcmp_ceq_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcmp_ceq_d (_1, _2); ++} ++v8i32 ++__lasx_xvfcmp_ceq_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcmp_ceq_s (_1, _2); ++} ++v4i64 ++__lasx_xvfcmp_cle_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcmp_cle_d (_1, _2); ++} ++v8i32 ++__lasx_xvfcmp_cle_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcmp_cle_s (_1, _2); ++} ++v4i64 ++__lasx_xvfcmp_clt_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcmp_clt_d (_1, _2); ++} ++v8i32 ++__lasx_xvfcmp_clt_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcmp_clt_s (_1, _2); ++} ++v4i64 ++__lasx_xvfcmp_cne_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcmp_cne_d (_1, _2); ++} ++v8i32 ++__lasx_xvfcmp_cne_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcmp_cne_s (_1, _2); ++} ++v4i64 ++__lasx_xvfcmp_cor_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcmp_cor_d (_1, _2); ++} ++v8i32 ++__lasx_xvfcmp_cor_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcmp_cor_s (_1, _2); ++} ++v4i64 ++__lasx_xvfcmp_cueq_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcmp_cueq_d (_1, _2); ++} ++v8i32 ++__lasx_xvfcmp_cueq_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcmp_cueq_s (_1, _2); ++} ++v4i64 ++__lasx_xvfcmp_cule_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcmp_cule_d (_1, _2); ++} ++v8i32 ++__lasx_xvfcmp_cule_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcmp_cule_s (_1, _2); ++} ++v4i64 ++__lasx_xvfcmp_cult_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcmp_cult_d (_1, _2); ++} ++v8i32 
++__lasx_xvfcmp_cult_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcmp_cult_s (_1, _2); ++} ++v4i64 ++__lasx_xvfcmp_cun_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcmp_cun_d (_1, _2); ++} ++v4i64 ++__lasx_xvfcmp_cune_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcmp_cune_d (_1, _2); ++} ++v8i32 ++__lasx_xvfcmp_cune_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcmp_cune_s (_1, _2); ++} ++v8i32 ++__lasx_xvfcmp_cun_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcmp_cun_s (_1, _2); ++} ++v4i64 ++__lasx_xvfcmp_saf_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcmp_saf_d (_1, _2); ++} ++v8i32 ++__lasx_xvfcmp_saf_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcmp_saf_s (_1, _2); ++} ++v4i64 ++__lasx_xvfcmp_seq_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcmp_seq_d (_1, _2); ++} ++v8i32 ++__lasx_xvfcmp_seq_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcmp_seq_s (_1, _2); ++} ++v4i64 ++__lasx_xvfcmp_sle_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcmp_sle_d (_1, _2); ++} ++v8i32 ++__lasx_xvfcmp_sle_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcmp_sle_s (_1, _2); ++} ++v4i64 ++__lasx_xvfcmp_slt_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcmp_slt_d (_1, _2); ++} ++v8i32 ++__lasx_xvfcmp_slt_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcmp_slt_s (_1, _2); ++} ++v4i64 ++__lasx_xvfcmp_sne_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcmp_sne_d (_1, _2); ++} ++v8i32 ++__lasx_xvfcmp_sne_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcmp_sne_s (_1, _2); ++} ++v4i64 ++__lasx_xvfcmp_sor_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcmp_sor_d (_1, _2); ++} ++v8i32 ++__lasx_xvfcmp_sor_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcmp_sor_s (_1, _2); ++} ++v4i64 ++__lasx_xvfcmp_sueq_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcmp_sueq_d (_1, _2); ++} ++v8i32 ++__lasx_xvfcmp_sueq_s (v8f32 _1, v8f32 _2) ++{ ++ return 
__builtin_lasx_xvfcmp_sueq_s (_1, _2); ++} ++v4i64 ++__lasx_xvfcmp_sule_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcmp_sule_d (_1, _2); ++} ++v8i32 ++__lasx_xvfcmp_sule_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcmp_sule_s (_1, _2); ++} ++v4i64 ++__lasx_xvfcmp_sult_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcmp_sult_d (_1, _2); ++} ++v8i32 ++__lasx_xvfcmp_sult_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcmp_sult_s (_1, _2); ++} ++v4i64 ++__lasx_xvfcmp_sun_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcmp_sun_d (_1, _2); ++} ++v4i64 ++__lasx_xvfcmp_sune_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcmp_sune_d (_1, _2); ++} ++v8i32 ++__lasx_xvfcmp_sune_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcmp_sune_s (_1, _2); ++} ++v8i32 ++__lasx_xvfcmp_sun_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcmp_sun_s (_1, _2); ++} ++v4f64 ++__lasx_xvpickve_d_f (v4f64 _1) ++{ ++ return __builtin_lasx_xvpickve_d_f (_1, 1); ++} ++v8f32 ++__lasx_xvpickve_w_f (v8f32 _1) ++{ ++ return __builtin_lasx_xvpickve_w_f (_1, 1); ++} ++v32i8 ++__lasx_xvrepli_b () ++{ ++ return __builtin_lasx_xvrepli_b (1); ++} ++v4i64 ++__lasx_xvrepli_d () ++{ ++ return __builtin_lasx_xvrepli_d (1); ++} ++v16i16 ++__lasx_xvrepli_h () ++{ ++ return __builtin_lasx_xvrepli_h (1); ++} ++v8i32 ++__lasx_xvrepli_w () ++{ ++ return __builtin_lasx_xvrepli_w (1); ++} +-- +2.33.0 + diff --git a/LoongArch-Add-tests-for-ASX-vector-comparison-and-se.patch b/LoongArch-Add-tests-for-ASX-vector-comparison-and-se.patch new file mode 100644 index 0000000000000000000000000000000000000000..6c5dc4452e88b2bbe6d5d0f95c0c1f4bc7f9d8e7 --- /dev/null +++ b/LoongArch-Add-tests-for-ASX-vector-comparison-and-se.patch @@ -0,0 +1,5363 @@ +From 9ccb5fcabdf69160eb360da7eab06a207f59334c Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Tue, 12 Sep 2023 16:11:04 +0800 +Subject: [PATCH 113/124] LoongArch: Add tests for ASX vector comparison and + selection 
instruction. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lasx/lasx-xvseq.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvseqi.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvsle-1.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvsle-2.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvslei-1.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvslei-2.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvslt-1.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvslt-2.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvslti-1.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvslti-2.c: New test. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lasx/lasx-xvseq.c | 650 ++++++++++++++++++ + .../loongarch/vector/lasx/lasx-xvseqi.c | 449 ++++++++++++ + .../loongarch/vector/lasx/lasx-xvsle-1.c | 575 ++++++++++++++++ + .../loongarch/vector/lasx/lasx-xvsle-2.c | 590 ++++++++++++++++ + .../loongarch/vector/lasx/lasx-xvslei-1.c | 515 ++++++++++++++ + .../loongarch/vector/lasx/lasx-xvslei-2.c | 438 ++++++++++++ + .../loongarch/vector/lasx/lasx-xvslt-1.c | 455 ++++++++++++ + .../loongarch/vector/lasx/lasx-xvslt-2.c | 620 +++++++++++++++++ + .../loongarch/vector/lasx/lasx-xvslti-1.c | 548 +++++++++++++++ + .../loongarch/vector/lasx/lasx-xvslti-2.c | 416 +++++++++++ + 10 files changed, 5256 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseq.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseqi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-1.c + 
create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-2.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseq.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseq.c +new file mode 100644 +index 000000000..2a42386ce +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseq.c +@@ -0,0 +1,650 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffe000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00010000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100020001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000fffffffffffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff000000010000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000095120000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xc9da000063f50000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xc7387fff6bbfffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff00000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvseq_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7f0000007f000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7f0000007f000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1555156a1555156a; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1555156a1555156a; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1555156a1555156a; ++ *((unsigned long *)&__m256i_op1[0]) = 0x1555156a1555156a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x6100000800060005; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5ee1c073b800c916; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5ff00007fff9fff3; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0209fefb08140000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0003fffc00060000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ffff00ff000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000fffffefd; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffffefd; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffffffefd; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000080000000800; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffcf800fffcf800; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000080000000800; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000080000000800; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffff00fffffff0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0xffffff00fffffff0; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvseq_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvseq_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffefe00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvseq_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvseq_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xe161616161616161; ++ *((unsigned long *)&__m256i_op0[2]) = 
0xe161616161614e60; ++ *((unsigned long *)&__m256i_op0[1]) = 0xe161616161616161; ++ *((unsigned long *)&__m256i_op0[0]) = 0xe161616161614e60; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000005be55bd2; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffcc8000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000007dfdff4b; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvseq_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff00010003; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0x0080000200000002; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff00010003; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffff00ffffff00; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff0000ff00ff00; ++ *((unsigned long *)&__m256i_result[1]) = 0xff00ff00ffffff00; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff0000ff00ff00; ++ __m256i_out = __lasx_xvseq_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4ffc3f783fc040c0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3fc03f803fc040c0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4ffc3f783fc040c0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3fc03f803fc040c0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffee0000004c0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff050000ff3c0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00f9000000780000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffa80000ff310000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8011ffee804c004c; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00faff0500c3ff3c; ++ *((unsigned long *)&__m256i_op1[1]) = 0x80f900f980780078; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0057ffa800ceff31; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000ff00; ++ *((unsigned long *)&__m256i_result[2]) = 0xff000000ff000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ff000000ff00; ++ *((unsigned long *)&__m256i_result[0]) = 0xff000000ff000000; ++ __m256i_out = __lasx_xvseq_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00003fc00000428a; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00003fc00000428a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffe00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffe00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvseq_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvseq_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvseq_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff0000ffff0001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff0000ffff0001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvseq_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000080000009; ++ *((unsigned long *)&__m256i_op1[2]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000080000009; ++ *((unsigned long *)&__m256i_op1[0]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvseq_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000077fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvseq_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvseq_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x003f60041f636003; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x003f60041f636003; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffff00ffffff00; ++ *((unsigned long *)&__m256i_result[2]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffff00ffffff00; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00000000000000; ++ __m256i_out = __lasx_xvseq_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvseq_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvseq_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000005500000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001005500020000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000005500000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001005500020000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvseq_h 
(__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvseq_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffefff7f00100080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0xffefff7f00100080; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff01fb0408; ++ *((unsigned long *)&__m256i_op1[2]) = 0xf2b180c9fc1fefdc; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff01fb0408; ++ *((unsigned long *)&__m256i_op1[0]) = 0xf2b180c9fc1fefdc; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffff00ffffffff; ++ __m256i_out = __lasx_xvseq_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000080000000; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvseq_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x001fffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x001fffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0080000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0080000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000501ffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000701ffffce; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000501ffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000701ffffce; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000260a378; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000d02317; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000260a378; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000d02317; ++ *((unsigned long *)&__m256i_op1[3]) = 0x003f020001400200; ++ *((unsigned long *)&__m256i_op1[2]) = 0x003f00ff003f00c4; ++ *((unsigned long *)&__m256i_op1[1]) = 0x003f020001400200; ++ *((unsigned long *)&__m256i_op1[0]) = 0x003f00ff003f00c4; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseqi.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseqi.c +new file mode 100644 +index 000000000..5478d19c1 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseqi.c +@@ -0,0 +1,449 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w 
-fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffdfe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffdfe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_w (__m256i_op0, 7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_w (__m256i_op0, -8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_d (__m256i_op0, -9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_h (__m256i_op0, -8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_w (__m256i_op0, -15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_b (__m256i_op0, 4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_w (__m256i_op0, 4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_h (__m256i_op0, -3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_b (__m256i_op0, 4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x009200f200840080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x009200f200840080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00b200b300800080; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00b200b300800080; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_w (__m256i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_h (__m256i_op0, 14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_d (__m256i_op0, 14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ 
*((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_b (__m256i_op0, -13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_h (__m256i_op0, 4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_h (__m256i_op0, 11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_b (__m256i_op0, -3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_h (__m256i_op0, 10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_d (__m256i_op0, -3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0010000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0008000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0010000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0008000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_w (__m256i_op0, -3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_w (__m256i_op0, 10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00197d3200197d56; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00197d3200197d56; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_h (__m256i_op0, -10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_b (__m256i_op0, 12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_w (__m256i_op0, -13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_b (__m256i_op0, -8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_w (__m256i_op0, 4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_b (__m256i_op0, -5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_h (__m256i_op0, -11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000080; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_b (__m256i_op0, -10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000bdfef907bc; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op0[0]) = 0x000000bdfef907bc; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_w (__m256i_op0, -9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_d (__m256i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_b (__m256i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_b (__m256i_op0, -15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000fff000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000fff000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_w (__m256i_op0, 10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1fa0000000080000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1fa0000000080000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_d (__m256i_op0, 15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_d (__m256i_op0, 14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_w (__m256i_op0, 9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x800fffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x800fffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x800fffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x800fffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_w (__m256i_op0, 5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_w (__m256i_op0, -13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000000c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000000c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_w (__m256i_op0, 9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-1.c +new file mode 100644 +index 000000000..ed752df00 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-1.c +@@ -0,0 +1,575 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000460086; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000007f0079; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000f30028; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000df00ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 
0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xbf28b0686066be60; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffff00ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffff00ff00; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff00ffffff00ff; ++ __m256i_out = __lasx_xvsle_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ffffffffffffff; ++ __m256i_out = __lasx_xvsle_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsle_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xb70036db12c4007e; ++ *((unsigned long *)&__m256i_op0[2]) = 0xb7146213fc1e0049; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000fefe02fffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xb71c413b199d04b5; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff00ff00ffff00; ++ *((unsigned long *)&__m256i_result[2]) = 0xff000000ff00ff00; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffff00ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00000000ff00ff; ++ __m256i_out = __lasx_xvsle_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001000000010000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000000010000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xff00ffffff00ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00ffffff00ffff; ++ __m256i_out = __lasx_xvsle_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff00ff0000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffbfffa0ffffff80; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff00ff0000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffbfffa0ffffff80; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xbfffa004fffd8000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xbfffa004fffd8000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00ffff0000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff00ffff0000ff; ++ __m256i_out = __lasx_xvsle_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0a0a000000000a0a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0a0a0a0a00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0a0a000000000a0a; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0a0a0a0a00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsle_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffee; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffee; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff0000ffff0000; ++ 
*((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff0000ffff0000; ++ __m256i_out = __lasx_xvsle_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000001fff000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000001fff000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffdfff80ffdfff80; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffdfff80ffdfff80; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffdfff80ffdfff80; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffdfff80ffdfff80; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff00; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ff00; ++ __m256i_out = __lasx_xvsle_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x001fffffffe00000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x001fffffffe00000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xff00ffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xff00ffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ffffffffffffff; ++ __m256i_out = __lasx_xvsle_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff010100000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff010100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffffffffffff; ++ __m256i_out = __lasx_xvsle_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fef7fef7fef7fef; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fef7fef7fef7fef; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fef7fef7fef7fef; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fef7fef7fef7fef; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsle_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x2aaaaa85aaaaaa85; ++ *((unsigned long *)&__m256i_op1[2]) = 0x2aaa48f4aaaa48f4; ++ *((unsigned long *)&__m256i_op1[1]) = 0x2aaaaa85aaaaaa85; ++ *((unsigned long *)&__m256i_op1[0]) = 0x2aaa48f4aaaa48f4; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff0000ffff; ++ __m256i_out = __lasx_xvsle_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000001a00000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000900000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000001a00000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000900000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvsle_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000083f95466; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010100005400; ++ *((unsigned long *)&__m256i_op1[3]) = 0x007f00f8ff7fff80; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000fff6a9d8; ++ *((unsigned long *)&__m256i_op1[1]) = 0x007f00f8ff7fff80; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000fff6a9d8; ++ 
*((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsle_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000fe0100000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000fe0100000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000007f0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000007f0000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvsle_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00197d3200197d56; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00197d3200197d56; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = 
__lasx_xvsle_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000fff000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000fff000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvsle_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ffe0001fffe0001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ffe0001fffeffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000fdfdfe; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7efefefe80ffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7efefefe80ffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvsle_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsle_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsle_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00f7000000f70007; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00f7000000f70007; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsle_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsle_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff01fffe00000001; ++ *((unsigned long *)&__m256i_op0[2]) = 
0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff01fffe00000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x800080ff800080ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x800080ff800080ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x800080ff800080ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x800080ff800080ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsle_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000002d; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc02dc02dc02dc02d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000002d; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc02dc02dc02dc02d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-2.c +new file mode 100644 +index 000000000..bc98b41af +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-2.c +@@ -0,0 +1,590 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w 
-fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffee; ++ 
*((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0010000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0008000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0010000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0008000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0020000f0000000f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0010000f0000000f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0020000f0000000f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0010000f0000000f; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = 
__lasx_xvsle_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000001ffe2000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x001fe020001fe020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000001ffe2000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x001fe020001fe020; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff10000fff10000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfff10000fff10000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff000000ff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff000000ff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff0000ffff0000; ++ __m256i_out = __lasx_xvsle_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x000000000000017e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xdf00000052a00000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5b7f00ff5b7f00ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xdf00000052a00000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5b7f00ff5b7f00ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsle_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8011ffae800c000c; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00baff050083ff3c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x80b900b980380038; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0017ffa8008eff31; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00003fc00000428a; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00003fc00000428a; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff0000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsle_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff0008; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffff0008; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[0]) = 0xffffffff0000ffff; ++ __m256i_out = __lasx_xvsle_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000003ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000003ff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 
0xfa15fa15fa15fa14; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfa15fa15fa15fa14; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x05ea05ea05ea05ec; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x05ea05ea05ea05ec; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsle_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffefe00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvsle_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000feb60000b7d0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000feb60000c7eb; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000feb60000b7d0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000feb60000c7eb; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvsle_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvsle_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff010ff0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff010ff0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsle_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffeb683007ffd80; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffe97c0df5b41cf; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffeb683007ffd80; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffe97c0df5b41cf; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001497c98ea4fca; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001497c98ea4fca; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvsle_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffff10; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffff10; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op1[2]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op1[0]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsle_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsle_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010102; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010201010204; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010102; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010102; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00020421d7d41124; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x00020421d7d41124; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000180007f7f; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffafaf80000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000180007f7f; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffafaf80000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff21c241ff21c241; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff21c241ff21c241; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff21c241ff21c241; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff21c241ff21c241; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsle_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfff0000000000000; ++ 
*((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = 
__lasx_xvsle_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffff6361; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4d0a902890b800dc; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffff6361; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4d0a902890b800dc; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsle_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x94d7fb5200000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00003fef00003fea; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00003ff000003ff0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00003fef00003fea; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00003ff000003ff0; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-1.c +new file mode 100644 +index 000000000..06717802c +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-1.c +@@ -0,0 +1,515 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" 
++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_b (__m256i_op0, 7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_b (__m256i_op0, 2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_b (__m256i_op0, 4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_b (__m256i_op0, -3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000101ff01; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_b (__m256i_op0, 9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000000010000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00010013000100fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000000010000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00010013000100fb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_b (__m256i_op0, -11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_b (__m256i_op0, 7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_b (__m256i_op0, 6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_b 
(__m256i_op0, 0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_b (__m256i_op0, 3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_b (__m256i_op0, -2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_b (__m256i_op0, -7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_b (__m256i_op0, 2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_h (__m256i_op0, -13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_h (__m256i_op0, 4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x014200c200c200ae; ++ *((unsigned long *)&__m256i_op0[2]) = 0x014200c200c200ae; ++ *((unsigned long *)&__m256i_op0[1]) = 0x014200c200c200ae; ++ *((unsigned long *)&__m256i_op0[0]) = 0x014200c200c200ae; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_h (__m256i_op0, -4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_h (__m256i_op0, -12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000c9; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000c9; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_h (__m256i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_h (__m256i_op0, 0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_h (__m256i_op0, 2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_h (__m256i_op0, -1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffff8900000040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffff8900000040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00000000; ++ __m256i_out = __lasx_xvslei_h (__m256i_op0, -16); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff000000000000; ++ __m256i_out = __lasx_xvslei_h (__m256i_op0, -8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000460086; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000007f0079; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000f30028; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000df00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_w (__m256i_op0, -8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfc2f3183ef7ffff7; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_w (__m256i_op0, 0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_w (__m256i_op0, -2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_w (__m256i_op0, 15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_w (__m256i_op0, -2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_w (__m256i_op0, -2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000f0f0f0f0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000f0f0f0f0; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_w (__m256i_op0, 4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_w (__m256i_op0, -12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_w (__m256i_op0, -5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_w (__m256i_op0, 8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_w (__m256i_op0, 15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001000100800000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_w (__m256i_op0, 4); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1828f0e09bad7249; ++ *((unsigned long *)&__m256i_op0[2]) = 0x07ffc1b723953cec; ++ *((unsigned long *)&__m256i_op0[1]) = 0x61f2e9b333aab104; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6bf742aa0d7856a0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_d (__m256i_op0, 12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_d (__m256i_op0, -11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_d (__m256i_op0, -15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000003f3f3f3c; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc6c6c6c68787878a; ++ 
*((unsigned long *)&__m256i_op0[1]) = 0x000000003f3f3f3c; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8787878a00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_d (__m256i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_d (__m256i_op0, 1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_d (__m256i_op0, 11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_d (__m256i_op0, 1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_d (__m256i_op0, -13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_d (__m256i_op0, 9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_d (__m256i_op0, -3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-2.c +new file mode 100644 +index 000000000..093d5640e +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-2.c +@@ -0,0 +1,438 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long *)&__m256i_op0[2]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long *)&__m256i_op0[0]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_bu (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_bu (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_bu (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffff00; ++ __m256i_out = __lasx_xvslei_bu (__m256i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007f7f7f80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007f7f7f80; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ 
*((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvslei_bu (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x04e8296f3c611818; ++ *((unsigned long *)&__m256i_op0[2]) = 0x032eafee29010000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x04e8296f3c611818; ++ *((unsigned long *)&__m256i_op0[0]) = 0x032eafee29010000; ++ *((unsigned long *)&__m256i_result[3]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xff00000000ffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00000000ffffff; ++ __m256i_out = __lasx_xvslei_bu (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_bu (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000f788f788; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000f788f788; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvslei_bu 
(__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00217f19ffde80e6; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00037f94fffc806b; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00217f19ffde80e6; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00037f94fffc806b; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff000000000000; ++ __m256i_out = __lasx_xvslei_hu (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_hu (__m256i_op0, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_hu (__m256i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned 
long *)&__m256i_op0[2]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_hu (__m256i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00003fc00000428a; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00003fc00000428a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff0000ffff0000; ++ __m256i_out = __lasx_xvslei_hu (__m256i_op0, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_hu (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_hu (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x04e8296f18181818; ++ *((unsigned long *)&__m256i_op0[2]) = 0x132feea900000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x04e8296f18181818; ++ *((unsigned long *)&__m256i_op0[0]) = 0x132feea900000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvslei_hu (__m256i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_hu (__m256i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007f807f80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007f807f80; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ 
*((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvslei_hu (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_hu (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_wu (__m256i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_wu 
(__m256i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000003f7e3f; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffc6cc05c64d960e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000003f7e3f; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff874dc687870000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_wu (__m256i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_wu (__m256i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_op0[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_wu (__m256i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_wu (__m256i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xdbcbdbcbecececec; ++ *((unsigned long *)&__m256i_op0[2]) = 0xdbcbdbcb0000dbcb; ++ *((unsigned long *)&__m256i_op0[1]) = 0xdbcbdbcbecececec; ++ *((unsigned long *)&__m256i_op0[0]) = 0xdbcbdbcb0000dbcb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_wu (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_wu (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_wu (__m256i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000007ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000007ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvslei_wu (__m256i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x03802fc000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x03802fc000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvslei_wu (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_wu (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_wu (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_wu (__m256i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_du 
(__m256i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001ffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001ffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_du (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_du (__m256i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_du (__m256i_op0, 0x10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_du (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-1.c +new file mode 100644 +index 000000000..ca1f5e94f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-1.c +@@ -0,0 +1,455 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvslt_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00003fc00000428a; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00003fc00000428a; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ff000000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ff000000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000ff000000ff; ++ __m256i_out = __lasx_xvslt_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000002000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000002000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xe07de0801f20607a; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xe07de0801f20607a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff0000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff0000ffffffff; ++ __m256i_out = __lasx_xvslt_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000860601934; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000800200028; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000860601934; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000800200028; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslt_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvslt_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000003fffc0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000003fffc0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff0000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff0000ffffffff; ++ __m256i_out = __lasx_xvslt_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000022222221; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3dddddddfbbb3bbc; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000022222221; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3dddddddfbbb3bbc; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvslt_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000002000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000800000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000002000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000800000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvslt_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffe05f8102; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffe05f8102; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000f000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000f000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslt_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff01ff01ff01ff01; ++ 
*((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslt_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslt_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff3eff3eff3eff3e; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff3eff3eff3eff3e; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000500000005; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000500000005; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000500000005; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000500000005; ++ *((unsigned long 
*)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslt_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000080000009; ++ *((unsigned long *)&__m256i_op0[2]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000080000009; ++ *((unsigned long *)&__m256i_op0[0]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslt_d 
(__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xf7f8f7f8f7f8f7f8; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xf7f8f7f8f7f8f7f8; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffe0df9f8e; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffe0df9f8e; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffe0df9f8e; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffe0df9f8e; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-2.c +new file mode 100644 +index 000000000..6864f5eb8 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-2.c +@@ -0,0 +1,620 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1828f0e09bad7249; ++ *((unsigned long *)&__m256i_op0[2]) = 0x07ffc1b723953cec; ++ *((unsigned long *)&__m256i_op0[1]) = 0x61f2e9b333aab104; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6bf742aa0d7856a0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0d41c9a7bdd239a7; ++ *((unsigned 
long *)&__m256i_op1[2]) = 0x0b025d0ef8fdf987; ++ *((unsigned long *)&__m256i_op1[1]) = 0x002944f92da5a708; ++ *((unsigned long *)&__m256i_op1[0]) = 0x038cf4ea999922ef; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff0000ffff00ff; ++ *((unsigned long *)&__m256i_result[2]) = 0xff000000ffffff00; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffffffff00ff; ++ __m256i_out = __lasx_xvslt_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000017000000080; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc06500550055ffab; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000017000000080; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc06500550055ffab; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffefd; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x001175f10e4330e8; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff8f0842ff29211e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffff8d9ffa7103d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xdb801b6d0962003f; ++ *((unsigned long *)&__m256i_op0[2]) = 0xdb8a3109fe0f0024; ++ *((unsigned long *)&__m256i_op0[1]) = 0x9a7f997fff01ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xbe632a4f1c3c5653; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00ff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslt_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000500000005; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000500000005; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000500000005; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000500000005; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000ff000000ff; ++ __m256i_out = __lasx_xvslt_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000010000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2020202020202031; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x2020202020202031; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x4040404040404040; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x5252525252525252; ++ *((unsigned long *)&__m256i_op1[2]) = 0x5252525252525252; ++ *((unsigned long *)&__m256i_op1[1]) = 0x5252525252525252; ++ *((unsigned long *)&__m256i_op1[0]) = 0x5252525252525252; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslt_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000040b200002fd4; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00007fff0000739c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000040b200002fd4; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00007fff0000739c; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvslt_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x386000003df80000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x386000003df80000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff8c80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffffe40; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffbfffc; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff0000ffff; ++ __m256i_out = __lasx_xvslt_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000001ffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000001ffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000200000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000200000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff00000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvslt_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xbc74c3d108e05422; ++ *((unsigned long *)&__m256i_op0[2]) = 
0xbc1e3e6a5cace67c; ++ *((unsigned long *)&__m256i_op0[1]) = 0xbc74c3d108e0544a; ++ *((unsigned long *)&__m256i_op0[0]) = 0xbc18e696a86565f4; ++ *((unsigned long *)&__m256i_op1[3]) = 0xbc74c3d108e05422; ++ *((unsigned long *)&__m256i_op1[2]) = 0xbc1e3e6a5cace67c; ++ *((unsigned long *)&__m256i_op1[1]) = 0xbc74c3d108e0544a; ++ *((unsigned long *)&__m256i_op1[0]) = 0xbc18e696a86565f4; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x43ef87878000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x43ef87878000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000fc38fc38; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000fc38fc38; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvslt_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000001400000000; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000001400000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000017f00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00007f7f03030000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvslt_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000401000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000401000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000401000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000401000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslt_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00003fc00000428a; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00003fc00000428a; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslt_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0002000000020000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00220021004a007e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0002000000020000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00220021004a007e; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0002000000020000; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0x00220021004a007e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0002000000020000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00220021004a007e; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000100008000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100007fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000100008000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100007fff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslt_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslt_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000003fbfc04; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000001fdfe02; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000003fbfc04; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000001fdfe02; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000000b; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000000b; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000000b; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000000b; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000000b; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000000b; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000000b; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000000b; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0010511c54440437; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0010511c54440437; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslt_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslt_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffff00ffff8000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffff00ffff8000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslt_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-1.c +new file mode 100644 +index 000000000..7dd2778a5 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-1.c +@@ -0,0 +1,548 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_b (__m256i_op0, 4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ 
*((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_b (__m256i_op0, -13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_b (__m256i_op0, 0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_b (__m256i_op0, 15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_b (__m256i_op0, -13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_b (__m256i_op0, -3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_b (__m256i_op0, 15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_b (__m256i_op0, 11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_h (__m256i_op0, 10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_h (__m256i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 
0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_h (__m256i_op0, 11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_h (__m256i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_h (__m256i_op0, 15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_h (__m256i_op0, -10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_h (__m256i_op0, 7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_h (__m256i_op0, 3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffe05fc47b400; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffe06003fc000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffe05fc47b400; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffe06003fc000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvslti_h (__m256i_op0, -3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff02000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x0000ffff02000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff0000ffff; ++ __m256i_out = __lasx_xvslti_h (__m256i_op0, 2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_h (__m256i_op0, -15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_h (__m256i_op0, -4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ff80; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_h (__m256i_op0, 12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_h (__m256i_op0, -11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc58a0a0a07070706; ++ *((unsigned long *)&__m256i_op0[2]) = 0x006b60e4180b0023; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1b39153f334b966a; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf1d75d79efcac002; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_w (__m256i_op0, -1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ff90ff81; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000007f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ff90ff81; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000007f; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvslti_w (__m256i_op0, -3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_w (__m256i_op0, -3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_w (__m256i_op0, -11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_w (__m256i_op0, -16); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[2]) = 0x00000045000d0005; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000045000d0005; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_w (__m256i_op0, -8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_w (__m256i_op0, 5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_w (__m256i_op0, 12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0020000f0000000f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0010000f0000000f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0020000f0000000f; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0010000f0000000f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_w (__m256i_op0, -4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_w (__m256i_op0, -12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_w (__m256i_op0, -6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_w (__m256i_op0, -5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_w (__m256i_op0, 14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000004efffe00; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000047000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000004efffe00; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000047000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvslti_w (__m256i_op0, 2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_w (__m256i_op0, 9); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000b7; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffefff80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000b7; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffefff80; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvslti_w (__m256i_op0, -8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_w (__m256i_op0, 11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_d (__m256i_op0, 5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_d (__m256i_op0, -5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_d (__m256i_op0, 4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x80000000001529c1; ++ *((unsigned long *)&__m256i_op0[2]) = 0x80007073cadc3779; ++ *((unsigned long *)&__m256i_op0[1]) = 0x80000000001529c1; ++ *((unsigned long *)&__m256i_op0[0]) = 0x80007073cadc3779; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_d (__m256i_op0, -13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_d (__m256i_op0, -2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_d (__m256i_op0, -15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_d (__m256i_op0, 4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_d (__m256i_op0, 10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0004000f00100003; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000400030010000f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0004000f00100003; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000400030010000f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_d (__m256i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-2.c +new file mode 100644 +index 000000000..d93e4314e +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-2.c +@@ -0,0 +1,416 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_bu (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_bu (__m256i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_bu (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0002000000020000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00220021004a007e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0002000000020000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00220021004a007e; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[2]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00ff00ff00ff00; ++ __m256i_out = __lasx_xvslti_bu (__m256i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00007ff000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00007ff000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff0000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff0000ffffffff; ++ __m256i_out = __lasx_xvslti_bu (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_bu (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 
0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_bu (__m256i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_bu (__m256i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_bu (__m256i_op0, 0x12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_bu (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_hu (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffffffffffff; ++ __m256i_out = __lasx_xvslti_hu (__m256i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_hu (__m256i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_hu (__m256i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffb3b4; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffff5ffff4738; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffb3b4; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffff5ffff4738; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_hu (__m256i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000001010000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000001010000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff0000ffff; ++ __m256i_out = __lasx_xvslti_hu (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_wu (__m256i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_wu (__m256i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x009f00f8007e00f0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007f007f0081007f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x009f00f8007e00f0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007f007f0081007f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_wu (__m256i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_wu (__m256i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_wu (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xe0e0e0e0e0e0e0e0; ++ *((unsigned long *)&__m256i_op0[2]) = 0xe0e0e0e0e0e0e0e0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000e0e0e0e0; ++ *((unsigned long *)&__m256i_op0[0]) = 0xe0e0e0e0e0e0e0e0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_wu (__m256i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_wu (__m256i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ 
++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_wu (__m256i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_wu (__m256i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvslti_wu (__m256i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_du (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_du (__m256i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_du (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_du (__m256i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_du (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000007773; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000003373; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_du (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_du (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_du (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_du (__m256i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_du (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ return 0; ++} +-- +2.33.0 + diff --git a/LoongArch-Add-tests-for-ASX-vector-floating-point-co.patch b/LoongArch-Add-tests-for-ASX-vector-floating-point-co.patch new file mode 100644 index 0000000000000000000000000000000000000000..88069edee17ec906e02430bc2a5c11a58d9372ce --- /dev/null +++ b/LoongArch-Add-tests-for-ASX-vector-floating-point-co.patch @@ -0,0 +1,7291 @@ +From 5a014f35ac194402adc08945480da44e2c0a772a Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Tue, 12 Sep 2023 16:06:04 +0800 +Subject: [PATCH 112/124] LoongArch: Add tests for ASX vector floating-point + conversion instruction. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lasx/lasx-xvfcvt.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvfcvth.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvffint-1.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvffint-2.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvffinth.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvfrint_d.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvfrint_s.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvftint-1.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvftint-2.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvftint-3.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvftintl.c: New test. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lasx/lasx-xvfcvt.c | 528 ++++++ + .../loongarch/vector/lasx/lasx-xvfcvth.c | 485 +++++ + .../loongarch/vector/lasx/lasx-xvffint-1.c | 375 ++++ + .../loongarch/vector/lasx/lasx-xvffint-2.c | 246 +++ + .../loongarch/vector/lasx/lasx-xvffinth.c | 262 +++ + .../loongarch/vector/lasx/lasx-xvfrint_d.c | 429 +++++ + .../loongarch/vector/lasx/lasx-xvfrint_s.c | 723 ++++++++ + .../loongarch/vector/lasx/lasx-xvftint-1.c | 471 +++++ + .../loongarch/vector/lasx/lasx-xvftint-2.c | 1565 ++++++++++++++++ + .../loongarch/vector/lasx/lasx-xvftint-3.c | 511 ++++++ + .../loongarch/vector/lasx/lasx-xvftintl.c | 1580 +++++++++++++++++ + 11 files changed, 7175 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvt.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvth.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffinth.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftintl.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvt.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvt.c +new file mode 100644 +index 000000000..116399a7c +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvt.c +@@ -0,0 +1,528 @@ ++/* { dg-do run } */ ++/* { 
dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcvt_h_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 
0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcvt_h_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000003; ++ *((int *)&__m256_op1[6]) = 0x0000000c; ++ *((int *)&__m256_op1[5]) = 0x00000011; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000005; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000008; ++ *((int *)&__m256_op1[0]) = 0x00000010; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcvt_h_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x6d6d6d6d; ++ *((int *)&__m256_op0[6]) = 0x6d6d6d6d; ++ *((int *)&__m256_op0[5]) = 0x6d6d6d6d; ++ *((int *)&__m256_op0[4]) = 0x6d6d6d6d; ++ *((int 
*)&__m256_op0[3]) = 0x6d6d6d6d; ++ *((int *)&__m256_op0[2]) = 0x6d6d6d6d; ++ *((int *)&__m256_op0[1]) = 0x6d6d6d6d; ++ *((int *)&__m256_op0[0]) = 0x6d6d6d6d; ++ *((int *)&__m256_op1[7]) = 0x6d6d6d6d; ++ *((int *)&__m256_op1[6]) = 0x6d6d6d6d; ++ *((int *)&__m256_op1[5]) = 0x6d6d6d6d; ++ *((int *)&__m256_op1[4]) = 0x6d6d6d6d; ++ *((int *)&__m256_op1[3]) = 0x6d6d6d6d; ++ *((int *)&__m256_op1[2]) = 0x6d6d6d6d; ++ *((int *)&__m256_op1[1]) = 0x6d6d6d6d; ++ *((int *)&__m256_op1[0]) = 0x6d6d6d6d; ++ *((unsigned long *)&__m256i_result[3]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_result[2]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_result[1]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_result[0]) = 0x7c007c007c007c00; ++ __m256i_out = __lasx_xvfcvt_h_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcvt_h_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 
0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcvt_h_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00020000; ++ *((int *)&__m256_op1[6]) = 0x00020000; ++ *((int *)&__m256_op1[5]) = 0x00020000; ++ *((int *)&__m256_op1[4]) = 0x00010000; ++ *((int *)&__m256_op1[3]) = 0x00020000; ++ *((int *)&__m256_op1[2]) = 0x00020000; ++ *((int *)&__m256_op1[1]) = 0x00020000; ++ *((int *)&__m256_op1[0]) = 0x00010000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcvt_h_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcvt_h_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x71717171; ++ *((int *)&__m256_op1[6]) = 0x71010101; ++ *((int *)&__m256_op1[5]) = 0x8e8e8e8e; ++ *((int *)&__m256_op1[4]) = 0x8f00ffff; ++ *((int *)&__m256_op1[3]) = 0x71717171; ++ *((int *)&__m256_op1[2]) = 0x71010101; ++ *((int *)&__m256_op1[1]) = 0x8e8e8e8e; ++ *((int *)&__m256_op1[0]) = 0x8f00ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7c007c0080008000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 
0x7c007c0080008000; ++ __m256i_out = __lasx_xvfcvt_h_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xfff10000; ++ *((int *)&__m256_op0[4]) = 0xfff10000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xfff10000; ++ *((int *)&__m256_op0[0]) = 0xfff10000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0xfff10000; ++ *((int *)&__m256_op1[4]) = 0xfff10000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0xfff10000; ++ *((int *)&__m256_op1[0]) = 0xfff10000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ff88ff88; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ff88ff88; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ff88ff88; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ff88ff88; ++ __m256i_out = __lasx_xvfcvt_h_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00040000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00040000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcvt_h_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xff00ff00; ++ *((int *)&__m256_op0[6]) = 0x3f003f00; ++ *((int *)&__m256_op0[5]) = 0xff0101fd; ++ *((int *)&__m256_op0[4]) = 0x00010100; ++ *((int *)&__m256_op0[3]) = 0xff00ff00; ++ *((int *)&__m256_op0[2]) = 0x3f003f00; ++ *((int *)&__m256_op0[1]) = 0xff0101fd; ++ *((int *)&__m256_op0[0]) = 0x00010100; ++ *((int *)&__m256_op1[7]) = 0x01ffff43; ++ *((int *)&__m256_op1[6]) = 0x00fffeff; ++ *((int *)&__m256_op1[5]) = 0xfe0000bc; ++ *((int *)&__m256_op1[4]) = 0xff000100; ++ *((int *)&__m256_op1[3]) = 0x01ffff43; ++ *((int *)&__m256_op1[2]) = 0x00fffeff; ++ *((int *)&__m256_op1[1]) = 0xfe0000bc; ++ *((int *)&__m256_op1[0]) = 0xff000100; ++ *((unsigned long *)&__m256i_result[3]) = 0xfc003802fc000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000fc00fc00; ++ *((unsigned long *)&__m256i_result[1]) = 0xfc003802fc000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fc00fc00; ++ __m256i_out = __lasx_xvfcvt_h_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcvt_h_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0cc08723ff900001; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xcc9b89f2f6cef440; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffff00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xfffffff8; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xff800000; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xfffffff8; ++ __m256_out = __lasx_xvfcvt_s_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ 
*((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfcvt_s_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfcvt_s_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xdbc8000000003fff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xdbc8000000003fff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((int *)&__m256_result[7]) = 0xff800000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0xff800000; ++ *((int 
*)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfcvt_s_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256d_op1[2]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256d_op1[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256d_op1[0]) = 0xff800000ff800000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0xff800000; ++ *((int *)&__m256_result[4]) = 0xff800000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0xff800000; ++ *((int *)&__m256_result[0]) = 0xff800000; ++ __m256_out = __lasx_xvfcvt_s_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) 
= 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfcvt_s_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xf7f8f7f8f800f800; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00003f784000ff80; ++ *((unsigned long *)&__m256d_op1[1]) = 0xf7f8f7f84000fff9; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00003f784000ff80; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0xff800000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0xff800000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfcvt_s_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000555500005555; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000555500005555; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000555500005555; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000555500005555; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ 
__m256_out = __lasx_xvfcvt_s_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffb6804cb9; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffb7bbdec0; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffb680489b; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffb7bc02a0; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0xfffffffd; ++ *((int *)&__m256_result[4]) = 0xfffffffd; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0xfffffffd; ++ *((int *)&__m256_result[0]) = 0xfffffffd; ++ __m256_out = __lasx_xvfcvt_s_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0101010202020203; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0101010201010102; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0101010202020203; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0101010201010102; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfcvt_s_d (__m256d_op0, __m256d_op1); ++ 
ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256d_op1[2]) = 0x3fff3fff3fff3fc4; ++ *((unsigned long *)&__m256d_op1[1]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256d_op1[0]) = 0x3fff3fff3fff3fc4; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x3ff9fffa; ++ *((int *)&__m256_result[4]) = 0x3ff9fffa; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x3ff9fffa; ++ *((int *)&__m256_result[0]) = 0x3ff9fffa; ++ __m256_out = __lasx_xvfcvt_s_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvth.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvth.c +new file mode 100644 +index 000000000..001ce1c69 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvth.c +@@ -0,0 +1,485 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, 
unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfcvth_d_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfcvth_d_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((int *)&__m256_op0[7]) = 0x0000aaaa; ++ *((int *)&__m256_op0[6]) = 0x00008bfe; ++ *((int *)&__m256_op0[5]) = 0x0000aaaa; ++ *((int *)&__m256_op0[4]) = 0x0000aaaa; ++ *((int *)&__m256_op0[3]) = 0x0000aaaa; ++ *((int *)&__m256_op0[2]) = 0x00008bfe; ++ *((int *)&__m256_op0[1]) = 0x0000aaaa; ++ *((int *)&__m256_op0[0]) = 0x0000aaaa; ++ *((unsigned long *)&__m256d_result[3]) = 0x3795554000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x37917fc000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x3795554000000000; ++ *((unsigned long 
*)&__m256d_result[0]) = 0x37917fc000000000; ++ __m256d_out = __lasx_xvfcvth_d_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0404010008080808; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0408010008080808; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0404010008080808; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0408010008080808; ++ *((int *)&__m256_result[7]) = 0x38808000; ++ *((int *)&__m256_result[6]) = 0x37800000; ++ *((int *)&__m256_result[5]) = 0x39010000; ++ *((int *)&__m256_result[4]) = 0x39010000; ++ *((int *)&__m256_result[3]) = 0x38808000; ++ *((int *)&__m256_result[2]) = 0x37800000; ++ *((int *)&__m256_result[1]) = 0x39010000; ++ *((int *)&__m256_result[0]) = 0x39010000; ++ __m256_out = __lasx_xvfcvth_s_h (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfcvth_s_h (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfcvth_d_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfcvth_s_h (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfcvth_d_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int 
*)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffe0000000; ++ __m256d_out = __lasx_xvfcvth_d_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000100010001fffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000100010001fffe; ++ *((int *)&__m256_result[7]) = 0x80000000; ++ *((int *)&__m256_result[6]) = 0x80000000; ++ *((int *)&__m256_result[5]) = 0x80000000; ++ *((int *)&__m256_result[4]) = 0x80000000; ++ *((int *)&__m256_result[3]) = 0x80000000; ++ *((int *)&__m256_result[2]) = 0x80000000; ++ *((int *)&__m256_result[1]) = 0x80000000; ++ *((int *)&__m256_result[0]) = 0x80000000; ++ __m256_out = __lasx_xvfcvth_s_h (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000100; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfcvth_s_h (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00020006; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00020006; ++ *((int 
*)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00020006; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00020006; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x37b0003000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x37b0003000000000; ++ __m256d_out = __lasx_xvfcvth_d_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((int *)&__m256_op0[7]) = 0xfffffff0; ++ *((int *)&__m256_op0[6]) = 0xfffffff0; ++ *((int *)&__m256_op0[5]) = 0xfffffff0; ++ *((int *)&__m256_op0[4]) = 0xfffffff0; ++ *((int *)&__m256_op0[3]) = 0xfffffff0; ++ *((int *)&__m256_op0[2]) = 0xfffffff0; ++ *((int *)&__m256_op0[1]) = 0xfffffff0; ++ *((int *)&__m256_op0[0]) = 0xfffffff0; ++ *((unsigned long *)&__m256d_result[3]) = 0xfffffffe00000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xfffffffe00000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xfffffffe00000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xfffffffe00000000; ++ __m256d_out = __lasx_xvfcvth_d_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfcvtl_d_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 
0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffe0000000; ++ __m256d_out = __lasx_xvfcvtl_d_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfcvtl_d_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfcvtl_d_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, 
__m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf001000100010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf000000000000000; ++ *((int *)&__m256_result[7]) = 0xc6000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0xc6000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfcvtl_s_h (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc080ffff0049ffd2; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0002ff80ffb70000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000fffeffb9ff9d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00010000002fff9e; ++ *((int *)&__m256_result[7]) = 0x34000000; ++ *((int *)&__m256_result[6]) = 0xfff00000; ++ *((int *)&__m256_result[5]) = 0xfff6e000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x33800000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x363c0000; ++ *((int *)&__m256_result[0]) = 0xfff3c000; ++ __m256_out = __lasx_xvfcvtl_s_h (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfcvtl_d_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfcvtl_d_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfcvtl_s_h (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfcvtl_d_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((int *)&__m256_op0[7]) = 0x80000000; ++ *((int *)&__m256_op0[6]) = 0x80000000; ++ *((int *)&__m256_op0[5]) = 0x80000000; ++ *((int *)&__m256_op0[4]) = 0xff800000; ++ *((int *)&__m256_op0[3]) = 0x80000000; ++ *((int *)&__m256_op0[2]) = 0x80000000; ++ *((int *)&__m256_op0[1]) = 0x80000000; ++ *((int *)&__m256_op0[0]) = 0xff800000; ++ *((unsigned long *)&__m256d_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xfff0000000000000; ++ __m256d_out = __lasx_xvfcvtl_d_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfcvtl_d_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x41dfffffffc00000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc1d75053f0000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x41dfffffffc00000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc1d75053f0000000; ++ *((int *)&__m256_result[7]) = 0xc03ae000; ++ *((int 
*)&__m256_result[6]) = 0x420a6000; ++ *((int *)&__m256_result[5]) = 0xc6000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0xc03ae000; ++ *((int *)&__m256_result[2]) = 0x420a6000; ++ *((int *)&__m256_result[1]) = 0xc6000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfcvtl_s_h (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x03802fc000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x03802fc000000000; ++ *((int *)&__m256_result[7]) = 0x38600000; ++ *((int *)&__m256_result[6]) = 0x3df80000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x38600000; ++ *((int *)&__m256_result[2]) = 0x3df80000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfcvtl_s_h (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfcvtl_d_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ 
*((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfcvtl_d_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffe0000000; ++ __m256d_out = __lasx_xvfcvtl_d_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfcvtl_d_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ return 0; ++} +diff --git 
a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-1.c +new file mode 100644 +index 000000000..dd04fd788 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-1.c +@@ -0,0 +1,375 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xbff0000000000000; ++ __m256d_out = __lasx_xvffint_d_l (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000001700080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000001700080; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x4177000800000000; ++ *((unsigned long 
*)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x4177000800000000; ++ __m256d_out = __lasx_xvffint_d_l (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffint_d_l (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffint_d_l (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffint_d_l (__m256i_op0); ++ 
ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffint_d_l (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffint_d_l (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_result[3]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xc1f0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xc1f0000000000000; ++ __m256d_out = __lasx_xvffint_d_l (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffint_d_l (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256d_result[3]) = 0x437fe01fe01fe020; ++ *((unsigned long *)&__m256d_result[2]) = 0x437fe01fe01fe020; ++ *((unsigned long *)&__m256d_result[1]) = 0x437fe01fe01fe020; ++ *((unsigned long *)&__m256d_result[0]) = 0x437fe01fe01fe020; ++ __m256d_out = __lasx_xvffint_d_l (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x04e8296f18181818; ++ *((unsigned long *)&__m256i_op0[2]) = 0x132feea900000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x04e8296f18181818; ++ *((unsigned long *)&__m256i_op0[0]) = 0x132feea900000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x4393a0a5bc606060; ++ *((unsigned long *)&__m256d_result[2]) = 0x43b32feea9000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x4393a0a5bc606060; ++ *((unsigned long *)&__m256d_result[0]) = 0x43b32feea9000000; ++ __m256d_out = __lasx_xvffint_d_l (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x372e9d75e8aab100; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc5c085372cfabfba; ++ *((unsigned long *)&__m256i_op0[1]) = 0x31730b5beb7c99f5; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0658f2dc0eb21e3c; ++ *((int 
*)&__m256_result[7]) = 0x4e5cba76; ++ *((int *)&__m256_result[6]) = 0xcdbaaa78; ++ *((int *)&__m256_result[5]) = 0xce68fdeb; ++ *((int *)&__m256_result[4]) = 0x4e33eaff; ++ *((int *)&__m256_result[3]) = 0x4e45cc2d; ++ *((int *)&__m256_result[2]) = 0xcda41b30; ++ *((int *)&__m256_result[1]) = 0x4ccb1e5c; ++ *((int *)&__m256_result[0]) = 0x4d6b21e4; ++ __m256_out = __lasx_xvffint_s_w (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvffint_s_w (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvffint_s_w (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvffint_s_w (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvffint_s_w (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000007fff0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000007fff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000008000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x4efffe00; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x47000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x4efffe00; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x47000000; ++ __m256_out = __lasx_xvffint_s_w (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, 
__m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ff00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ff00; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x477f0000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x477f0000; ++ __m256_out = __lasx_xvffint_s_w (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0010001000030000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0010001000030000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0010001000030000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0010001000030000; ++ *((int *)&__m256_result[7]) = 0x49800080; ++ *((int *)&__m256_result[6]) = 0x48400000; ++ *((int *)&__m256_result[5]) = 0x49800080; ++ *((int *)&__m256_result[4]) = 0x48400000; ++ *((int *)&__m256_result[3]) = 0x49800080; ++ *((int *)&__m256_result[2]) = 0x48400000; ++ *((int *)&__m256_result[1]) = 0x49800080; ++ *((int *)&__m256_result[0]) = 0x48400000; ++ __m256_out = __lasx_xvffint_s_w (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int 
*)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvffint_s_l (__m256i_op0, __m256i_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x4f800000; ++ __m256_out = __lasx_xvffint_s_l (__m256i_op0, __m256i_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffc74180000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffff884580000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((int *)&__m256_result[7]) = 0xbf800000; ++ *((int *)&__m256_result[6]) = 0xbf800000; ++ *((int 
*)&__m256_result[5]) = 0xd662fa00; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0xbf800000; ++ *((int *)&__m256_result[2]) = 0xbf800000; ++ *((int *)&__m256_result[1]) = 0xd6ef7500; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvffint_s_l (__m256i_op0, __m256i_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvffint_s_l (__m256i_op0, __m256i_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000005000000020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000005000000020; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff000c0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff00040000; ++ *((int *)&__m256_result[7]) = 0xdf000000; ++ *((int *)&__m256_result[6]) = 0x52a00000; ++ *((int *)&__m256_result[5]) = 0x5b7f00ff; ++ *((int 
*)&__m256_result[4]) = 0x5b7f00ff; ++ *((int *)&__m256_result[3]) = 0xdf000000; ++ *((int *)&__m256_result[2]) = 0x52a00000; ++ *((int *)&__m256_result[1]) = 0x5b7f00ff; ++ *((int *)&__m256_result[0]) = 0x5b7f00ff; ++ __m256_out = __lasx_xvffint_s_l (__m256i_op0, __m256i_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((int *)&__m256_result[7]) = 0x5d20a0a1; ++ *((int *)&__m256_result[6]) = 0x5d20a0a1; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x5d20a0a1; ++ *((int *)&__m256_result[2]) = 0x5d20a0a1; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvffint_s_l (__m256i_op0, __m256i_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-2.c +new file mode 100644 +index 000000000..3e2b15507 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-2.c +@@ -0,0 +1,246 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, 
__m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x4370100000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x4370100000000000; ++ __m256d_out = __lasx_xvffint_d_lu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffint_d_lu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffint_d_lu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_op0[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x2020202020206431; ++ *((unsigned long *)&__m256d_result[3]) = 0x43c0101010101010; ++ *((unsigned long *)&__m256d_result[2]) = 0x43c0101010101032; ++ *((unsigned long *)&__m256d_result[1]) = 0x43c0101010101010; ++ *((unsigned long *)&__m256d_result[0]) = 0x43c0101010101032; ++ __m256d_out = __lasx_xvffint_d_lu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x40efffe09fa88260; ++ *((unsigned long *)&__m256i_op0[2]) = 0x6b07ca8e013fbf01; ++ *((unsigned long *)&__m256i_op0[1]) = 0x40efffe09fa7e358; ++ *((unsigned long *)&__m256i_op0[0]) = 0x80ce32be3e827f00; ++ *((unsigned long *)&__m256d_result[3]) = 0x43d03bfff827ea21; ++ *((unsigned long *)&__m256d_result[2]) = 0x43dac1f2a3804ff0; ++ *((unsigned long *)&__m256d_result[1]) = 0x43d03bfff827e9f9; ++ *((unsigned long *)&__m256d_result[0]) = 0x43e019c657c7d050; ++ __m256d_out = __lasx_xvffint_d_lu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0x43f0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x43f0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x43f0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x43f0000000000000; ++ __m256d_out = __lasx_xvffint_d_lu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ 
*((unsigned long *)&__m256i_op0[3]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x41f0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x41f0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x41f0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x41f0000000000000; ++ __m256d_out = __lasx_xvffint_d_lu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffint_d_lu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0202010202020102; ++ *((unsigned long *)&__m256d_result[3]) = 0x4380100810101008; ++ *((unsigned long *)&__m256d_result[2]) = 0x4380100810101008; ++ *((unsigned long *)&__m256d_result[1]) = 0x4380100810101008; ++ *((unsigned long *)&__m256d_result[0]) = 0x4380100810101008; ++ __m256d_out = __lasx_xvffint_d_lu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffint_d_lu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x41f0000000000000; ++ __m256d_out = __lasx_xvffint_d_lu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffbf7f00007fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffe651ffffbfff; ++ *((int *)&__m256_result[7]) = 0x4f800000; ++ *((int *)&__m256_result[6]) = 0x4f800000; ++ *((int *)&__m256_result[5]) = 0x4f7fffbf; ++ *((int *)&__m256_result[4]) = 0x46fffe00; ++ *((int *)&__m256_result[3]) = 0x4f800000; ++ *((int *)&__m256_result[2]) = 0x4f800000; ++ *((int *)&__m256_result[1]) = 0x4f7fffe6; ++ *((int *)&__m256_result[0]) = 0x4f7fffc0; ++ __m256_out = __lasx_xvffint_s_wu (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffefd; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 
0xffffffffffffffff; ++ *((int *)&__m256_result[7]) = 0x4b808080; ++ *((int *)&__m256_result[6]) = 0x4b808080; ++ *((int *)&__m256_result[5]) = 0x4f800000; ++ *((int *)&__m256_result[4]) = 0x4f7fffff; ++ *((int *)&__m256_result[3]) = 0x4b808080; ++ *((int *)&__m256_result[2]) = 0x4b808080; ++ *((int *)&__m256_result[1]) = 0x4f800000; ++ *((int *)&__m256_result[0]) = 0x4f800000; ++ __m256_out = __lasx_xvffint_s_wu (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvffint_s_wu (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvffint_s_wu (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000008; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000008; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x41000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x41000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x41000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x41000000; ++ __m256_out = __lasx_xvffint_s_wu (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000020; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x42800000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x42000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x42800000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x42000000; ++ __m256_out = __lasx_xvffint_s_wu (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvffint_s_wu (__m256i_op0); 
++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffinth.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffinth.c +new file mode 100644 +index 000000000..e310ff5ee +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffinth.c +@@ -0,0 +1,262 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0e2d5626ff75cdbc; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5db4b156e2002a78; ++ *((unsigned long *)&__m256i_op0[1]) = 0xeeffbeb03ba3e6b0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0c16e25eb28d27ea; ++ *((unsigned long *)&__m256d_result[3]) = 0x41ac5aac4c000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xc161464880000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xc1b1004150000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x41cdd1f358000000; ++ __m256d_out = __lasx_xvffinth_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000006f0000007f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000006f0000007f; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffinth_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffinth_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffinth_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xe161616161616161; ++ *((unsigned long *)&__m256i_op0[2]) = 0xe161616161614e60; ++ *((unsigned long *)&__m256i_op0[1]) = 0xe161616161616161; ++ *((unsigned long *)&__m256i_op0[0]) = 0xe161616161614e60; ++ *((unsigned long *)&__m256d_result[3]) = 0xc1be9e9e9f000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x41d8585858400000; ++ *((unsigned long *)&__m256d_result[1]) = 0xc1be9e9e9f000000; ++ *((unsigned long *)&__m256d_result[0]) = 
0x41d8585858400000; ++ __m256d_out = __lasx_xvffinth_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fff7fff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff00000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x41dfffc000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x41dfffdfffc00000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffinth_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000007f3a40; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffinth_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffinth_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0xfffffffffb79fb74; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffb79fb74; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m256d_result[3]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xc192181230000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xc192181230000000; ++ __m256d_out = __lasx_xvffinth_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xbff0000000000000; ++ __m256d_out = __lasx_xvffinth_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000ffffffff00; ++ *((unsigned long *)&__m256d_result[3]) = 0x40efffe000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x40efffe000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffinth_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x41dffc0000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x41dffc0000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffinth_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffe7ffffffe7; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffe7ffffffe7; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffe7ffffffe7; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffe7ffffffe7; ++ *((unsigned long *)&__m256d_result[3]) = 0xc039000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xc039000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xc039000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xc039000000000000; ++ __m256d_out = __lasx_xvffinth_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffinth_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5980000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5980000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x41d6600000000000; ++ *((unsigned long *)&__m256d_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x41d6600000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffintl_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffintl_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000022beb03f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffa2beb040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000022beb03f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffa2beb040; ++ *((unsigned long *)&__m256d_result[3]) = 0x41dfffffffc00000; ++ *((unsigned long *)&__m256d_result[2]) = 0xc1d75053f0000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x41dfffffffc00000; ++ *((unsigned long *)&__m256d_result[0]) = 0xc1d75053f0000000; ++ __m256d_out = __lasx_xvffintl_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000001f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000001f; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x403f000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x403f000000000000; ++ __m256d_out = 
__lasx_xvffintl_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffintl_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00f7000000f70006; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00f7000000f70006; ++ *((unsigned long *)&__m256d_result[3]) = 0x416ee00000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x416ee000c0000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x416ee00000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x416ee000c0000000; ++ __m256d_out = __lasx_xvffintl_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff000000000080; ++ *((unsigned long *)&__m256d_result[3]) = 0x416fe00000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x4060000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x416fe00000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x4060000000000000; ++ __m256d_out = __lasx_xvffintl_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffc01fc01; ++ 
*((unsigned long *)&__m256i_op0[2]) = 0x000000003fc03bbc; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffc01fc01; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000003fc03bbc; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x41cfe01dde000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x41cfe01dde000000; ++ __m256d_out = __lasx_xvffintl_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_d.c +new file mode 100644 +index 000000000..4babf1638 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_d.c +@@ -0,0 +1,429 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++/* { dg-timeout 500 } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 
0x0000000000000000; ++ __m256d_out = __lasx_xvfrint_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x0); ++ *((unsigned long *)&__m256d_op0[3]) = 0xfffefffe00000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xfffefffefffefffd; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0xfffefffe00000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xfffefffefffefffd; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfrint_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000008050501; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000008050501; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfrint_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff; ++ __m256d_out = __lasx_xvfrint_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, 
__m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000008; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfrint_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256d_op0[1]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256d_result[3]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256d_result[2]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256d_result[1]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256d_result[0]) = 0xfffffffffffffff8; ++ __m256d_out = __lasx_xvfrint_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfrintrne_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000080008001; ++ *((unsigned long 
*)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000080008001; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfrintrne_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x7c00000880008000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x7c00000880008000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7c00000880008000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7c00000880008000; ++ __m256d_out = __lasx_xvfrintrne_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfrintrne_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x6018000000000cd1; ++ *((unsigned long *)&__m256d_op0[2]) = 0x6040190d00000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x6018000000000cd1; ++ *((unsigned long *)&__m256d_op0[0]) = 0x6040190d00000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x6018000000000cd1; 
++ *((unsigned long *)&__m256d_result[2]) = 0x6040190d00000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x6018000000000cd1; ++ *((unsigned long *)&__m256d_result[0]) = 0x6040190d00000000; ++ __m256d_out = __lasx_xvfrintrne_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x3eab77367fff4848; ++ *((unsigned long *)&__m256d_op0[2]) = 0x408480007fff0000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x3eab77367fff4848; ++ *((unsigned long *)&__m256d_op0[0]) = 0x408480007fff0000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x4084800000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x4084800000000000; ++ __m256d_out = __lasx_xvfrintrne_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffff0001ffff0001; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffff0001ffff0001; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffff0001ffff0001; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffff0001ffff0001; ++ *((unsigned long *)&__m256d_result[3]) = 0xffff0001ffff0001; ++ *((unsigned long *)&__m256d_result[2]) = 0xffff0001ffff0001; ++ *((unsigned long *)&__m256d_result[1]) = 0xffff0001ffff0001; ++ *((unsigned long *)&__m256d_result[0]) = 0xffff0001ffff0001; ++ __m256d_out = __lasx_xvfrintrne_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x3fffbfff80000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00004000007f8000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x3fffbfff80000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00004000007f8000; ++ *((unsigned long *)&__m256d_result[3]) = 0x4000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x4000000000000000; ++ *((unsigned long 
*)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfrintrne_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfrintrp_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000ffff00010000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000ffff00010000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x3ff0000000000000; ++ __m256d_out = __lasx_xvfrintrp_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfffffefe00000000; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[2]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[0]) = 0xfffffefe00000000; ++ __m256d_out = __lasx_xvfrintrp_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned 
long *)&__m256d_op0[3]) = 0x000100da000100fd; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0001ffe20001fefd; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0001009a000100fd; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0001ff640001fefd; ++ *((unsigned long *)&__m256d_result[3]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x3ff0000000000000; ++ __m256d_out = __lasx_xvfrintrp_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff; ++ __m256d_out = __lasx_xvfrintrp_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m256d_op0[2]) = 0x01fc03fc01fc03fc; ++ *((unsigned long *)&__m256d_op0[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m256d_op0[0]) = 0x01fc03fc01fc03fc; ++ *((unsigned long *)&__m256d_result[3]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m256d_result[2]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m256d_result[0]) = 0x3ff0000000000000; ++ __m256d_out = __lasx_xvfrintrp_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0218ff78fc38fc38; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfc00000000000048; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0218ff78fc38fc38; 
++ *((unsigned long *)&__m256d_op0[0]) = 0xfc00000000000048; ++ *((unsigned long *)&__m256d_result[3]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xfc00000000000048; ++ *((unsigned long *)&__m256d_result[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xfc00000000000048; ++ __m256d_out = __lasx_xvfrintrp_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x8000800080008000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x8000800080008000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x8000000000000000; ++ __m256d_out = __lasx_xvfrintrp_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xfffffff0fffffff0; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfffffff0fffffff0; ++ *((unsigned long *)&__m256d_op0[1]) = 0xfffffff0fffffff0; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfffffff0fffffff0; ++ *((unsigned long *)&__m256d_result[3]) = 0xfffffff0fffffff0; ++ *((unsigned long *)&__m256d_result[2]) = 0xfffffff0fffffff0; ++ *((unsigned long *)&__m256d_result[1]) = 0xfffffff0fffffff0; ++ *((unsigned long *)&__m256d_result[0]) = 0xfffffff0fffffff0; ++ __m256d_out = __lasx_xvfrintrp_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfrintrm_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x017e017e01dd61de; ++ *((unsigned long *)&__m256d_op0[2]) = 0x5d637d043bc4fc43; ++ *((unsigned long *)&__m256d_op0[1]) = 0x01dcc2dce31bc35d; ++ *((unsigned long *)&__m256d_op0[0]) = 0x5e041d245b85fc43; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x5d637d043bc4fc43; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x5e041d245b85fc43; ++ __m256d_out = __lasx_xvfrintrm_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256d_op0[2]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256d_op0[1]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256d_op0[0]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256d_result[3]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256d_result[2]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256d_result[1]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256d_result[0]) = 0x7c007c007c007c00; ++ __m256d_out = __lasx_xvfrintrm_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff; ++ __m256d_out = 
__lasx_xvfrintrm_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfrintrm_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfrintrm_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x5); ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfrintrm_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ 
*((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff; ++ __m256d_out = __lasx_xvfrintrz_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000800000098; ++ *((unsigned long *)&__m256d_op0[2]) = 0x000000040000ffca; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000800000098; ++ *((unsigned long *)&__m256d_op0[0]) = 0x000000040000ff79; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfrintrz_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfrintrz_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 
0x7ff0000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrintrz_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000064; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000781; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000064; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfrintrz_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x000000001ffe2000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x001fe020001fe020; ++ *((unsigned long *)&__m256d_op0[1]) = 0x000000001ffe2000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x001fe020001fe020; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfrintrz_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfrintrz_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_s.c +new file mode 100644 +index 000000000..9f2fa6747 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_s.c +@@ -0,0 +1,723 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++/* { dg-timeout 500 } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffff5f5c; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffff605a; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffff5f5c; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffff605a; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffff5f5c; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffff605a; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffff5f5c; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffff605a; ++ __m256_out = __lasx_xvfrint_s (__m256_op0); ++ 
ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xc5c5c5c4; ++ *((int *)&__m256_op0[6]) = 0xc5c5c5c4; ++ *((int *)&__m256_op0[5]) = 0x45c5c5c5; ++ *((int *)&__m256_op0[4]) = 0x45c5c5c5; ++ *((int *)&__m256_op0[3]) = 0xc5c5c5c4; ++ *((int *)&__m256_op0[2]) = 0xc5c5c5c4; ++ *((int *)&__m256_op0[1]) = 0x45c5c5c5; ++ *((int *)&__m256_op0[0]) = 0x45c5c5c5; ++ *((int *)&__m256_result[7]) = 0xc5c5c800; ++ *((int *)&__m256_result[6]) = 0xc5c5c800; ++ *((int *)&__m256_result[5]) = 0x45c5c800; ++ *((int *)&__m256_result[4]) = 0x45c5c800; ++ *((int *)&__m256_result[3]) = 0xc5c5c800; ++ *((int *)&__m256_result[2]) = 0xc5c5c800; ++ *((int *)&__m256_result[1]) = 0x45c5c800; ++ *((int *)&__m256_result[0]) = 0x45c5c800; ++ __m256_out = __lasx_xvfrint_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfrint_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int 
*)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrint_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0xffff6f20; ++ *((int *)&__m256_op0[5]) = 0x0000781e; ++ *((int *)&__m256_op0[4]) = 0x0000f221; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0xffff6f20; ++ *((int *)&__m256_op0[1]) = 0x0000781e; ++ *((int *)&__m256_op0[0]) = 0x0000f221; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0xffff6f20; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0xffff6f20; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrint_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0xffffb3b4; ++ *((int *)&__m256_op0[5]) = 0xfffffff5; ++ *((int *)&__m256_op0[4]) = 0xffff4738; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0xffffb3b4; ++ *((int *)&__m256_op0[1]) = 0xfffffff5; ++ *((int *)&__m256_op0[0]) = 0xffff4738; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0xffffb3b4; ++ *((int *)&__m256_result[5]) = 0xfffffff5; ++ *((int *)&__m256_result[4]) = 0xffff4738; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0xffffb3b4; ++ *((int *)&__m256_result[1]) = 0xfffffff5; ++ *((int *)&__m256_result[0]) = 0xffff4738; ++ __m256_out = __lasx_xvfrint_s (__m256_op0); ++ 
ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00ff0000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00ff0000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00ff0000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00ff0000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrint_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00003fea; ++ *((int *)&__m256_op0[6]) = 0x00013feb; ++ *((int *)&__m256_op0[5]) = 0x00003fe9; ++ *((int *)&__m256_op0[4]) = 0x00014022; ++ *((int *)&__m256_op0[3]) = 0x00003fea; ++ *((int *)&__m256_op0[2]) = 0x00013feb; ++ *((int *)&__m256_op0[1]) = 0x00003fe9; ++ *((int *)&__m256_op0[0]) = 0x00014022; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrint_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int 
*)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrintrne_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrintrne_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfrintrne_s 
(__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x01010101; ++ *((int *)&__m256_op0[6]) = 0x01010101; ++ *((int *)&__m256_op0[5]) = 0x01010101; ++ *((int *)&__m256_op0[4]) = 0x00000001; ++ *((int *)&__m256_op0[3]) = 0x01010101; ++ *((int *)&__m256_op0[2]) = 0x01010101; ++ *((int *)&__m256_op0[1]) = 0x01010101; ++ *((int *)&__m256_op0[0]) = 0x00000001; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrintrne_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrintrne_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) 
= 0x00000000; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrintrne_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfrintrne_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x01010101; ++ *((int *)&__m256_op0[6]) = 0x01010101; ++ *((int *)&__m256_op0[5]) = 0x01010101; ++ *((int *)&__m256_op0[4]) = 0x00000001; ++ *((int *)&__m256_op0[3]) = 0x01010101; ++ *((int *)&__m256_op0[2]) = 0x01010101; ++ *((int *)&__m256_op0[1]) = 0x01010101; ++ *((int *)&__m256_op0[0]) = 0x00000001; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = 
__lasx_xvfrintrne_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x55555555; ++ *((int *)&__m256_op0[6]) = 0x36aaaaac; ++ *((int *)&__m256_op0[5]) = 0x55555555; ++ *((int *)&__m256_op0[4]) = 0xaaaaaaac; ++ *((int *)&__m256_op0[3]) = 0x55555555; ++ *((int *)&__m256_op0[2]) = 0x36aaaaac; ++ *((int *)&__m256_op0[1]) = 0x55555555; ++ *((int *)&__m256_op0[0]) = 0xaaaaaaac; ++ *((int *)&__m256_result[7]) = 0x55555555; ++ *((int *)&__m256_result[6]) = 0x3f800000; ++ *((int *)&__m256_result[5]) = 0x55555555; ++ *((int *)&__m256_result[4]) = 0x80000000; ++ *((int *)&__m256_result[3]) = 0x55555555; ++ *((int *)&__m256_result[2]) = 0x3f800000; ++ *((int *)&__m256_result[1]) = 0x55555555; ++ *((int *)&__m256_result[0]) = 0x80000000; ++ __m256_out = __lasx_xvfrintrp_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrintrp_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffc741; ++ *((int *)&__m256_op0[6]) = 0x8a023680; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0xffff8845; ++ *((int *)&__m256_op0[2]) = 0xbb954b00; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int 
*)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0xffffc741; ++ *((int *)&__m256_result[6]) = 0x80000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0xffff8845; ++ *((int *)&__m256_result[2]) = 0x80000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrintrp_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfrintrp_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00200101; ++ *((int *)&__m256_op0[6]) = 0x01610000; ++ *((int *)&__m256_op0[5]) = 0x00612000; ++ *((int *)&__m256_op0[4]) = 0x00610000; ++ *((int *)&__m256_op0[3]) = 0x00200101; ++ *((int *)&__m256_op0[2]) = 0x01610000; ++ *((int *)&__m256_op0[1]) = 0x00612000; ++ *((int *)&__m256_op0[0]) = 0x00610000; ++ *((int *)&__m256_result[7]) = 0x3f800000; ++ *((int *)&__m256_result[6]) = 0x3f800000; ++ *((int *)&__m256_result[5]) = 0x3f800000; ++ *((int *)&__m256_result[4]) = 0x3f800000; ++ *((int *)&__m256_result[3]) = 0x3f800000; ++ *((int *)&__m256_result[2]) = 0x3f800000; ++ *((int *)&__m256_result[1]) = 0x3f800000; ++ *((int *)&__m256_result[0]) = 0x3f800000; ++ 
__m256_out = __lasx_xvfrintrp_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xfefefefe; ++ *((int *)&__m256_op0[4]) = 0x01010101; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xfefefefe; ++ *((int *)&__m256_op0[0]) = 0x01010101; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0xfefefefe; ++ *((int *)&__m256_result[4]) = 0x3f800000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0xfefefefe; ++ *((int *)&__m256_result[0]) = 0x3f800000; ++ __m256_out = __lasx_xvfrintrp_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x1c1c1c1c; ++ *((int *)&__m256_op0[6]) = 0x1c1c1c1c; ++ *((int *)&__m256_op0[5]) = 0xfffffffe; ++ *((int *)&__m256_op0[4]) = 0xffffff00; ++ *((int *)&__m256_op0[3]) = 0x1c1c1c1c; ++ *((int *)&__m256_op0[2]) = 0x1c1c1c1c; ++ *((int *)&__m256_op0[1]) = 0xfffffffe; ++ *((int *)&__m256_op0[0]) = 0xffffff00; ++ *((int *)&__m256_result[7]) = 0x3f800000; ++ *((int *)&__m256_result[6]) = 0x3f800000; ++ *((int *)&__m256_result[5]) = 0xfffffffe; ++ *((int *)&__m256_result[4]) = 0xffffff00; ++ *((int *)&__m256_result[3]) = 0x3f800000; ++ *((int *)&__m256_result[2]) = 0x3f800000; ++ *((int *)&__m256_result[1]) = 0xfffffffe; ++ *((int *)&__m256_result[0]) = 0xffffff00; ++ __m256_out = __lasx_xvfrintrp_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000008; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 
0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00080000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrintrm_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrintrm_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0x0000ffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0x0000ffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int 
*)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfrintrm_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x5d20a0a1; ++ *((int *)&__m256_op0[6]) = 0x5d20a0a1; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x5d20a0a1; ++ *((int *)&__m256_op0[2]) = 0x5d20a0a1; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x5d20a0a1; ++ *((int *)&__m256_result[6]) = 0x5d20a0a1; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x5d20a0a1; ++ *((int *)&__m256_result[2]) = 0x5d20a0a1; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrintrm_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x001d001d; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x001d001d; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrintrm_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000033; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ 
*((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000033; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrintrm_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000001; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000001; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrintrm_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000300; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000303; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; 
++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrintrz_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xfffffffe; ++ *((int *)&__m256_op0[5]) = 0xfffffffe; ++ *((int *)&__m256_op0[4]) = 0xfffffefc; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xfffffffe; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xfffffffe; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xfffffffe; ++ *((int *)&__m256_result[5]) = 0xfffffffe; ++ *((int *)&__m256_result[4]) = 0xfffffefc; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xfffffffe; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xfffffffe; ++ __m256_out = __lasx_xvfrintrz_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrintrz_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x0001c4e8; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 
0x00000000; ++ *((int *)&__m256_op0[1]) = 0x0001c4e8; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfrintrz_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x80000000; ++ *((int *)&__m256_op0[6]) = 0x80000000; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0x80000000; ++ *((int *)&__m256_op0[2]) = 0x80000000; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_result[7]) = 0x80000000; ++ *((int *)&__m256_result[6]) = 0x80000000; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0x80000000; ++ *((int *)&__m256_result[2]) = 0x80000000; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfrintrz_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int 
*)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfrintrz_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xf5fffc00; ++ *((int *)&__m256_op0[6]) = 0xfc000000; ++ *((int *)&__m256_op0[5]) = 0xf5fffc00; ++ *((int *)&__m256_op0[4]) = 0xfc000000; ++ *((int *)&__m256_op0[3]) = 0xf5fffc00; ++ *((int *)&__m256_op0[2]) = 0xfc000000; ++ *((int *)&__m256_op0[1]) = 0xf5fffc00; ++ *((int *)&__m256_op0[0]) = 0xfc000000; ++ *((int *)&__m256_result[7]) = 0xf5fffc00; ++ *((int *)&__m256_result[6]) = 0xfc000000; ++ *((int *)&__m256_result[5]) = 0xf5fffc00; ++ *((int *)&__m256_result[4]) = 0xfc000000; ++ *((int *)&__m256_result[3]) = 0xf5fffc00; ++ *((int *)&__m256_result[2]) = 0xfc000000; ++ *((int *)&__m256_result[1]) = 0xf5fffc00; ++ *((int *)&__m256_result[0]) = 0xfc000000; ++ __m256_out = __lasx_xvfrintrz_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-1.c +new file mode 100644 +index 000000000..c75468d42 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-1.c +@@ -0,0 +1,471 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ 
*((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffcb423a587053; ++ *((unsigned long *)&__m256d_op0[2]) = 0x6d46f43e71141b81; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffcb423a584528; ++ *((unsigned long *)&__m256d_op0[0]) = 0x9bdf36c8d78158a1; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x386000003df80000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x386000003df80000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x555555553f800000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x555555553f800000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x7fffffffffffffff; ++ __m256i_out = __lasx_xvftintrne_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000080; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256d_op0[3]) = 0x43f0000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x43f0000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x43f0000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x43f0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvftintrne_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffc03b1fc5e050; ++ *((unsigned long *)&__m256d_op0[2]) = 0x6a9e3fa2603a2000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffc03b1fc5e050; ++ *((unsigned long *)&__m256d_op0[0]) = 0x6a9e3fa2603a2000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvftintrne_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256d_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned 
long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvftintrp_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x7f70000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x7f70000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x7f70000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x7f70000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvftintrp_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256d_op0[1]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x8000800080008000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00007f8000007f80; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00007f8000007f80; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00007f8000007f80; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00007f8000007f80; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x555555553f800000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x555555553f800000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvftintrm_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; 
++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00000000001c9880; ++ *((unsigned long *)&__m256d_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00000000001c9880; ++ *((unsigned long *)&__m256d_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x1828f0e09bad7249; ++ *((unsigned long *)&__m256d_op0[2]) = 0x07ffc1b723953cec; ++ *((unsigned long *)&__m256d_op0[1]) = 0x61f2e9b333aab104; ++ *((unsigned long *)&__m256d_op0[0]) = 0x6bf742aa0d7856a0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvftintrz_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000ffff00010000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000ffff00010000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00ffffff1e9e9e9e; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffff9e9eb09e; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00ffffff1e9e9e9e; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffff9e9eb09e; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256d_op0[3]) = 0x0000001e0007ffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000001e0007ffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000001e0007ffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000001e0007ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-2.c +new file mode 100644 +index 000000000..ad72f7596 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-2.c +@@ -0,0 +1,1565 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, 
long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0x0000ffff; ++ *((int *)&__m256_op0[6]) = 0xc0008001; ++ *((int *)&__m256_op0[5]) = 0x0000ffff; ++ *((int *)&__m256_op0[4]) = 0xc0008001; ++ *((int *)&__m256_op0[3]) = 0x0000ffff; ++ *((int *)&__m256_op0[2]) = 0xc0008001; ++ *((int *)&__m256_op0[1]) = 0x0000ffff; ++ *((int *)&__m256_op0[0]) = 0xc0008001; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fffffffe; ++ __m256i_out = __lasx_xvftint_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x3f3f3f3c; ++ *((int *)&__m256_op0[5]) = 0xc6c6c6c6; ++ *((int *)&__m256_op0[4]) = 0x8787878a; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x3f3f3f3c; ++ *((int *)&__m256_op0[1]) = 0x8787878a; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff9c9d00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x1f0fdf7f; ++ *((int *)&__m256_op0[6]) = 0x3e3b31d4; ++ *((int *)&__m256_op0[5]) = 0x7ff80000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x1f0fdf7f; ++ *((int *)&__m256_op0[2]) = 0x3e3b31d4; ++ *((int *)&__m256_op0[1]) = 0x7ff80000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000200000003; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000ffff00010002; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0080000200000003; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000ffff00010002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x55555555; ++ *((int 
*)&__m256_op0[5]) = 0x00000001; ++ *((int *)&__m256_op0[4]) = 0x00000004; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x55555555; ++ *((int *)&__m256_op0[1]) = 0x00000001; ++ *((int *)&__m256_op0[0]) = 0x00000004; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00ff00ffff0000ff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00ff00ffff0000ff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x7fe36364661af18f; ++ *((unsigned long *)&__m256d_op0[2]) = 0x7fe363637fe36364; ++ *((unsigned long *)&__m256d_op0[1]) = 0x7fe36364661af18f; ++ *((unsigned long *)&__m256d_op0[0]) = 0x7fe363637fe36364; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xfffffffffffffff5; ++ 
*((unsigned long *)&__m256d_op0[2]) = 0xfffffffffffffff5; ++ *((unsigned long *)&__m256d_op0[1]) = 0xfffffffffffffff5; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfffffffffffffff5; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256d_op0[1]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000007; ++ *((unsigned long *)&__m256d_op0[2]) = 0x000000020000000b; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000007; ++ *((unsigned long *)&__m256d_op0[0]) = 0x000000020000000a; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256d_op1[2]) = 0x000000000000000a; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x000000000000000a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000505; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x40000000; 
++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x40000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000200000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000200000000; ++ __m256i_out = __lasx_xvftintrne_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x108659e46485f7e1; ++ *((unsigned long *)&__m256d_op1[2]) = 0x4df5b1a3ed5e02c1; ++ *((unsigned long *)&__m256d_op1[1]) = 0x108659e46485f7e1; ++ *((unsigned long *)&__m256d_op1[0]) = 0x4df5b1a3ed5e02c1; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x000000007fffffff; ++ __m256i_out = __lasx_xvftintrne_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000004; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000004040104; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffd1108199; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000714910f9; ++ *((unsigned long *)&__m256d_op1[3]) = 0x000000030000000c; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000001100000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000500000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000800000010; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffff7e; ++ *((int *)&__m256_op0[4]) = 0xffffff46; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffff7e; ++ *((int *)&__m256_op0[0]) = 0xffffff46; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ 
*((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffe5ffffffe5; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffe5ffffffe5; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffe5ffffffe5; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffe5ffffffe5; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffe5ffffffe5; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffe5ffffffe5; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffe5ffffffe5; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffe5ffffffe5; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000001000000010; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000001000000010; ++ *((unsigned long *)&__m256d_op1[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffff7fffffff; ++ __m256i_out = __lasx_xvftintrne_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x00000001ffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00000001ffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x00000001ffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00000001ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x000000017bfffff0; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000180007fe8; ++ *((unsigned long *)&__m256d_op0[1]) = 0x000000017bfffff0; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000180007fe8; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x7c00000880008000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x7c00000880008000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007fffffff; ++ __m256i_out = __lasx_xvftintrne_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256d_op0[2]) = 0x00000000000007c8; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000000007c8; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000001fe01fe; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000ff0100; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000001fe01fe; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000ff0100; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x0fffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0x0fffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0x0fffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0x0fffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x000000010000685e; ++ *((unsigned long *)&__m256d_op1[2]) = 0x000020a4ffffbe4f; ++ *((unsigned long *)&__m256d_op1[1]) = 0x000000010000685e; ++ *((unsigned long *)&__m256d_op1[0]) = 0x000020a4ffffbe4f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000800000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xfd12fd12; ++ *((int *)&__m256_op0[6]) = 0xfd12fd12; ++ *((int *)&__m256_op0[5]) = 0xfd12fd12; ++ *((int *)&__m256_op0[4]) = 0xfd12fd12; ++ *((int *)&__m256_op0[3]) = 0xfd12fd12; ++ *((int *)&__m256_op0[2]) = 0xfd12fd12; ++ *((int *)&__m256_op0[1]) = 0xfd12fd12; ++ *((int *)&__m256_op0[0]) = 0xfd12fd12; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000; ++ __m256i_out = __lasx_xvftintrne_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m256d_op1[2]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m256d_op1[1]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m256d_op1[0]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffe4ffe6; ++ *((int *)&__m256_op0[6]) = 0xffe5ffe6; ++ *((int *)&__m256_op0[5]) = 0xffe4ffe6; ++ *((int *)&__m256_op0[4]) = 0xffe5ffe6; ++ *((int *)&__m256_op0[3]) = 0xffe4ffe6; ++ *((int *)&__m256_op0[2]) = 0xffe5ffe6; ++ *((int *)&__m256_op0[1]) = 0xffe4ffe6; ++ *((int *)&__m256_op0[0]) = 0xffe5ffe6; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000001; ++ *((int *)&__m256_op0[4]) = 0x00010102; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x80008000; ++ *((int *)&__m256_op0[6]) = 0x80008000; ++ *((int *)&__m256_op0[5]) = 0x80008000; ++ *((int *)&__m256_op0[4]) = 0x80008000; ++ *((int *)&__m256_op0[3]) = 0x80008000; ++ *((int *)&__m256_op0[2]) = 0x80008000; ++ *((int *)&__m256_op0[1]) = 0x80008000; ++ *((int *)&__m256_op0[0]) = 0x80008000; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x10000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x10000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000001; ++ *((unsigned long 
*)&__m256d_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvftintrp_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00ff00ff; ++ *((int *)&__m256_op0[6]) = 0x00ff00ff; ++ *((int *)&__m256_op0[5]) = 0x00ff00ff; ++ *((int *)&__m256_op0[4]) = 0x00ff00ff; ++ *((int *)&__m256_op0[3]) = 0x00ff00ff; ++ *((int *)&__m256_op0[2]) = 0x00ff00ff; ++ *((int *)&__m256_op0[1]) = 0x00ff00ff; ++ *((int *)&__m256_op0[0]) = 0x00ff00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvftintrp_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0010001000100010; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0010001000107878; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0010001000107878; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvftintrp_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0040000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0040000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0040000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0040000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0005000500050005; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0005000500050005; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256d_op1[3]) = 0x00003fea00013fec; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00003fe50001c013; ++ *((unsigned long *)&__m256d_op1[1]) = 0x00003fea00013fec; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00003fe50001c013; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000180000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000180000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvftintrp_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffff000000010000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000095120000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xc9da000063f50000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xc7387fff6bbfffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0xfffe000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000ffff00010000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x4001000100020000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000200000000; ++ __m256i_out = __lasx_xvftintrm_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned 
long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_s 
(__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xfffefffe; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0xfffefffe; ++ *((int *)&__m256_op0[2]) = 0xfffefffd; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x0707feb6; ++ *((int *)&__m256_op0[6]) = 0x0707b7d0; ++ *((int *)&__m256_op0[5]) = 0x45baa7ef; ++ *((int *)&__m256_op0[4]) = 0x6a95a985; ++ *((int *)&__m256_op0[3]) = 0x0707feb6; ++ *((int *)&__m256_op0[2]) = 0x0707b7d0; ++ *((int *)&__m256_op0[1]) = 0x45baa7ef; ++ *((int *)&__m256_op0[0]) = 0x6a95a985; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000017547fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000017547fffffff; ++ __m256i_out = __lasx_xvftintrm_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x6d6d6d6d; ++ *((int *)&__m256_op0[6]) = 0x6d6d6d6d; ++ *((int *)&__m256_op0[5]) = 0x6d6d6d6d; ++ *((int *)&__m256_op0[4]) = 0x6d6d6d6d; ++ *((int *)&__m256_op0[3]) = 0x6d6d6d6d; ++ *((int *)&__m256_op0[2]) = 0x6d6d6d6d; ++ *((int *)&__m256_op0[1]) = 0x6d6d6d6d; ++ *((int *)&__m256_op0[0]) = 0x6d6d6d6d; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffff7fffffff; ++ __m256i_out = __lasx_xvftintrm_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int 
*)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xefdfefdf00000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xefdfefdfefdfefdf; ++ *((unsigned long *)&__m256d_op0[1]) = 0xefdfefdf00000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xefdfefdfefdfefdf; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_s (__m256_op0); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256d_op1[3]) = 0x00000000c0000005; ++ *((unsigned long *)&__m256d_op1[2]) = 0x21f8c3c4c0000005; ++ *((unsigned long *)&__m256d_op1[1]) = 0x00000000c0000005; ++ *((unsigned long *)&__m256d_op1[0]) = 0x21f8c3c4c0000005; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x00000000ffe36780; ++ *((unsigned long *)&__m256d_op1[2]) = 0x8000000100000001; ++ *((unsigned long *)&__m256d_op1[1]) = 0x00000000ffe36780; ++ *((unsigned long *)&__m256d_op1[0]) = 0x8000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvftintrm_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 
0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xfff10000; ++ *((int *)&__m256_op0[4]) = 0xfff10000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xfff10000; ++ *((int *)&__m256_op0[0]) = 0xfff10000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvftintrm_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0xfdfcfda8; ++ *((int *)&__m256_op0[5]) = 0x0000e282; ++ *((int *)&__m256_op0[4]) = 0x1d20ffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0xfdfcfda8; ++ *((int *)&__m256_op0[1]) = 0x0000e282; ++ *((int *)&__m256_op0[0]) = 0x1d20ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000080000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000080000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0080000000800000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0080000000800000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0080000000800000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0080000000800000; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x1fa0000000080000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x1fa0000000080000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x1fa0000000080000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x1fa0000000080000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 
0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256d_op0[2]) = 0xff00ff007f007f00; ++ *((unsigned long *)&__m256d_op0[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256d_op0[0]) = 0xff00ff007f007f00; ++ *((unsigned long *)&__m256d_op1[3]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256d_op1[2]) = 0xff00ff007f007f00; ++ *((unsigned long *)&__m256d_op1[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256d_op1[0]) = 0xff00ff007f007f00; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000; ++ __m256i_out = __lasx_xvftintrm_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000ffff0002fffc; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffff0000fffd0003; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000ffff0002fffc; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffff0000fffd0003; ++ *((unsigned long *)&__m256d_op1[3]) = 0x003f020001400200; ++ *((unsigned long *)&__m256d_op1[2]) = 0x003f00ff003f00c4; ++ *((unsigned long *)&__m256d_op1[1]) = 0x003f020001400200; ++ *((unsigned long *)&__m256d_op1[0]) = 
0x003f00ff003f00c4; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int 
*)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfffffff0ffff0000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfffffff0ffff0000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256d_op1[2]) = 0x3ff1808001020101; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256d_op1[0]) = 0x3ff1808001020101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvftintrz_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000001; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x002e2100; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) 
= 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x34000000fff00000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfff6e00000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x3380000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x363c0000fff3c000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x000000030000000c; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000001100000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000500000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000800000010; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0xa5a5a5a5a5a5a5a5; ++ *((unsigned long *)&__m256d_op1[2]) = 0xa5a5a5a5a5a5a5ff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xa5a5a5a5a5a5a5a5; ++ *((unsigned long *)&__m256d_op1[0]) = 0xa5a5a5a5a5a5a5ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x55555555; ++ *((int *)&__m256_op0[6]) = 0x55555555; ++ *((int *)&__m256_op0[5]) = 0x5d5d5d5d; ++ *((int *)&__m256_op0[4]) = 0x5d555d55; ++ *((int *)&__m256_op0[3]) = 0x55555555; ++ *((int *)&__m256_op0[2]) = 0x55555555; ++ *((int *)&__m256_op0[1]) = 0x5d5ca2a3; ++ *((int *)&__m256_op0[0]) = 0x5d54aaab; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffff7fffffff; ++ __m256i_out = __lasx_xvftintrz_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0b085bfc00000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0b004bc000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0b085bfc00000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0b004bc000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0xffeeffaf; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000011; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0xffeeffaf; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000011; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00ff00ff; ++ *((int *)&__m256_op0[6]) = 0x00ff00ff; ++ *((int *)&__m256_op0[5]) = 0x00ff00ff; ++ *((int *)&__m256_op0[4]) = 0x00ff00ff; ++ *((int *)&__m256_op0[3]) = 0x00ff00ff; ++ *((int *)&__m256_op0[2]) = 0x00ff00ff; ++ *((int *)&__m256_op0[1]) = 0x00ff00ff; ++ *((int *)&__m256_op0[0]) = 0x00ff00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x001d001d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x01ff0020ff1f001f; ++ *((unsigned long *)&__m256d_op0[2]) = 0x7fe1ffe0ffe1ffe0; ++ *((unsigned long *)&__m256d_op0[1]) = 0x01ff0020ff1f001f; ++ *((unsigned long *)&__m256d_op0[0]) = 0x7fe1ffe0ffe1ffe0; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x81fa28e4; ++ *((int *)&__m256_op0[6]) = 0x81fa28e4; ++ *((int *)&__m256_op0[5]) = 0x81fa28e4; ++ *((int *)&__m256_op0[4]) = 0x81fa28e4; ++ *((int *)&__m256_op0[3]) = 0x81fa28e4; ++ *((int *)&__m256_op0[2]) = 0x81fa28e4; ++ *((int *)&__m256_op0[1]) = 0x81fa28e4; ++ *((int *)&__m256_op0[0]) = 0x81fa28e4; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-3.c +new file mode 100644 +index 000000000..19db4e192 +--- /dev/null ++++ 
b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-3.c +@@ -0,0 +1,511 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_wu_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xfffefffe; ++ *((int *)&__m256_op0[6]) = 0xfffefffe; ++ *((int *)&__m256_op0[5]) = 0xfffefffe; ++ *((int *)&__m256_op0[4]) = 0xfffefffe; ++ *((int *)&__m256_op0[3]) = 0xfffefffe; ++ *((int *)&__m256_op0[2]) = 0xfffefffe; ++ *((int *)&__m256_op0[1]) = 0xfffefffe; ++ *((int *)&__m256_op0[0]) = 0xfffefffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_wu_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000200; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000200; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000200; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000200; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_wu_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xfffffff1; ++ *((int *)&__m256_op0[6]) = 0xfffffff1; ++ *((int *)&__m256_op0[5]) = 0xfffffff1; ++ *((int *)&__m256_op0[4]) = 0xfffffff1; ++ *((int *)&__m256_op0[3]) = 0xfffffff1; ++ *((int *)&__m256_op0[2]) = 0xfffffff1; ++ *((int *)&__m256_op0[1]) = 0xfffffff1; ++ *((int *)&__m256_op0[0]) = 0xfffffff1; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_wu_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_wu_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x43ef8787; ++ *((int *)&__m256_op0[4]) = 0x8000ffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x43ef8787; ++ *((int *)&__m256_op0[0]) = 0x8000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000001df00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000001df00000000; ++ __m256i_out = __lasx_xvftint_wu_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_wu_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0x00030005; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0x00030005; ++ *((int *)&__m256_op0[1]) = 0x00000000; 
++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_wu_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x7ff80000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x7ff80000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x7ff80000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x7ff80000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_wu_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_wu_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int 
*)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_wu_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000002; ++ *((int *)&__m256_op0[6]) = 0x00000002; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000002; ++ *((int *)&__m256_op0[2]) = 0x00000002; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_wu_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_wu_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x7ff00000; ++ *((int 
*)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x7ff00000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x7ff00000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x7ff00000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_wu_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_wu_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_wu_s 
(__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00016e00; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00016e00; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_wu_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_wu_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_wu_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_wu_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_wu_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_lu_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_lu_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_lu_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_lu_d (__m256d_op0); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_lu_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000004000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_lu_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x38a966b301f41ffd; ++ *((unsigned long *)&__m256d_op0[2]) = 0x5f6108ee13ff0000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xf41a56e8d10201f6; ++ *((unsigned long *)&__m256d_op0[0]) = 0x683b8b34f1020001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvftintrz_lu_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000003868686a20; ++ *((unsigned long *)&__m256d_op0[2]) = 
0x0045b8ae81bce1d8; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000003868686a20; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0045b8ae81bce1d8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_lu_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_lu_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256d_op0[2]) = 0xc2c2c2c2c2c29cc0; ++ *((unsigned long *)&__m256d_op0[1]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256d_op0[0]) = 0xc2c2c2c2c2c29cc0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_lu_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00000000007a00f8; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00ff00ff01640092; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00000000007a00f8; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00ff00ff01640092; ++ *((unsigned 
long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_lu_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x000000007fff80fe; ++ *((unsigned long *)&__m256d_op0[2]) = 0x000000007fff80fe; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000ffff80007ffe; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000ff007fff80fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_lu_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000064; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000781; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_lu_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_lu_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x000408080c111414; ++ *((unsigned long *)&__m256d_op0[2]) = 0x000408080c111414; ++ *((unsigned long *)&__m256d_op0[1]) = 0x000408080c111414; ++ *((unsigned long *)&__m256d_op0[0]) = 0x000408080c111414; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_lu_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000008e8c000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x000000000fffc000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000008e8c000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x000000000fffc000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_lu_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftintl.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftintl.c +new file mode 100644 +index 000000000..b0fdf7e0b +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftintl.c +@@ -0,0 +1,1580 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, 
__m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0xc58a0a0a; ++ *((int *)&__m256_op0[6]) = 0x07070706; ++ *((int *)&__m256_op0[5]) = 0x006b60e4; ++ *((int *)&__m256_op0[4]) = 0x180b0023; ++ *((int *)&__m256_op0[3]) = 0x1b39153f; ++ *((int *)&__m256_op0[2]) = 0x334b966a; ++ *((int *)&__m256_op0[1]) = 0xf1d75d79; ++ *((int *)&__m256_op0[0]) = 0xefcac002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvftintl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x40404040; ++ *((int *)&__m256_op0[6]) = 0x40404040; ++ *((int *)&__m256_op0[5]) = 0x40404040; ++ *((int *)&__m256_op0[4]) = 
0x40404040; ++ *((int *)&__m256_op0[3]) = 0x40404040; ++ *((int *)&__m256_op0[2]) = 0x40404040; ++ *((int *)&__m256_op0[1]) = 0x40404040; ++ *((int *)&__m256_op0[0]) = 0x40404040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000003; ++ __m256i_out = __lasx_xvftintl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ 
*((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00080000; ++ *((int *)&__m256_op0[4]) = 0x00000010; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00080000; ++ *((int *)&__m256_op0[0]) = 0x00000010; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x40f69fe6; ++ *((int *)&__m256_op0[6]) = 0x3c26f4f5; ++ *((int *)&__m256_op0[5]) = 0x7ff7ffff; ++ *((int *)&__m256_op0[4]) = 0x00000007; ++ *((int *)&__m256_op0[3]) = 0x40f69fe6; ++ *((int *)&__m256_op0[2]) = 0x3c26f4f5; ++ *((int *)&__m256_op0[1]) = 0x7ff7ffff; ++ *((int *)&__m256_op0[0]) = 0x00000007; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintl_l_s 
(__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftinth_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00060000; ++ *((int *)&__m256_op0[6]) = 0x00040000; ++ *((int *)&__m256_op0[5]) = 0x00020000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00060000; ++ *((int *)&__m256_op0[2]) = 0x00040000; ++ *((int *)&__m256_op0[1]) = 0x00020000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftinth_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftinth_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftinth_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffff0000; ++ *((int *)&__m256_op0[4]) = 0xffff0000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffff0000; ++ *((int *)&__m256_op0[0]) = 0xffff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftinth_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x55550000; ++ *((int *)&__m256_op0[6]) = 0x55550000; ++ *((int *)&__m256_op0[5]) = 0x55550000; ++ *((int *)&__m256_op0[4]) = 0x55550000; ++ *((int *)&__m256_op0[3]) = 0x55550000; ++ *((int *)&__m256_op0[2]) = 0x55550000; ++ *((int *)&__m256_op0[1]) = 0x55550000; ++ *((int *)&__m256_op0[0]) = 0x55550000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000d5000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000d5000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000d5000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000d5000000000; ++ __m256i_out = __lasx_xvftinth_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x007f8080; ++ *((int *)&__m256_op0[6]) = 0x007f007f; ++ *((int *)&__m256_op0[5]) = 0x007f8080; ++ *((int *)&__m256_op0[4]) = 0x007f007f; ++ *((int *)&__m256_op0[3]) = 0x007f8080; ++ *((int *)&__m256_op0[2]) = 0x007f007f; ++ *((int *)&__m256_op0[1]) = 0x007f8080; ++ *((int *)&__m256_op0[0]) = 0x007f007f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftinth_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; 
++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftinth_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x08e8c000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x0fffc000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x08e8c000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x0fffc000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftinth_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftinth_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int 
*)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrnel_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000002; ++ *((int *)&__m256_op0[4]) = 0x00000008; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000002; ++ *((int *)&__m256_op0[0]) = 0x00000008; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrnel_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrnel_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x7f1d7f7f; ++ *((int 
*)&__m256_op0[6]) = 0x7f1d7f3b; ++ *((int *)&__m256_op0[5]) = 0x02020102; ++ *((int *)&__m256_op0[4]) = 0x02020102; ++ *((int *)&__m256_op0[3]) = 0x7f1d7f7f; ++ *((int *)&__m256_op0[2]) = 0x7f1d7f3b; ++ *((int *)&__m256_op0[1]) = 0x02020102; ++ *((int *)&__m256_op0[0]) = 0x02020102; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrnel_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrnel_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrnel_l_s 
(__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrnel_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrpl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000102; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrpl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0x39ffffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0x39ffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvftintrpl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrpl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x80000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x80000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x80000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x80000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrpl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x000055ff; ++ *((int *)&__m256_op0[6]) = 0x01f90ab5; ++ *((int *)&__m256_op0[5]) = 0xaa95eaff; ++ *((int *)&__m256_op0[4]) = 0xfec6e01f; ++ *((int *)&__m256_op0[3]) = 0x000055ff; ++ *((int *)&__m256_op0[2]) = 0x01f90ab5; ++ *((int *)&__m256_op0[1]) = 0xaa95eaff; ++ *((int *)&__m256_op0[0]) = 0xfec6e01f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvftintrpl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrpl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int 
*)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrpl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrpl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrpl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xfffeb683; ++ *((int *)&__m256_op0[6]) = 0x9ffffd80; ++ *((int *)&__m256_op0[5]) = 0xfffe97c0; ++ *((int 
*)&__m256_op0[4]) = 0x20010001; ++ *((int *)&__m256_op0[3]) = 0xfffeb683; ++ *((int *)&__m256_op0[2]) = 0x9ffffd80; ++ *((int *)&__m256_op0[1]) = 0xfffe97c0; ++ *((int *)&__m256_op0[0]) = 0x20010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvftintrpl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrml_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrml_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int 
*)&__m256_op0[7]) = 0x000000ff; ++ *((int *)&__m256_op0[6]) = 0x000000f8; ++ *((int *)&__m256_op0[5]) = 0xbc8ff0ff; ++ *((int *)&__m256_op0[4]) = 0xffffcff8; ++ *((int *)&__m256_op0[3]) = 0x000000ff; ++ *((int *)&__m256_op0[2]) = 0x000000f8; ++ *((int *)&__m256_op0[1]) = 0xbc8ff0ff; ++ *((int *)&__m256_op0[0]) = 0xffffcff8; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrml_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrml_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000001; ++ *((int *)&__m256_op0[6]) = 0x00000001; ++ *((int *)&__m256_op0[5]) = 0x00000001; ++ *((int *)&__m256_op0[4]) = 0x00000001; ++ *((int *)&__m256_op0[3]) = 0x00000001; ++ *((int *)&__m256_op0[2]) = 0x00000001; ++ *((int *)&__m256_op0[1]) = 0x00000001; ++ *((int *)&__m256_op0[0]) = 0x00000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ 
__m256i_out = __lasx_xvftintrml_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrml_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrml_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrml_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrml_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrml_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x7fe37fe3; ++ *((int *)&__m256_op0[6]) = 0x001d001d; ++ *((int *)&__m256_op0[5]) = 0x7fff7fff; ++ *((int *)&__m256_op0[4]) = 0x7fff0000; ++ *((int *)&__m256_op0[3]) = 0x7fe37fe3; ++ *((int *)&__m256_op0[2]) = 0x001d001d; ++ *((int *)&__m256_op0[1]) = 0x7fff7fff; ++ *((int *)&__m256_op0[0]) = 0x7fff0000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrml_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000010; ++ *((int *)&__m256_op0[6]) = 0x00000010; ++ *((int *)&__m256_op0[5]) = 0x00000010; ++ *((int *)&__m256_op0[4]) = 0x00000010; ++ *((int *)&__m256_op0[3]) = 0x00000010; ++ *((int *)&__m256_op0[2]) = 0x00000010; ++ *((int *)&__m256_op0[1]) = 0x00000010; ++ *((int *)&__m256_op0[0]) = 0x00000010; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrml_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrml_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x8b141414; ++ *((int *)&__m256_op0[4]) = 0x0e0e0e0e; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 
0x00000000; ++ *((int *)&__m256_op0[1]) = 0x36722a7e; ++ *((int *)&__m256_op0[0]) = 0x66972cd6; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvftintrzl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; 
++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int 
*)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x6a9e3f9a; ++ *((int *)&__m256_op0[4]) = 0x603a2001; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x6a9e3f9a; ++ *((int *)&__m256_op0[0]) = 0x603a2001; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvftintrzl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000001; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ 
__m256i_out = __lasx_xvftintrzl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x0000fafe; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x0000fafe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00fffefe; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xfffffffc; ++ *((int *)&__m256_op0[4]) = 0x5556aaa8; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xfffffffc; ++ *((int *)&__m256_op0[0]) = 0x5556aaa8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0xffffcc80; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x7dfdff4b; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x002a5429; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x002a5429; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x77777777; ++ *((int *)&__m256_op0[6]) = 0xf7777777; ++ *((int *)&__m256_op0[5]) = 0xf7777777; ++ *((int *)&__m256_op0[4]) = 0x77777777; ++ *((int *)&__m256_op0[3]) = 0x77777777; ++ *((int *)&__m256_op0[2]) = 
0xf7777777; ++ *((int *)&__m256_op0[1]) = 0xf7777777; ++ *((int *)&__m256_op0[0]) = 0x77777777; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000009; ++ *((int *)&__m256_op0[5]) = 
0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000009; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000009; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000009; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x010c7fbc; ++ *((int *)&__m256_op0[6]) = 0x7e1c7e1c; ++ *((int *)&__m256_op0[5]) = 0xfe000000; ++ *((int *)&__m256_op0[4]) = 0x00000024; ++ *((int *)&__m256_op0[3]) = 0x010c7fbc; ++ *((int *)&__m256_op0[2]) = 0x7e1c7e1c; ++ *((int *)&__m256_op0[1]) = 0xfe000000; ++ *((int *)&__m256_op0[0]) = 0x00000024; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xfffffe20; ++ *((int *)&__m256_op0[6]) = 0x001dfe1f; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0xfffffe20; ++ *((int *)&__m256_op0[2]) = 0x001dfe1f; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); 
++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffe1; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffe1; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffe1; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffe1; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000040; ++ *((int *)&__m256_op0[6]) = 0x00000020; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000040; ++ *((int *)&__m256_op0[2]) = 0x00000020; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrph_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xfefefeff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xff295329; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xfefefeff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xff295329; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x8000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvftintrph_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xff00ffff; ++ *((int *)&__m256_op0[6]) = 0xff00ffff; ++ *((int *)&__m256_op0[5]) = 0xff00ffff; ++ *((int *)&__m256_op0[4]) = 0xff00ffff; ++ *((int *)&__m256_op0[3]) = 0xff00ffff; ++ *((int *)&__m256_op0[2]) = 0xff00ffff; ++ *((int *)&__m256_op0[1]) = 0xff00ffff; ++ *((int *)&__m256_op0[0]) = 0xff00ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvftintrph_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrph_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x7fefffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x7fefffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 
0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrph_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrph_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrph_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; 
++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrph_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrph_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x02020102; ++ *((int *)&__m256_op0[6]) = 0x02020102; ++ *((int *)&__m256_op0[5]) = 0x02020102; ++ *((int *)&__m256_op0[4]) = 0x02020102; ++ *((int *)&__m256_op0[3]) = 0x02020102; ++ *((int *)&__m256_op0[2]) = 0x02020102; ++ *((int *)&__m256_op0[1]) = 0x02020102; ++ *((int *)&__m256_op0[0]) = 0x02020102; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvftintrph_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000001; ++ *((int 
*)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000001; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000001; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvftintrph_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x515f93f0; ++ *((int *)&__m256_op0[6]) = 0x23600fb9; ++ *((int *)&__m256_op0[5]) = 0x948b39e0; ++ *((int *)&__m256_op0[4]) = 0xb7405f6f; ++ *((int *)&__m256_op0[3]) = 0x48ef0878; ++ *((int *)&__m256_op0[2]) = 0x00007c83; ++ *((int *)&__m256_op0[1]) = 0x78af877c; ++ *((int *)&__m256_op0[0]) = 0x7d7f86f9; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000df93f0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000077843; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrmh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x17171717; ++ *((int *)&__m256_op0[6]) = 0x17171717; ++ *((int *)&__m256_op0[5]) = 0x000607f7; ++ *((int *)&__m256_op0[4]) = 0x00000001; ++ *((int *)&__m256_op0[3]) = 0x17171717; ++ *((int *)&__m256_op0[2]) = 0x17171717; ++ *((int *)&__m256_op0[1]) = 0x000607f7; ++ *((int *)&__m256_op0[0]) = 0x00000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrmh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00ff00ff; ++ *((int *)&__m256_op0[6]) = 0x00ff00ff; ++ *((int *)&__m256_op0[5]) = 0x00ff00ff; ++ *((int *)&__m256_op0[4]) = 0x017e01fe; ++ *((int *)&__m256_op0[3]) = 0x017e00ff; ++ *((int *)&__m256_op0[2]) = 0x017e00ff; ++ *((int *)&__m256_op0[1]) = 0x00ff00ff; ++ *((int *)&__m256_op0[0]) = 0x017e01fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrmh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xfefefefe; ++ *((int *)&__m256_op0[6]) = 0xfefefefe; ++ *((int *)&__m256_op0[5]) = 0xfe8bfe0e; ++ *((int *)&__m256_op0[4]) = 0xfe8bfe12; ++ *((int *)&__m256_op0[3]) = 0xfefefefe; ++ *((int *)&__m256_op0[2]) = 0xfefefefe; ++ *((int *)&__m256_op0[1]) = 0xfe8bfe0e; ++ *((int *)&__m256_op0[0]) = 0xfe8bfe12; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvftintrmh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x71717171; ++ *((int *)&__m256_op0[6]) = 0x71010101; ++ *((int *)&__m256_op0[5]) = 0x8e8e8e8e; ++ *((int *)&__m256_op0[4]) = 0x8f00ffff; ++ *((int *)&__m256_op0[3]) = 0x71717171; ++ *((int *)&__m256_op0[2]) = 0x71010101; ++ *((int *)&__m256_op0[1]) = 0x8e8e8e8e; ++ *((int *)&__m256_op0[0]) = 0x8f00ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvftintrmh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00060000; ++ *((int *)&__m256_op0[6]) = 0x00040000; ++ *((int *)&__m256_op0[5]) = 0x00020000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00060000; ++ *((int *)&__m256_op0[2]) = 0x00040000; ++ *((int *)&__m256_op0[1]) = 0x00020000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrmh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xff1cff1c; ++ *((int *)&__m256_op0[6]) = 0xff1cff1c; ++ *((int *)&__m256_op0[5]) = 0xff1cff1c; ++ *((int *)&__m256_op0[4]) = 0xff1cff1c; ++ *((int *)&__m256_op0[3]) = 0xff1cff1c; ++ *((int *)&__m256_op0[2]) = 0xff1cff1c; ++ *((int *)&__m256_op0[1]) = 0xff1cff1c; ++ *((int *)&__m256_op0[0]) = 0xff1cff1c; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvftintrmh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x000fffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x000fffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrmh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00002262; ++ *((int *)&__m256_op0[6]) = 0x00005111; ++ *((int *)&__m256_op0[5]) = 0x0000165e; ++ *((int *)&__m256_op0[4]) = 0x0000480d; ++ *((int *)&__m256_op0[3]) = 0x00002262; ++ *((int *)&__m256_op0[2]) = 0x00005111; ++ *((int *)&__m256_op0[1]) = 0x0000165e; ++ *((int *)&__m256_op0[0]) = 0x0000480d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int 
*)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int 
*)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00040004; ++ *((int *)&__m256_op0[6]) = 0x00040004; ++ *((int *)&__m256_op0[5]) = 0x00040005; ++ *((int *)&__m256_op0[4]) = 0x00040005; ++ *((int *)&__m256_op0[3]) = 0x00040004; ++ *((int *)&__m256_op0[2]) = 0x00040004; ++ *((int *)&__m256_op0[1]) = 0x00040005; ++ *((int *)&__m256_op0[0]) = 0x00040005; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +-- +2.33.0 + diff --git 
a/LoongArch-Add-tests-for-ASX-vector-floating-point-op.patch b/LoongArch-Add-tests-for-ASX-vector-floating-point-op.patch new file mode 100644 index 0000000000000000000000000000000000000000..9232f2370c318b315e366719f6b795c820ba9ba2 --- /dev/null +++ b/LoongArch-Add-tests-for-ASX-vector-floating-point-op.patch @@ -0,0 +1,5614 @@ +From 9a9935e736a9289e0a1c0a77f4110c206ce36bd2 Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Tue, 12 Sep 2023 16:03:17 +0800 +Subject: [PATCH 111/124] LoongArch: Add tests for ASX vector floating-point + operation instruction. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lasx/lasx-xvfadd_d.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvfadd_s.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvfclass_d.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvfclass_s.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvflogb_d.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvflogb_s.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvfmadd_d.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvfmadd_s.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvfmax_d.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvfmax_s.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_d.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_s.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_d.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_s.c: New test. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lasx/lasx-xvfadd_d.c | 545 +++++++++++ + .../loongarch/vector/lasx/lasx-xvfadd_s.c | 911 ++++++++++++++++++ + .../loongarch/vector/lasx/lasx-xvfclass_d.c | 152 +++ + .../loongarch/vector/lasx/lasx-xvfclass_s.c | 95 ++ + .../loongarch/vector/lasx/lasx-xvflogb_d.c | 86 ++ + .../loongarch/vector/lasx/lasx-xvflogb_s.c | 115 +++ + .../loongarch/vector/lasx/lasx-xvfmadd_d.c | 382 ++++++++ + .../loongarch/vector/lasx/lasx-xvfmadd_s.c | 720 ++++++++++++++ + .../loongarch/vector/lasx/lasx-xvfmax_d.c | 230 +++++ + .../loongarch/vector/lasx/lasx-xvfmax_s.c | 560 +++++++++++ + .../loongarch/vector/lasx/lasx-xvfmaxa_d.c | 230 +++++ + .../loongarch/vector/lasx/lasx-xvfmaxa_s.c | 506 ++++++++++ + .../loongarch/vector/lasx/lasx-xvfsqrt_d.c | 482 +++++++++ + .../loongarch/vector/lasx/lasx-xvfsqrt_s.c | 457 +++++++++ + 14 files changed, 5471 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_s.c + create mode 100644 
gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_s.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_d.c +new file mode 100644 +index 000000000..657a19e58 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_d.c +@@ -0,0 +1,545 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffff00000000ffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x7ffffffffffff7ff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffe06df0d7; ++ *((unsigned long *)&__m256d_op1[1]) = 0x7ffffffffffff7ff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffbe8b470f; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ffffffffffff7ff; ++ *((unsigned long *)&__m256d_result[2]) = 0xffff00000000ffff; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ffffffffffff7ff; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffbe8b470f; ++ __m256d_out = __lasx_xvfadd_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); 
++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x41d6600000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x41d6600000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0x41d6600000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256d_result[1]) = 0x41d6600000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7fffffffffffffff; ++ __m256d_out = __lasx_xvfadd_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff; ++ __m256d_out = __lasx_xvfadd_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfadd_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m256d_result[2]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m256d_result[1]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m256d_result[0]) = 0x00007fff00007fff; ++ __m256d_out = __lasx_xvfadd_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000022beb03f; ++ *((unsigned long *)&__m256d_op0[2]) = 0x7fffffffa2beb040; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000022beb03f; ++ *((unsigned long *)&__m256d_op0[0]) = 0x7fffffffa2beb040; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x000f000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x000f000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 
0x0000000022beb03f; ++ *((unsigned long *)&__m256d_result[2]) = 0x7fffffffa2beb040; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000022beb03f; ++ *((unsigned long *)&__m256d_result[0]) = 0x7fffffffa2beb040; ++ __m256d_out = __lasx_xvfadd_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x000001c000000134; ++ *((unsigned long *)&__m256d_op0[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x000001c000000134; ++ *((unsigned long *)&__m256d_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x000001c000000134; ++ *((unsigned long *)&__m256d_op1[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x000001c000000134; ++ *((unsigned long *)&__m256d_op1[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000038000000268; ++ *((unsigned long *)&__m256d_result[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000038000000268; ++ *((unsigned long *)&__m256d_result[0]) = 0x7fff7fff7fff7fff; ++ __m256d_out = __lasx_xvfadd_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfadd_d (__m256d_op0, 
__m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000001010100; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000405; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000001010100; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000405; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000001010100; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000405; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000001010100; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000405; ++ __m256d_out = __lasx_xvfadd_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000040; ++ __m256d_out = __lasx_xvfadd_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000040; ++ *((unsigned 
long *)&__m256d_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m256d_op1[3]) = 0x00000000ff88ffc0; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00000000ff78ffc0; ++ *((unsigned long *)&__m256d_op1[1]) = 0x00000000ff88ffc0; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00000000ff78ffc0; ++ *((unsigned long *)&__m256d_result[3]) = 0x00000000ff890000; ++ *((unsigned long *)&__m256d_result[2]) = 0x00000000ff790000; ++ *((unsigned long *)&__m256d_result[1]) = 0x00000000ff890000; ++ *((unsigned long *)&__m256d_result[0]) = 0x00000000ff790000; ++ __m256d_out = __lasx_xvfadd_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x000000000000006d; ++ *((unsigned long *)&__m256d_op0[2]) = 0x000000000010006d; ++ *((unsigned long *)&__m256d_op0[1]) = 0x000000000000006d; ++ *((unsigned long *)&__m256d_op0[0]) = 0x000000000010006d; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000080040; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000080040; ++ *((unsigned long *)&__m256d_result[3]) = 0x00000000000000ad; ++ *((unsigned long *)&__m256d_result[2]) = 0x00000000001800ad; ++ *((unsigned long *)&__m256d_result[1]) = 0x00000000000000ad; ++ *((unsigned long *)&__m256d_result[0]) = 0x00000000001800ad; ++ __m256d_out = __lasx_xvfadd_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x001fffffffe00000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x2020000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x001fffffffe00000; ++ *((unsigned long *)&__m256d_op1[0]) = 
0x2020000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x001fffffffe00000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256d_result[1]) = 0x001fffffffe00000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7fffffffffffffff; ++ __m256d_out = __lasx_xvfadd_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffffff8000; ++ __m256d_out = __lasx_xvfadd_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 
0x0000000000000000; ++ __m256d_out = __lasx_xvfmul_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000400000001; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000400000001; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmul_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff; ++ __m256d_out = __lasx_xvfmul_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000010100000000; ++ 
*((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000010100000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x00008000003f0000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00390015003529c1; ++ *((unsigned long *)&__m256d_op1[1]) = 0x00008000003f0000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00390015003529c1; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmul_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000001ffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000001ffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x8000000000000000; ++ __m256d_out = __lasx_xvfmul_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256d_op1[1]) = 0x00007ffe81fdfe03; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x80007ffe81fdfe03; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfsub_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff; ++ __m256d_out = __lasx_xvfsub_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xc1be9e9e9f000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x41d8585858400000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xc1be9e9e9f000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x41d8585858400000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[1]) 
= 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff; ++ __m256d_out = __lasx_xvfsub_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xff00d5007f00ffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xff00ffffff00ffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xff00d5007f00ffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xff00ffffff00ffff; ++ *((unsigned long *)&__m256d_result[3]) = 0x7f00d5007f00ffff; ++ *((unsigned long *)&__m256d_result[2]) = 0x7f00ffffff00ffff; ++ *((unsigned long *)&__m256d_result[1]) = 0x7f00d5007f00ffff; ++ *((unsigned long *)&__m256d_result[0]) = 0x7f00ffffff00ffff; ++ __m256d_out = __lasx_xvfsub_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfsub_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m256d_op0[2]) = 0xffffffff00000002; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffff00000002; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffff00000002; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffff00000002; ++ __m256d_out = __lasx_xvfsub_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00ff00fe00ff00fe; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00ff00fe00ff00fe; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256d_result[2]) = 0x00ff00fe00ff00fe; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256d_result[0]) = 0x00ff00fe00ff00fe; ++ __m256d_out = __lasx_xvfsub_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfsub_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfsub_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000005536aaaaac; ++ *((unsigned long *)&__m256d_op0[2]) = 0x55555555aaaaaaac; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000005536aaaaac; ++ *((unsigned long *)&__m256d_op0[0]) = 0x55555555aaaaaaac; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0006000000040000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0002555400000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0002555400000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x3f2c678e38d1104c; ++ *((unsigned long *)&__m256d_result[2]) = 
0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x3f2c678e38d1104c; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfdiv_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffe367cc82f8989a; ++ *((unsigned long *)&__m256d_op0[2]) = 0x4f90000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffc3aaa8d58f43c8; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x1f0fdf7f3e3b31d4; ++ *((unsigned long *)&__m256d_op1[2]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x1f0fdf7f3e3b31d4; ++ *((unsigned long *)&__m256d_op1[0]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff8000000000000; ++ __m256d_out = __lasx_xvfdiv_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0010000000100000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0010000000100000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0010000000100000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0010000000100000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000483800; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000483800; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x41cc5bb8a95fd1eb; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x41cc5bb8a95fd1eb; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfdiv_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ 
*((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff8000000000000; ++ __m256d_out = __lasx_xvfdiv_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff8000000000000; ++ __m256d_out = __lasx_xvfdiv_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff8000000000000; ++ __m256d_out = __lasx_xvfdiv_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff8000000000000; ++ __m256d_out = __lasx_xvfdiv_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_s.c +new file mode 100644 +index 000000000..4002c4074 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_s.c +@@ -0,0 +1,911 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, 
__m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfadd_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffffff; 
++ *((int *)&__m256_op1[6]) = 0x00000001; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000002; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0x00000001; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000002; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0x00000001; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000002; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0x00000001; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000002; ++ __m256_out = __lasx_xvfadd_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfadd_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; 
++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfadd_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x40b2bf4d; ++ *((int *)&__m256_op0[6]) = 0x30313031; ++ *((int *)&__m256_op0[5]) = 0x50005000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x40b2bf4d; ++ *((int *)&__m256_op0[2]) = 0x30313031; ++ *((int *)&__m256_op0[1]) = 0x50005000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x22be22be; ++ *((int *)&__m256_op1[5]) = 0x7fff7fff; ++ *((int *)&__m256_op1[4]) = 0xa2bea2be; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x22be22be; ++ *((int *)&__m256_op1[1]) = 0x7fff7fff; ++ *((int *)&__m256_op1[0]) = 0xa2bea2be; ++ *((int *)&__m256_result[7]) = 0x40b2bf4d; ++ *((int *)&__m256_result[6]) = 0x30313031; ++ *((int *)&__m256_result[5]) = 0x7fff7fff; ++ *((int *)&__m256_result[4]) = 0xa2bea2be; ++ *((int *)&__m256_result[3]) = 0x40b2bf4d; ++ *((int *)&__m256_result[2]) = 
0x30313031; ++ *((int *)&__m256_result[1]) = 0x7fff7fff; ++ *((int *)&__m256_result[0]) = 0xa2bea2be; ++ __m256_out = __lasx_xvfadd_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00ff0000; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00ff0000; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00ff0000; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00ff0000; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfadd_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x0000008c; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x0000008c; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x0000008c; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ 
*((int *)&__m256_op1[0]) = 0x0000008c; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000118; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000118; ++ __m256_out = __lasx_xvfadd_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffff8000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffff8000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffff8000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffff8000; ++ __m256_out = __lasx_xvfadd_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffff0101; ++ *((int *)&__m256_op0[4]) = 0x00000001; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffff0101; ++ *((int *)&__m256_op0[0]) = 0x00000001; ++ 
*((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0xffff0101; ++ *((int *)&__m256_result[4]) = 0x00000001; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0xffff0101; ++ *((int *)&__m256_result[0]) = 0x00000001; ++ __m256_out = __lasx_xvfadd_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfmul_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ 
*((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmul_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x10101011; ++ *((int *)&__m256_op1[4]) = 0x10101011; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x11111112; ++ *((int *)&__m256_op1[0]) = 0x11111112; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; 
++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmul_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00060000; ++ *((int *)&__m256_op0[6]) = 0x00040000; ++ *((int *)&__m256_op0[5]) = 0x00020000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00060000; ++ *((int *)&__m256_op0[2]) = 0x00040000; ++ *((int *)&__m256_op0[1]) = 0x00020000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00060000; ++ *((int *)&__m256_op1[6]) = 0x00040000; ++ *((int *)&__m256_op1[5]) = 0x00020000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00060000; ++ *((int *)&__m256_op1[2]) = 0x00040000; ++ *((int *)&__m256_op1[1]) = 0x00020000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmul_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int 
*)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmul_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfmul_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x000000ff; ++ *((int *)&__m256_op0[4]) = 0x000000ff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x000000ff; ++ *((int 
*)&__m256_op0[0]) = 0x000000ff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000101; ++ *((int *)&__m256_op1[4]) = 0x00000101; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000101; ++ *((int *)&__m256_op1[0]) = 0x00000101; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmul_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0xffff001f; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x007fe268; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0xffff001f; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x007fe268; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0xffff001f; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x007fe268; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0xffff001f; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x007fe268; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0xffff001f; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0xffff001f; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfsub_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int 
*)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfsub_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x7f800000; ++ *((int *)&__m256_op1[6]) = 0x7f800000; ++ *((int *)&__m256_op1[5]) = 0x7f800000; ++ *((int *)&__m256_op1[4]) = 0x7f800000; ++ *((int *)&__m256_op1[3]) = 0x7f800000; ++ *((int *)&__m256_op1[2]) = 0x7f800000; ++ *((int *)&__m256_op1[1]) = 0x7f800000; ++ *((int *)&__m256_op1[0]) = 0x7f800000; ++ *((int *)&__m256_result[7]) = 0xff800000; ++ *((int *)&__m256_result[6]) = 0xff800000; ++ *((int *)&__m256_result[5]) = 0xff800000; ++ *((int *)&__m256_result[4]) = 0xff800000; ++ *((int 
*)&__m256_result[3]) = 0xff800000; ++ *((int *)&__m256_result[2]) = 0xff800000; ++ *((int *)&__m256_result[1]) = 0xff800000; ++ *((int *)&__m256_result[0]) = 0xff800000; ++ __m256_out = __lasx_xvfsub_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x02a54290; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x02a54290; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x02a54290; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x0154dc84; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x02a54290; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000089; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x82a54290; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x028aa700; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x82a54290; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x02a54287; ++ __m256_out = __lasx_xvfsub_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int 
*)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfsub_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00004200; ++ *((int *)&__m256_op0[6]) = 0x80000000; ++ *((int *)&__m256_op0[5]) = 0x5fff5fff; ++ *((int *)&__m256_op0[4]) = 0x607f0000; ++ *((int *)&__m256_op0[3]) = 0x00004200; ++ *((int *)&__m256_op0[2]) = 0x80000000; ++ *((int *)&__m256_op0[1]) = 0x5fff5fff; ++ *((int *)&__m256_op0[0]) = 0x607f0000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00004200; ++ *((int *)&__m256_result[6]) = 0x80000000; ++ *((int *)&__m256_result[5]) = 0x5fff5fff; ++ *((int *)&__m256_result[4]) = 0x607f0000; ++ *((int *)&__m256_result[3]) = 0x00004200; ++ *((int *)&__m256_result[2]) = 0x80000000; ++ *((int *)&__m256_result[1]) = 0x5fff5fff; ++ *((int *)&__m256_result[0]) = 0x607f0000; ++ __m256_out = __lasx_xvfsub_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int 
*)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfsub_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfsub_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 
(__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfsub_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00800080; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000202; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00800080; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000202; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00800080; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ 
*((int *)&__m256_result[4]) = 0x00000202; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00800080; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000202; ++ __m256_out = __lasx_xvfsub_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xfffefffe; ++ *((int *)&__m256_op0[6]) = 0xfffefffe; ++ *((int *)&__m256_op0[5]) = 0xfffefffe; ++ *((int *)&__m256_op0[4]) = 0xfffefffe; ++ *((int *)&__m256_op0[3]) = 0xfffefffe; ++ *((int *)&__m256_op0[2]) = 0xfffefffe; ++ *((int *)&__m256_op0[1]) = 0xfffefffe; ++ *((int *)&__m256_op0[0]) = 0xfffefffe; ++ *((int *)&__m256_op1[7]) = 0x000023a3; ++ *((int *)&__m256_op1[6]) = 0x00003fff; ++ *((int *)&__m256_op1[5]) = 0x000023a3; ++ *((int *)&__m256_op1[4]) = 0x00003fef; ++ *((int *)&__m256_op1[3]) = 0x000023a3; ++ *((int *)&__m256_op1[2]) = 0x00003fff; ++ *((int *)&__m256_op1[1]) = 0x000023a3; ++ *((int *)&__m256_op1[0]) = 0x00003fef; ++ *((int *)&__m256_result[7]) = 0xfffefffe; ++ *((int *)&__m256_result[6]) = 0xfffefffe; ++ *((int *)&__m256_result[5]) = 0xfffefffe; ++ *((int *)&__m256_result[4]) = 0xfffefffe; ++ *((int *)&__m256_result[3]) = 0xfffefffe; ++ *((int *)&__m256_result[2]) = 0xfffefffe; ++ *((int *)&__m256_result[1]) = 0xfffefffe; ++ *((int *)&__m256_result[0]) = 0xfffefffe; ++ __m256_out = __lasx_xvfsub_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x002a542a; ++ *((int 
*)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x002a542a; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfdiv_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000001; ++ *((int *)&__m256_op0[6]) = 0x00000001; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000001; ++ *((int *)&__m256_op0[2]) = 0x00000001; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x7f800000; ++ *((int *)&__m256_result[6]) = 0x7f800000; ++ *((int *)&__m256_result[5]) = 0x7fc00000; ++ *((int *)&__m256_result[4]) = 0x7fc00000; ++ *((int *)&__m256_result[3]) = 0x7f800000; ++ *((int *)&__m256_result[2]) = 0x7f800000; ++ *((int *)&__m256_result[1]) = 0x7fc00000; ++ *((int *)&__m256_result[0]) = 0x7fc00000; ++ __m256_out = __lasx_xvfdiv_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00fe00fe; ++ *((int *)&__m256_op0[6]) = 0x00fe00fe; ++ *((int *)&__m256_op0[5]) = 0x00fe00fe; ++ *((int *)&__m256_op0[4]) = 0x00fe00fe; ++ *((int *)&__m256_op0[3]) = 0x00fe00fe; ++ *((int 
*)&__m256_op0[2]) = 0x00fe00fe; ++ *((int *)&__m256_op0[1]) = 0x00fe00fe; ++ *((int *)&__m256_op0[0]) = 0x00fe00fe; ++ *((int *)&__m256_op1[7]) = 0x00fe00fe; ++ *((int *)&__m256_op1[6]) = 0x00fe00fe; ++ *((int *)&__m256_op1[5]) = 0x00fe00fe; ++ *((int *)&__m256_op1[4]) = 0x00fe00fe; ++ *((int *)&__m256_op1[3]) = 0x00fe00fe; ++ *((int *)&__m256_op1[2]) = 0x00fe00fe; ++ *((int *)&__m256_op1[1]) = 0x00fe00fe; ++ *((int *)&__m256_op1[0]) = 0x00fe00fe; ++ *((int *)&__m256_result[7]) = 0x3f800000; ++ *((int *)&__m256_result[6]) = 0x3f800000; ++ *((int *)&__m256_result[5]) = 0x3f800000; ++ *((int *)&__m256_result[4]) = 0x3f800000; ++ *((int *)&__m256_result[3]) = 0x3f800000; ++ *((int *)&__m256_result[2]) = 0x3f800000; ++ *((int *)&__m256_result[1]) = 0x3f800000; ++ *((int *)&__m256_result[0]) = 0x3f800000; ++ __m256_out = __lasx_xvfdiv_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x7fc00000; ++ *((int *)&__m256_result[6]) = 0x7fc00000; ++ *((int *)&__m256_result[5]) = 0x7fc00000; ++ *((int *)&__m256_result[4]) = 0x7fc00000; ++ *((int *)&__m256_result[3]) = 0x7fc00000; ++ *((int *)&__m256_result[2]) = 0x7fc00000; ++ *((int *)&__m256_result[1]) = 0x7fc00000; ++ *((int *)&__m256_result[0]) = 0x7fc00000; ++ __m256_out = __lasx_xvfdiv_s 
(__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x803f6004; ++ *((int *)&__m256_op0[4]) = 0x1f636003; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x803f6004; ++ *((int *)&__m256_op0[0]) = 0x1f636003; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x007f0107; ++ *((int *)&__m256_op1[4]) = 0x00c70106; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x007f0107; ++ *((int *)&__m256_op1[0]) = 0x00c70106; ++ *((int *)&__m256_result[7]) = 0x7fc00000; ++ *((int *)&__m256_result[6]) = 0x7fc00000; ++ *((int *)&__m256_result[5]) = 0xbeff7cfd; ++ *((int *)&__m256_result[4]) = 0x5e123f94; ++ *((int *)&__m256_result[3]) = 0x7fc00000; ++ *((int *)&__m256_result[2]) = 0x7fc00000; ++ *((int *)&__m256_result[1]) = 0xbeff7cfd; ++ *((int *)&__m256_result[0]) = 0x5e123f94; ++ __m256_out = __lasx_xvfdiv_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000008; ++ *((int *)&__m256_op0[6]) = 0x60601934; ++ *((int *)&__m256_op0[5]) = 0x00000008; ++ *((int *)&__m256_op0[4]) = 0x00200028; ++ *((int *)&__m256_op0[3]) = 0x00000008; ++ *((int *)&__m256_op0[2]) = 0x60601934; ++ *((int *)&__m256_op0[1]) = 0x00000008; ++ *((int *)&__m256_op0[0]) = 0x00200028; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ 
*((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfdiv_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_d.c +new file mode 100644 +index 000000000..5d5b4c43c +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_d.c +@@ -0,0 +1,152 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++/* { dg-timeout 500 } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000010001; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000017f0000017d; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000010001; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000017f0000017f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000100; ++ __m256i_out = __lasx_xvfclass_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002; ++ __m256i_out = __lasx_xvfclass_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000200; ++ __m256i_out = __lasx_xvfclass_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0006000000040000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0002000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0002000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000100; ++ __m256i_out = __lasx_xvfclass_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xbf00bf00bf00bf00; ++ *((unsigned long *)&__m256d_op0[2]) = 0xbf84bf00bf00bf0e; ++ *((unsigned long *)&__m256d_op0[1]) = 0xbf00bf00bf00bf00; ++ *((unsigned long 
*)&__m256d_op0[0]) = 0xbf84bf00bf00bf0e; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000008; ++ __m256i_out = __lasx_xvfclass_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffff00ffffff00; ++ *((unsigned long *)&__m256d_op0[2]) = 0xff00000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffff00ffffff00; ++ *((unsigned long *)&__m256d_op0[0]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000008; ++ __m256i_out = __lasx_xvfclass_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00000000ffff0001; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000ffff0001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000100; ++ __m256i_out = __lasx_xvfclass_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002; ++ 
*((unsigned long *)&__m256i_result[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002; ++ __m256i_out = __lasx_xvfclass_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000100; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000100; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000100; ++ __m256i_out = __lasx_xvfclass_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00000000000000b7; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffefff80; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00000000000000b7; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffefff80; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002; ++ __m256i_out = __lasx_xvfclass_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x4040404040404040; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000080; ++ __m256i_out = __lasx_xvfclass_d 
(__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000400000004000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000400000004000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000400000004000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000100; ++ __m256i_out = __lasx_xvfclass_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_s.c +new file mode 100644 +index 000000000..888e85b6e +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_s.c +@@ -0,0 +1,95 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++/* { dg-timeout 500 } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0xfffffff8; ++ *((int *)&__m256_op0[6]) = 0xffffff08; ++ *((int *)&__m256_op0[5]) = 0x00ff00f8; ++ *((int *)&__m256_op0[4]) = 0x00ffcff8; ++ *((int *)&__m256_op0[3]) = 0xfffffff8; ++ *((int *)&__m256_op0[2]) = 0xffffff08; ++ *((int *)&__m256_op0[1]) = 0x00ff00f8; ++ *((int *)&__m256_op0[0]) = 0x00ffcff8; ++ *((unsigned 
long *)&__m256i_result[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000008000000080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000008000000080; ++ __m256i_out = __lasx_xvfclass_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000200000002; ++ __m256i_out = __lasx_xvfclass_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000020000000200; ++ __m256i_out = __lasx_xvfclass_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x000000ff; ++ *((int *)&__m256_op0[4]) = 0x000000ff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 
0x00000000; ++ *((int *)&__m256_op0[1]) = 0x000000ff; ++ *((int *)&__m256_op0[0]) = 0x000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000010000000100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000010000000100; ++ __m256i_out = __lasx_xvfclass_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xfffffffb; ++ *((int *)&__m256_op0[6]) = 0xfffffffb; ++ *((int *)&__m256_op0[5]) = 0xfffffffb; ++ *((int *)&__m256_op0[4]) = 0xfffffffb; ++ *((int *)&__m256_op0[3]) = 0xfffffffb; ++ *((int *)&__m256_op0[2]) = 0xfffffffb; ++ *((int *)&__m256_op0[1]) = 0xfffffffb; ++ *((int *)&__m256_op0[0]) = 0xfffffffb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000200000002; ++ __m256i_out = __lasx_xvfclass_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_d.c +new file mode 100644 +index 000000000..bba1a06f3 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_d.c +@@ -0,0 +1,86 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ 
long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfffffffffffffefd; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0xc08f780000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xfffffffffffffefd; ++ *((unsigned long *)&__m256d_result[1]) = 0xc08f780000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff; ++ __m256d_out = __lasx_xvflogb_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xfff0000000000000; ++ __m256d_out = __lasx_xvflogb_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xfff0000000000000; ++ __m256d_out = __lasx_xvflogb_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ 
*((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xfff0000000000000; ++ __m256d_out = __lasx_xvflogb_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xfff0000000000000; ++ __m256d_out = __lasx_xvflogb_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xfff0000000000000; ++ __m256d_out = __lasx_xvflogb_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_s.c +new file mode 
100644 +index 000000000..b641c733f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_s.c +@@ -0,0 +1,115 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvflogb_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x10101010; ++ *((int *)&__m256_op0[6]) = 0x10101012; ++ *((int *)&__m256_op0[5]) = 0x10101010; ++ *((int *)&__m256_op0[4]) = 0x10101012; ++ *((int *)&__m256_op0[3]) = 0x10101010; ++ *((int *)&__m256_op0[2]) = 0x10101093; ++ *((int *)&__m256_op0[1]) = 0x11111111; ++ *((int *)&__m256_op0[0]) = 0x11111113; ++ *((int *)&__m256_result[7]) = 
0xc2be0000; ++ *((int *)&__m256_result[6]) = 0xc2be0000; ++ *((int *)&__m256_result[5]) = 0xc2be0000; ++ *((int *)&__m256_result[4]) = 0xc2be0000; ++ *((int *)&__m256_result[3]) = 0xc2be0000; ++ *((int *)&__m256_result[2]) = 0xc2be0000; ++ *((int *)&__m256_result[1]) = 0xc2ba0000; ++ *((int *)&__m256_result[0]) = 0xc2ba0000; ++ __m256_out = __lasx_xvflogb_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0xff800000; ++ *((int *)&__m256_result[6]) = 0xff800000; ++ *((int *)&__m256_result[5]) = 0xff800000; ++ *((int *)&__m256_result[4]) = 0xff800000; ++ *((int *)&__m256_result[3]) = 0xff800000; ++ *((int *)&__m256_result[2]) = 0xff800000; ++ *((int *)&__m256_result[1]) = 0xff800000; ++ *((int *)&__m256_result[0]) = 0xff800000; ++ __m256_out = __lasx_xvflogb_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0xff800000; ++ *((int *)&__m256_result[6]) = 0xff800000; ++ *((int *)&__m256_result[5]) = 0xff800000; ++ *((int *)&__m256_result[4]) = 0xff800000; ++ *((int *)&__m256_result[3]) = 0xff800000; ++ *((int *)&__m256_result[2]) = 0xff800000; ++ *((int *)&__m256_result[1]) = 0xff800000; ++ *((int *)&__m256_result[0]) = 0xff800000; ++ __m256_out = __lasx_xvflogb_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, 
__m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000087; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000087; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0xff800000; ++ *((int *)&__m256_result[6]) = 0xff800000; ++ *((int *)&__m256_result[5]) = 0xc30e0000; ++ *((int *)&__m256_result[4]) = 0xff800000; ++ *((int *)&__m256_result[3]) = 0xff800000; ++ *((int *)&__m256_result[2]) = 0xff800000; ++ *((int *)&__m256_result[1]) = 0xc30e0000; ++ *((int *)&__m256_result[0]) = 0xff800000; ++ __m256_out = __lasx_xvflogb_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_d.c +new file mode 100644 +index 000000000..c85c94bf6 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_d.c +@@ -0,0 +1,382 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xe37affb42fc05f69; ++ *((unsigned long *)&__m256d_op0[1]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x65fb66c81da8e5ba; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[3]) = 0x8b1414140e0e0e0e; ++ *((unsigned long *)&__m256d_op2[2]) = 0x00d6c1c830160048; ++ *((unsigned long *)&__m256d_op2[1]) = 0x36722a7e66972cd6; ++ *((unsigned long *)&__m256d_op2[0]) = 0xe3aebaf4df958004; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[2]) = 0x00d6c1c830160048; ++ *((unsigned long *)&__m256d_result[1]) = 0x36722a7e66972cd6; ++ *((unsigned long *)&__m256d_result[0]) = 0xe3aebaf4df958004; ++ __m256d_out = __lasx_xvfmadd_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000ffff0000ff80; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000468600007f79; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000f3280000dfff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0xfe02fe02fee5fe22; ++ *((unsigned long *)&__m256d_op1[0]) = 0xff49fe4200000000; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[1]) = 0x00020001ffb6ffe0; ++ *((unsigned long *)&__m256d_op2[0]) = 0x0049004200000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256d_result[0]) = 0xbf28b0686066be60; ++ __m256d_out = __lasx_xvfmadd_d (__m256d_op0, 
__m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[0]) = 0xc5c5c5c5c5c5c5c5; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff; ++ __m256d_out = __lasx_xvfmadd_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x2); ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmadd_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00007f7f00000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00007f7f00007fff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00000000000f1a40; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmadd_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000aaaa00008bfe; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000aaaa0000aaaa; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000aaaa00008bfe; ++ *((unsigned long *)&__m256d_op1[0]) = 
0x0000aaaa0000aaaa; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000aaaa00008bfe; ++ *((unsigned long *)&__m256d_op2[2]) = 0x0000aaaa0000aaaa; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0000aaaa00008bfe; ++ *((unsigned long *)&__m256d_op2[0]) = 0x0000aaaa0000aaaa; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000aaaa00008bfe; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000aaaa0000aaaa; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000aaaa00008bfe; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000aaaa0000aaaa; ++ __m256d_out = __lasx_xvfmadd_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0202020202020202; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0202810102020202; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0202020202020202; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0202810102020202; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[2]) = 0x00007fff00000000; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[0]) = 0x00007fff00000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x00007fff00000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x00007fff00000000; ++ __m256d_out = __lasx_xvfmadd_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256d_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x000000000000ffff; ++ __m256d_out = __lasx_xvfmadd_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x00ff010000ff017e; ++ *((unsigned long *)&__m256d_op1[2]) = 0x01fe01ae00ff00ff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x00ff010000ff017e; ++ *((unsigned long *)&__m256d_op1[0]) = 0x01fe01ae00ff00ff; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0001000000000000; ++ *((unsigned long *)&__m256d_op2[2]) = 0x0000000100010001; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m256d_op2[0]) = 0x0000000100010001; ++ *((unsigned long *)&__m256d_result[3]) = 0x0001000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000100010001; ++ *((unsigned long *)&__m256d_result[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000100010001; ++ __m256d_out = __lasx_xvfmadd_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, 
__m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffba8300004fc2; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffba8300004fc2; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffba8300004fc2; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffba8300004fc2; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffba8300004fc2; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffba8300004fc2; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff; ++ __m256d_out = __lasx_xvfmadd_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmadd_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[2]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[0]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffff000000; ++ __m256d_out = __lasx_xvfmadd_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op2[3]) = 0xd3d3d3d3d3d3d3d3; ++ *((unsigned long *)&__m256d_op2[2]) = 
0xd3d3d3d3d3d3d3d3; ++ *((unsigned long *)&__m256d_op2[1]) = 0xd3d3d3d3d3d3d3d3; ++ *((unsigned long *)&__m256d_op2[0]) = 0xd3d3d3d3d3d3d3d3; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff; ++ __m256d_out = __lasx_xvfmadd_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00000fff00000fff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00000fff00000fff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00000fff00000fff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000fff00000fff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x00000fff00000fff; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00000fff00000fff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x00000fff00000fff; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00000fff00000fff; ++ *((unsigned long *)&__m256d_op2[3]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256d_op2[2]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256d_op2[1]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256d_op2[0]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffffff5f5c; ++ __m256d_out = __lasx_xvfmsub_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00000000ffff8c80; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000fff0e400; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op1[2]) = 
0x0000000000007380; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00000000000f1c00; ++ *((unsigned long *)&__m256d_op2[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op2[2]) = 0x00000000ffff8c80; ++ *((unsigned long *)&__m256d_op2[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op2[0]) = 0x00000000fff0e400; ++ *((unsigned long *)&__m256d_result[3]) = 0x80000000ffffffff; ++ *((unsigned long *)&__m256d_result[2]) = 0x80000000ffff8c80; ++ *((unsigned long *)&__m256d_result[1]) = 0x80000000ffffffff; ++ *((unsigned long *)&__m256d_result[0]) = 0x80000000fff0e400; ++ __m256d_out = __lasx_xvfmsub_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000007; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000007; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00000000000001dc; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00000000000001dc; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[2]) = 0x00000000000001dc; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[0]) = 0x00000000000001dc; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x80000000000001dc; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x80000000000001dc; ++ __m256d_out = __lasx_xvfmsub_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0404000004040000; ++ *((unsigned long *)&__m256d_op0[2]) = 
0x0101010101010101; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0404000004040000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256d_op1[3]) = 0x8011ffee804c004c; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00faff0500c3ff3c; ++ *((unsigned long *)&__m256d_op1[1]) = 0x80f900f980780078; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0057ffa800ceff31; ++ *((unsigned long *)&__m256d_op2[3]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256d_op2[2]) = 0x00003fc00000428a; ++ *((unsigned long *)&__m256d_op2[1]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256d_op2[0]) = 0x00003fc00000428a; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256d_result[2]) = 0x80003fc00000428a; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256d_result[0]) = 0x80003fc00000428a; ++ __m256d_out = __lasx_xvfmsub_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op2[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_op2[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_op2[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_op2[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[0]) = 
0xffffffffffffffff; ++ __m256d_out = __lasx_xvfmsub_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256d_op2[2]) = 0xffffb2f600006f48; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256d_op2[0]) = 0xffffb2f600006f48; ++ *((unsigned long *)&__m256d_result[3]) = 0x8000000100000001; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffb2f600006f48; ++ *((unsigned long *)&__m256d_result[1]) = 0x8000000100000001; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffb2f600006f48; ++ __m256d_out = __lasx_xvfmsub_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_s.c +new file mode 100644 +index 000000000..bde41dd5c +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_s.c +@@ -0,0 +1,720 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, 
int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xf328dfff; ++ *((int *)&__m256_op1[1]) = 0x6651bfff; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_op2[7]) = 0x0000ffff; ++ *((int *)&__m256_op2[6]) = 0x0000ff80; ++ *((int *)&__m256_op2[5]) = 0x00004686; ++ *((int *)&__m256_op2[4]) = 0x00007f79; ++ *((int *)&__m256_op2[3]) = 0x0000ffff; ++ *((int *)&__m256_op2[2]) = 0x0000ffff; ++ *((int *)&__m256_op2[1]) = 0x0000f328; ++ *((int *)&__m256_op2[0]) = 0x0000dfff; ++ *((int *)&__m256_result[7]) = 0x0000ffff; ++ *((int *)&__m256_result[6]) = 0x0000ff80; ++ *((int *)&__m256_result[5]) = 0x00004686; ++ *((int *)&__m256_result[4]) = 0x00007f79; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0x0000ffff; ++ *((int *)&__m256_result[1]) = 0x0000f328; ++ *((int *)&__m256_result[0]) = 0x0000dfff; ++ __m256_out = __lasx_xvfmadd_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ 
*((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfmadd_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xfff10000; ++ *((int *)&__m256_op0[4]) = 0xfff10000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xfff10000; ++ *((int *)&__m256_op0[0]) = 0xfff10000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ 
*((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0xfff10000; ++ *((int *)&__m256_result[4]) = 0xfff10000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0xfff10000; ++ *((int *)&__m256_result[0]) = 0xfff10000; ++ __m256_out = __lasx_xvfmadd_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x803f6004; ++ *((int *)&__m256_op2[4]) = 0x1f636003; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x803f6004; ++ *((int *)&__m256_op2[0]) = 0x1f636003; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x803f6004; ++ *((int *)&__m256_result[4]) = 0x1f636003; ++ *((int *)&__m256_result[3]) = 
0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x803f6004; ++ *((int *)&__m256_result[0]) = 0x1f636003; ++ __m256_out = __lasx_xvfmadd_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfmadd_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffb3430a; ++ *((int *)&__m256_op0[4]) = 0x006ed8b8; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int 
*)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffb3430a; ++ *((int *)&__m256_op0[0]) = 0x006ed8b8; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x000001ff; ++ *((int *)&__m256_op1[4]) = 0x000003fe; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x000001ff; ++ *((int *)&__m256_op1[0]) = 0x000003fe; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x000000ff; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x000000ff; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0xfff3430a; ++ *((int *)&__m256_result[4]) = 0x000000ff; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0xfff3430a; ++ *((int *)&__m256_result[0]) = 0x000000ff; ++ __m256_out = __lasx_xvfmadd_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xfffeb683; ++ *((int *)&__m256_op0[6]) = 0x9ffffd80; ++ *((int *)&__m256_op0[5]) = 0xfffe97c0; ++ *((int *)&__m256_op0[4]) = 0x20010001; ++ *((int *)&__m256_op0[3]) = 0xfffeb683; ++ *((int *)&__m256_op0[2]) = 0x9ffffd80; ++ *((int *)&__m256_op0[1]) = 0xfffe97c0; ++ *((int *)&__m256_op0[0]) = 0x20010001; ++ *((int *)&__m256_op1[7]) = 0x00009fff; ++ *((int *)&__m256_op1[6]) = 0x9ffffd80; ++ *((int *)&__m256_op1[5]) = 0x0000ffff; ++ *((int *)&__m256_op1[4]) = 0x20010001; ++ *((int *)&__m256_op1[3]) = 0x00009fff; ++ *((int *)&__m256_op1[2]) = 0x9ffffd80; ++ *((int *)&__m256_op1[1]) = 0x0000ffff; ++ *((int *)&__m256_op1[0]) = 0x20010001; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int 
*)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00002080; ++ *((int *)&__m256_op2[4]) = 0xdf5b41cf; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00002080; ++ *((int *)&__m256_op2[0]) = 0xdf5b41cf; ++ *((int *)&__m256_result[7]) = 0xfffeb683; ++ *((int *)&__m256_result[6]) = 0x007ffd80; ++ *((int *)&__m256_result[5]) = 0xfffe97c0; ++ *((int *)&__m256_result[4]) = 0xdf5b41cf; ++ *((int *)&__m256_result[3]) = 0xfffeb683; ++ *((int *)&__m256_result[2]) = 0x007ffd80; ++ *((int *)&__m256_result[1]) = 0xfffe97c0; ++ *((int *)&__m256_result[0]) = 0xdf5b41cf; ++ __m256_out = __lasx_xvfmadd_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0xfffeb664; ++ *((int *)&__m256_op1[6]) = 0x007ffd61; ++ *((int *)&__m256_op1[5]) = 0xfffe97a1; ++ *((int *)&__m256_op1[4]) = 0xdf5b41b0; ++ *((int *)&__m256_op1[3]) = 0xfffeb664; ++ *((int *)&__m256_op1[2]) = 0x007ffd61; ++ *((int *)&__m256_op1[1]) = 0xfffe97a1; ++ *((int *)&__m256_op1[0]) = 0xdf5b41b0; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x94d7fb52; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0xfffeb664; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0xfffe97a1; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 
0xfffeb664; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0xfffe97a1; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfmadd_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfmadd_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int 
*)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xb70036db; ++ *((int *)&__m256_op1[6]) = 0x12c4007e; ++ *((int *)&__m256_op1[5]) = 0xb7146213; ++ *((int *)&__m256_op1[4]) = 0xfc1e0049; ++ *((int *)&__m256_op1[3]) = 0x000000fe; ++ *((int *)&__m256_op1[2]) = 0xfe02fffe; ++ *((int *)&__m256_op1[1]) = 0xb71c413b; ++ *((int *)&__m256_op1[0]) = 0x199d04b5; ++ *((int *)&__m256_op2[7]) = 0xb70036db; ++ *((int 
*)&__m256_op2[6]) = 0x12c4007e; ++ *((int *)&__m256_op2[5]) = 0xb7146213; ++ *((int *)&__m256_op2[4]) = 0xfc1e0049; ++ *((int *)&__m256_op2[3]) = 0x000000fe; ++ *((int *)&__m256_op2[2]) = 0xfe02fffe; ++ *((int *)&__m256_op2[1]) = 0xb71c413b; ++ *((int *)&__m256_op2[0]) = 0x199d04b5; ++ *((int *)&__m256_result[7]) = 0x370036db; ++ *((int *)&__m256_result[6]) = 0x92c4007e; ++ *((int *)&__m256_result[5]) = 0x37146213; ++ *((int *)&__m256_result[4]) = 0x7c1e0049; ++ *((int *)&__m256_result[3]) = 0x800000fe; ++ *((int *)&__m256_result[2]) = 0x7e02fffe; ++ *((int *)&__m256_result[1]) = 0x371c413b; ++ *((int *)&__m256_result[0]) = 0x999d04b5; ++ __m256_out = __lasx_xvfmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 
0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x3f7f7f7e; ++ *((int *)&__m256_op1[4]) = 0xff800000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x3f7f7f7e; ++ *((int *)&__m256_op1[0]) = 0xff800000; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x7fffffff; ++ *((int *)&__m256_op2[4]) = 0xff7fffff; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x7fffffff; ++ *((int *)&__m256_op2[0]) = 0xff7fffff; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x7fffffff; ++ *((int *)&__m256_result[4]) = 0x7fc00000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x7fffffff; ++ *((int *)&__m256_result[0]) = 0x7fc00000; ++ __m256_out = __lasx_xvfmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int 
*)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffffafaf; ++ *((int *)&__m256_op0[4]) = 0xb3b3dc9d; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffffafaf; ++ *((int *)&__m256_op0[0]) = 0xb3b3dc9d; ++ *((int *)&__m256_op1[7]) = 0x00020000; ++ *((int *)&__m256_op1[6]) = 0x00020000; ++ *((int *)&__m256_op1[5]) = 0x00220021; ++ *((int *)&__m256_op1[4]) = 0x004a007e; ++ *((int *)&__m256_op1[3]) = 0x00020000; ++ *((int *)&__m256_op1[2]) = 0x00020000; ++ *((int *)&__m256_op1[1]) = 0x00220021; ++ *((int *)&__m256_op1[0]) = 0x004a007e; ++ *((int *)&__m256_op2[7]) = 0x00000001; ++ *((int 
*)&__m256_op2[6]) = 0x00007f7f; ++ *((int *)&__m256_op2[5]) = 0x00000001; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000001; ++ *((int *)&__m256_op2[2]) = 0x00007f7f; ++ *((int *)&__m256_op2[1]) = 0x00000001; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x80000001; ++ *((int *)&__m256_result[6]) = 0x80007f7f; ++ *((int *)&__m256_result[5]) = 0xffffafaf; ++ *((int *)&__m256_result[4]) = 0x80000000; ++ *((int *)&__m256_result[3]) = 0x80000001; ++ *((int *)&__m256_result[2]) = 0x80007f7f; ++ *((int *)&__m256_result[1]) = 0xffffafaf; ++ *((int *)&__m256_result[0]) = 0x80000000; ++ __m256_out = __lasx_xvfmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_op2[7]) = 0xffffffe5; ++ *((int *)&__m256_op2[6]) = 0xffffffe5; ++ *((int *)&__m256_op2[5]) = 0xffffffe5; ++ *((int *)&__m256_op2[4]) = 0xffffffe5; ++ *((int *)&__m256_op2[3]) = 0xffffffe5; ++ *((int *)&__m256_op2[2]) = 0xffffffe5; ++ *((int *)&__m256_op2[1]) = 0xffffffe5; ++ *((int *)&__m256_op2[0]) = 0xffffffe5; ++ *((int *)&__m256_result[7]) = 0xffffffe5; ++ *((int *)&__m256_result[6]) = 0xffffffe5; ++ *((int *)&__m256_result[5]) = 0xffffffe5; ++ *((int *)&__m256_result[4]) = 0xffffffe5; ++ *((int *)&__m256_result[3]) = 
0xffffffe5; ++ *((int *)&__m256_result[2]) = 0xffffffe5; ++ *((int *)&__m256_result[1]) = 0xffffffe5; ++ *((int *)&__m256_result[0]) = 0xffffffe5; ++ __m256_out = __lasx_xvfmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xbfffffff; ++ *((int *)&__m256_op0[6]) = 0xffff8000; ++ *((int *)&__m256_op0[5]) = 0xbfff8000; ++ *((int *)&__m256_op0[4]) = 0x80000000; ++ *((int *)&__m256_op0[3]) = 0xbfffffff; ++ *((int *)&__m256_op0[2]) = 0xffff8000; ++ *((int *)&__m256_op0[1]) = 0xbfff8000; ++ *((int *)&__m256_op0[0]) = 0x80000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x80000000; ++ *((int *)&__m256_result[6]) = 0xffff8000; ++ *((int *)&__m256_result[5]) = 0x80000000; ++ *((int *)&__m256_result[4]) = 0x80000000; ++ *((int *)&__m256_result[3]) = 0x80000000; ++ *((int *)&__m256_result[2]) = 0xffff8000; ++ *((int *)&__m256_result[1]) = 0x80000000; ++ *((int *)&__m256_result[0]) = 0x80000000; ++ __m256_out = __lasx_xvfmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int 
*)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x02020102; ++ *((int *)&__m256_op1[6]) = 0x02020102; ++ *((int *)&__m256_op1[5]) = 0x02020102; ++ *((int *)&__m256_op1[4]) = 0x02020102; ++ *((int *)&__m256_op1[3]) = 0x02020102; ++ *((int *)&__m256_op1[2]) = 0x02020102; ++ *((int *)&__m256_op1[1]) = 0x02020102; ++ *((int *)&__m256_op1[0]) = 0x02020102; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000008; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000008; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000008; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000008; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000008; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000008; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000008; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000008; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int 
*)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000001; ++ *((int *)&__m256_op2[4]) = 0x00000001; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000001; ++ *((int *)&__m256_op2[0]) = 0x00000001; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x80000001; ++ *((int *)&__m256_result[4]) = 0x80000001; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x80000001; ++ *((int *)&__m256_result[0]) = 0x80000001; ++ __m256_out = __lasx_xvfmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000040; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000040; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_op2[7]) = 0x40404040; ++ *((int *)&__m256_op2[6]) = 0x40404040; ++ *((int *)&__m256_op2[5]) = 0x40404040; ++ *((int *)&__m256_op2[4]) = 0x40404040; ++ *((int *)&__m256_op2[3]) = 0x40404040; ++ *((int *)&__m256_op2[2]) = 0x40404040; ++ *((int *)&__m256_op2[1]) = 0x40404040; ++ *((int *)&__m256_op2[0]) = 0x40404040; ++ *((int *)&__m256_result[7]) = 0xc0404040; ++ *((int *)&__m256_result[6]) = 0xc0404040; ++ *((int *)&__m256_result[5]) = 0xc0404040; ++ *((int *)&__m256_result[4]) = 0xc0404040; ++ *((int *)&__m256_result[3]) = 
0xc0404040; ++ *((int *)&__m256_result[2]) = 0xc0404040; ++ *((int *)&__m256_result[1]) = 0xc0404040; ++ *((int *)&__m256_result[0]) = 0xc0404040; ++ __m256_out = __lasx_xvfmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_d.c +new file mode 100644 +index 000000000..207ba167f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_d.c +@@ -0,0 +1,230 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x000000040000fff8; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x000000040000fff8; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x000000040000fff8; ++ *((unsigned long 
*)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmax_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000008000000080; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000008000000080; ++ *((unsigned long *)&__m256d_op1[3]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long *)&__m256d_op1[2]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long *)&__m256d_op1[1]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long *)&__m256d_op1[0]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256d_result[2]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m256d_result[0]) = 0x45c5c5c545c5c5c5; ++ __m256d_out = __lasx_xvfmax_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000004290; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00000000002a96ba; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000004290; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000002a96ba; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000083f95466; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0101010100005400; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000004290; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000083f95466; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000004290; ++ *((unsigned long *)&__m256d_result[0]) = 0x0101010100005400; ++ __m256d_out = __lasx_xvfmax_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 
0xffffffff00000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmax_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmax_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0101000101010001; ++ 
*((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0101000101010001; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0101000101010001; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0101000101010001; ++ __m256d_out = __lasx_xvfmax_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0001000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000100010001; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000100010001; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmin_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmin_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmin_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0200000202000002; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0200000202000002; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0101000101010001; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0101000101010001; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0101000101010001; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0101000101010001; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0101000101010001; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0101000101010001; ++ __m256d_out = __lasx_xvfmin_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmin_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmin_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xfffeb6839ffffd80; ++ 
*((unsigned long *)&__m256d_op1[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256d_op1[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256d_op1[0]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmin_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmin_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_s.c +new file mode 100644 +index 000000000..9b7703231 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_s.c +@@ -0,0 +1,560 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, 
__m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00010101; ++ *((int *)&__m256_op1[6]) = 0x01010101; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00010100; ++ *((int *)&__m256_op1[1]) = 0x00010000; ++ *((int *)&__m256_op1[0]) = 0x01000100; ++ *((int *)&__m256_result[7]) = 0x00010101; ++ *((int *)&__m256_result[6]) = 0x01010101; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00010100; ++ *((int *)&__m256_result[1]) = 0x00010000; ++ *((int *)&__m256_result[0]) = 0x01000100; ++ __m256_out = __lasx_xvfmax_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x59800000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x59800000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ 
*((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x59800000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x59800000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmax_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00010001; ++ *((int *)&__m256_op1[6]) = 0x00010001; ++ *((int *)&__m256_op1[5]) = 0x00010001; ++ *((int *)&__m256_op1[4]) = 0x00010001; ++ *((int *)&__m256_op1[3]) = 0x00010001; ++ *((int *)&__m256_op1[2]) = 0x00010001; ++ *((int *)&__m256_op1[1]) = 0x00010001; ++ *((int *)&__m256_op1[0]) = 0x00010001; ++ *((int *)&__m256_result[7]) = 0x00010001; ++ *((int *)&__m256_result[6]) = 0x00010001; ++ *((int *)&__m256_result[5]) = 0x00010001; ++ *((int *)&__m256_result[4]) = 0x00010001; ++ *((int *)&__m256_result[3]) = 0x00010001; ++ *((int *)&__m256_result[2]) = 0x00010001; ++ *((int *)&__m256_result[1]) = 0x00010001; ++ *((int *)&__m256_result[0]) = 0x00010001; ++ __m256_out = __lasx_xvfmax_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ 
*((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmax_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x7fefffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x7fefffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 
0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmax_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x000000ff; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x000000ff; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmax_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00003fe0; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00003fe0; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ 
*((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00003fe0; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00003fe0; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmax_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmax_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x0000000e; ++ 
*((int *)&__m256_op1[6]) = 0x0000000e; ++ *((int *)&__m256_op1[5]) = 0x0000000e; ++ *((int *)&__m256_op1[4]) = 0x0000000e; ++ *((int *)&__m256_op1[3]) = 0x0000000e; ++ *((int *)&__m256_op1[2]) = 0x0000000e; ++ *((int *)&__m256_op1[1]) = 0x0000000e; ++ *((int *)&__m256_op1[0]) = 0x0000000e; ++ *((int *)&__m256_result[7]) = 0x0000000e; ++ *((int *)&__m256_result[6]) = 0x0000000e; ++ *((int *)&__m256_result[5]) = 0x0000000e; ++ *((int *)&__m256_result[4]) = 0x0000000e; ++ *((int *)&__m256_result[3]) = 0x0000000e; ++ *((int *)&__m256_result[2]) = 0x0000000e; ++ *((int *)&__m256_result[1]) = 0x0000000e; ++ *((int *)&__m256_result[0]) = 0x0000000e; ++ __m256_out = __lasx_xvfmax_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0xffdbbbcf; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0xffb8579f; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0xffdbbbcf; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0xffb8579f; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0xfff8579f; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0xfff8579f; ++ __m256_out = __lasx_xvfmax_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ 
*((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x40404040; ++ *((int *)&__m256_op1[6]) = 0x40404040; ++ *((int *)&__m256_op1[5]) = 0x40404040; ++ *((int *)&__m256_op1[4]) = 0x40404040; ++ *((int *)&__m256_op1[3]) = 0x40404040; ++ *((int *)&__m256_op1[2]) = 0x40404040; ++ *((int *)&__m256_op1[1]) = 0x40404040; ++ *((int *)&__m256_op1[0]) = 0x40404040; ++ *((int *)&__m256_result[7]) = 0x40404040; ++ *((int *)&__m256_result[6]) = 0x40404040; ++ *((int *)&__m256_result[5]) = 0x40404040; ++ *((int *)&__m256_result[4]) = 0x40404040; ++ *((int *)&__m256_result[3]) = 0x40404040; ++ *((int *)&__m256_result[2]) = 0x40404040; ++ *((int *)&__m256_result[1]) = 0x40404040; ++ *((int *)&__m256_result[0]) = 0x40404040; ++ __m256_out = __lasx_xvfmax_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x0000006d; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x0010006d; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x0000006d; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x0010006d; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00080040; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00080040; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00080040; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00080040; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00080040; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x0010006d; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 
0x00080040; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x0010006d; ++ __m256_out = __lasx_xvfmax_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x000002ff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x000002ff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x000002ff; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x000002ff; ++ __m256_out = __lasx_xvfmax_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x7ff90000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x1ff60000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0xfffffffe; ++ *((int *)&__m256_op1[4]) = 0x00000001; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0xfffffffe; ++ 
*((int *)&__m256_op1[0]) = 0x00000001; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000001; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000001; ++ __m256_out = __lasx_xvfmin_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmin_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ 
*((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmin_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmin_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ 
*((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x0218ff78; ++ *((int *)&__m256_op1[6]) = 0xfc38fc38; ++ *((int *)&__m256_op1[5]) = 0xfc000000; ++ *((int *)&__m256_op1[4]) = 0x00000048; ++ *((int *)&__m256_op1[3]) = 0x0218ff78; ++ *((int *)&__m256_op1[2]) = 0xfc38fc38; ++ *((int *)&__m256_op1[1]) = 0xfc000000; ++ *((int *)&__m256_op1[0]) = 0x00000048; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0xfc38fc38; ++ *((int *)&__m256_result[5]) = 0xfc000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0xfc38fc38; ++ *((int *)&__m256_result[1]) = 0xfc000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmin_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x000000f0; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x000000f0; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x000000f0; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; 
++ *((int *)&__m256_result[2]) = 0x000000f0; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmin_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffe7; ++ *((int *)&__m256_op0[6]) = 0xffffffe7; ++ *((int *)&__m256_op0[5]) = 0xffffffe7; ++ *((int *)&__m256_op0[4]) = 0xffffffe7; ++ *((int *)&__m256_op0[3]) = 0xffffffe7; ++ *((int *)&__m256_op0[2]) = 0xffffffe7; ++ *((int *)&__m256_op0[1]) = 0xffffffe7; ++ *((int *)&__m256_op0[0]) = 0xffffffe7; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmin_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_d.c +new file mode 100644 +index 000000000..96bbb942d +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_d.c +@@ -0,0 +1,230 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, 
__m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x000000040000fff8; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x000000040000fff8; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmaxa_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffff8001; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) 
= 0x0000000000000000; ++ __m256d_out = __lasx_xvfmaxa_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000018; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000018; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000018; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000018; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmaxa_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0002000000020000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0002000000010000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0002000000020000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0002000000010000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0002000000020000; ++ *((unsigned long *)&__m256d_result[2]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0002000000020000; ++ *((unsigned long *)&__m256d_result[0]) = 0xfff0000000000000; ++ __m256d_out = __lasx_xvfmaxa_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmaxa_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000001; ++ __m256d_out = __lasx_xvfmaxa_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long 
*)&__m256d_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmaxa_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000008000000080; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000008000000080; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000008000000080; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000008000000080; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmina_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256d_op1[2]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256d_op1[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256d_op1[0]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmina_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmina_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmina_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 
0x0000000100000001; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffb2f600006f48; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffb2f600006f48; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x00000000000000ff; ++ __m256d_out = __lasx_xvfmina_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x7efefefe80ffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x7efefefe80ffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x7efefefe80ffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x7efefefe80ffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7efefefe80ffffff; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7efefefe80ffffff; ++ __m256d_out = __lasx_xvfmina_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0087ff87f807ff87; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0087ff87f807ff87; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmina_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_s.c +new file mode 100644 +index 000000000..c73a8a74a +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_s.c +@@ -0,0 +1,506 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00005555; ++ *((int *)&__m256_op1[6]) = 0x00005555; ++ *((int 
*)&__m256_op1[5]) = 0x000307ff; ++ *((int *)&__m256_op1[4]) = 0xfe72e815; ++ *((int *)&__m256_op1[3]) = 0x00005555; ++ *((int *)&__m256_op1[2]) = 0x00005555; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000015; ++ *((int *)&__m256_result[7]) = 0x00005555; ++ *((int *)&__m256_result[6]) = 0x00005555; ++ *((int *)&__m256_result[5]) = 0x000307ff; ++ *((int *)&__m256_result[4]) = 0xfe72e815; ++ *((int *)&__m256_result[3]) = 0x00005555; ++ *((int *)&__m256_result[2]) = 0x00005555; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000015; ++ __m256_out = __lasx_xvfmaxa_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmaxa_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00ff00ff; ++ *((int *)&__m256_op0[6]) = 0x00ff00ff; ++ *((int *)&__m256_op0[5]) = 0x00ff00ff; ++ 
*((int *)&__m256_op0[4]) = 0x000c0000; ++ *((int *)&__m256_op0[3]) = 0x00ff00ff; ++ *((int *)&__m256_op0[2]) = 0x00ff00ff; ++ *((int *)&__m256_op0[1]) = 0x00ff00ff; ++ *((int *)&__m256_op0[0]) = 0x00040000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00ff00ff; ++ *((int *)&__m256_result[6]) = 0x00ff00ff; ++ *((int *)&__m256_result[5]) = 0x00ff00ff; ++ *((int *)&__m256_result[4]) = 0x000c0000; ++ *((int *)&__m256_result[3]) = 0x00ff00ff; ++ *((int *)&__m256_result[2]) = 0x00ff00ff; ++ *((int *)&__m256_result[1]) = 0x00ff00ff; ++ *((int *)&__m256_result[0]) = 0x00040000; ++ __m256_out = __lasx_xvfmaxa_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x000007ff; ++ *((int *)&__m256_op0[6]) = 0x000007ff; ++ *((int *)&__m256_op0[5]) = 0x000007ff; ++ *((int *)&__m256_op0[4]) = 0xfffff800; ++ *((int *)&__m256_op0[3]) = 0x000007ff; ++ *((int *)&__m256_op0[2]) = 0x000007ff; ++ *((int *)&__m256_op0[1]) = 0x000007ff; ++ *((int *)&__m256_op0[0]) = 0xfffff800; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x000007ff; ++ *((int *)&__m256_result[6]) = 0x000007ff; ++ *((int *)&__m256_result[5]) = 0x000007ff; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x000007ff; ++ *((int *)&__m256_result[2]) = 0x000007ff; ++ *((int *)&__m256_result[1]) = 
0x000007ff; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmaxa_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000001; ++ *((int *)&__m256_op0[5]) = 0x001f00e0; ++ *((int *)&__m256_op0[4]) = 0x1f1f1fff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000001; ++ *((int *)&__m256_op0[1]) = 0x001f00e0; ++ *((int *)&__m256_op0[0]) = 0x1f1f1fff; ++ *((int *)&__m256_op1[7]) = 0x80000000; ++ *((int *)&__m256_op1[6]) = 0x80000000; ++ *((int *)&__m256_op1[5]) = 0x80000000; ++ *((int *)&__m256_op1[4]) = 0xff800000; ++ *((int *)&__m256_op1[3]) = 0x80000000; ++ *((int *)&__m256_op1[2]) = 0x80000000; ++ *((int *)&__m256_op1[1]) = 0x80000000; ++ *((int *)&__m256_op1[0]) = 0xff800000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000001; ++ *((int *)&__m256_result[5]) = 0x001f00e0; ++ *((int *)&__m256_result[4]) = 0xff800000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000001; ++ *((int *)&__m256_result[1]) = 0x001f00e0; ++ *((int *)&__m256_result[0]) = 0xff800000; ++ __m256_out = __lasx_xvfmaxa_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000001; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000001; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ 
*((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000001; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmaxa_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00009fff; ++ *((int *)&__m256_op0[6]) = 0x00002001; ++ *((int *)&__m256_op0[5]) = 0x0000ffff; ++ *((int *)&__m256_op0[4]) = 0x0000ffff; ++ *((int *)&__m256_op0[3]) = 0x00009fff; ++ *((int *)&__m256_op0[2]) = 0x00002001; ++ *((int *)&__m256_op0[1]) = 0x0000ffff; ++ *((int *)&__m256_op0[0]) = 0x0000ffff; ++ *((int *)&__m256_op1[7]) = 0xfffeb683; ++ *((int *)&__m256_op1[6]) = 0x9ffffd80; ++ *((int *)&__m256_op1[5]) = 0xfffe97c0; ++ *((int *)&__m256_op1[4]) = 0x20010001; ++ *((int *)&__m256_op1[3]) = 0xfffeb683; ++ *((int *)&__m256_op1[2]) = 0x9ffffd80; ++ *((int *)&__m256_op1[1]) = 0xfffe97c0; ++ *((int *)&__m256_op1[0]) = 0x20010001; ++ *((int *)&__m256_result[7]) = 0x00009fff; ++ *((int *)&__m256_result[6]) = 0x9ffffd80; ++ *((int *)&__m256_result[5]) = 0x0000ffff; ++ *((int *)&__m256_result[4]) = 0x20010001; ++ *((int *)&__m256_result[3]) = 0x00009fff; ++ *((int *)&__m256_result[2]) = 0x9ffffd80; ++ *((int *)&__m256_result[1]) = 0x0000ffff; ++ *((int *)&__m256_result[0]) = 0x20010001; ++ __m256_out = __lasx_xvfmaxa_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000170; ++ *((int *)&__m256_op0[6]) = 0x00000080; ++ *((int *)&__m256_op0[5]) = 0xc0650055; ++ *((int *)&__m256_op0[4]) = 0x0055ffab; ++ *((int *)&__m256_op0[3]) = 0x00000170; ++ *((int *)&__m256_op0[2]) = 0x00000080; ++ *((int *)&__m256_op0[1]) = 0xc0650055; ++ *((int *)&__m256_op0[0]) = 0x0055ffab; ++ *((int *)&__m256_op1[7]) = 0x00000000; 
++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmina_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0xffff0000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0xffff0000; ++ *((int *)&__m256_op1[7]) = 0xfffefffe; ++ *((int *)&__m256_op1[6]) = 0xfffefffe; ++ *((int *)&__m256_op1[5]) = 0xfffefffe; ++ *((int *)&__m256_op1[4]) = 0xfffefffe; ++ *((int *)&__m256_op1[3]) = 0xfffefffe; ++ *((int *)&__m256_op1[2]) = 0xfffefffe; ++ *((int *)&__m256_op1[1]) = 0xfffefffe; ++ *((int *)&__m256_op1[0]) = 0xfffefffe; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0xffff0000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0xffff0000; ++ __m256_out = __lasx_xvfmina_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 
0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmina_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00fe01f0; ++ *((int *)&__m256_op0[6]) = 0x00010000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00c40086; ++ *((int *)&__m256_op0[3]) = 0x00fe01f0; ++ *((int *)&__m256_op0[2]) = 0x00010000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00c40086; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x82a54290; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x028aa700; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x82a54290; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x02a54287; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00010000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00c40086; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int 
*)&__m256_result[2]) = 0x00010000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00c40086; ++ __m256_out = __lasx_xvfmina_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x02a54290; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x0154dc84; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x02a54290; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000089; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x02a54290; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x0154dc84; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x02a54290; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000089; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x02a54290; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x0154dc84; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x02a54290; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000089; ++ __m256_out = __lasx_xvfmina_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x04000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x04000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int 
*)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmina_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00100000; ++ *((int *)&__m256_op0[6]) = 0x00100000; ++ *((int *)&__m256_op0[5]) = 0x00100000; ++ *((int *)&__m256_op0[4]) = 0x00100000; ++ *((int *)&__m256_op0[3]) = 0x00100000; ++ *((int *)&__m256_op0[2]) = 0x00100000; ++ *((int *)&__m256_op0[1]) = 0x00100000; ++ *((int *)&__m256_op0[0]) = 0x00100000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmina_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000010; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ 
*((int *)&__m256_op0[0]) = 0x00000010; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmina_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000080; ++ *((int *)&__m256_op0[6]) = 0x00000080; ++ *((int *)&__m256_op0[5]) = 0x00000080; ++ *((int *)&__m256_op0[4]) = 0x00000080; ++ *((int *)&__m256_op0[3]) = 0x00000080; ++ *((int *)&__m256_op0[2]) = 0x00000080; ++ *((int *)&__m256_op0[1]) = 0x00000080; ++ *((int *)&__m256_op0[0]) = 0x00000080; ++ *((int *)&__m256_op1[7]) = 0x00000001; ++ *((int *)&__m256_op1[6]) = 0x00000001; ++ *((int *)&__m256_op1[5]) = 0x00000001; ++ *((int *)&__m256_op1[4]) = 0x00000001; ++ *((int *)&__m256_op1[3]) = 0x00000001; ++ *((int *)&__m256_op1[2]) = 0x00000001; ++ *((int *)&__m256_op1[1]) = 0x00000001; ++ *((int *)&__m256_op1[0]) = 0x00000001; ++ *((int *)&__m256_result[7]) = 0x00000001; ++ *((int *)&__m256_result[6]) = 0x00000001; ++ *((int *)&__m256_result[5]) = 0x00000001; ++ *((int *)&__m256_result[4]) = 0x00000001; ++ *((int *)&__m256_result[3]) = 0x00000001; ++ *((int *)&__m256_result[2]) = 0x00000001; ++ *((int *)&__m256_result[1]) = 0x00000001; ++ *((int *)&__m256_result[0]) = 0x00000001; ++ __m256_out = __lasx_xvfmina_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ 
++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmina_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; 
++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmina_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_d.c +new file mode 100644 +index 000000000..18d5c51de +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_d.c +@@ -0,0 +1,482 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x1e1800001e180000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x1e1800001e180000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x2f03988e2052463e; ++ *((unsigned long *)&__m256d_result[2]) = 0x2f03988e1409212e; ++ *((unsigned long *)&__m256d_result[1]) = 0x2f03988e2052463e; ++ *((unsigned long *)&__m256d_result[0]) = 0x2f03988e1409212e; ++ __m256d_out = __lasx_xvfsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00000000003f7e3f; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffc6cc05c64d960e; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00000000003f7e3f; ++ *((unsigned long *)&__m256d_op0[0]) = 0xff874dc687870000; ++ *((unsigned long *)&__m256d_result[3]) = 0x1f0fdf7f3e3b31d4; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x1f0fdf7f3e3b31d4; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff8000000000000; ++ __m256d_out = __lasx_xvfsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000100000018; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000100000018; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x1f60000000c00000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x1f60000000c00000; ++ __m256d_out = __lasx_xvfsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0003030300000300; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0003030300000300; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0003030300000100; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0003030300000100; ++ 
*((unsigned long *)&__m256d_result[3]) = 0x1febc46085090ea0; ++ *((unsigned long *)&__m256d_result[2]) = 0x1febc46085090ea0; ++ *((unsigned long *)&__m256d_result[1]) = 0x1febc46085090567; ++ *((unsigned long *)&__m256d_result[0]) = 0x1febc46085090567; ++ __m256d_out = __lasx_xvfsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000007f007f007f; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000007f007f007f; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x1f9689fdb16cabbd; ++ *((unsigned long *)&__m256d_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x1f9689fdb16cabbd; ++ __m256d_out = __lasx_xvfsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffffff0000; ++ __m256d_out = __lasx_xvfsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000010000000100; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000010000000100; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x1fa0000000080000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x1fa0000000080000; ++ __m256d_out = __lasx_xvfsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, 
__m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffffff8000; ++ __m256d_out = __lasx_xvfsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffff00000000; ++ __m256d_out = __lasx_xvfrsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0209fefb08140000; ++ *((unsigned long 
*)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0003fffc00060000; ++ *((unsigned long *)&__m256d_result[3]) = 0x6100000800060005; ++ *((unsigned long *)&__m256d_result[2]) = 0x5ee1c073b800c916; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x5ff00007fff9fff3; ++ __m256d_out = __lasx_xvfrsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x555555553f800000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x555555553f800000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x353bb67af686ad9b; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x353bb67af686ad9b; ++ __m256d_out = __lasx_xvfrsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000001f0000001f; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000001f0000ffff; ++ *((unsigned long *)&__m256d_result[3]) = 0x60000007fffe0001; ++ *((unsigned long *)&__m256d_result[2]) = 0x60000007fffe0001; ++ *((unsigned long *)&__m256d_result[1]) = 0x6056fd4e7926d5c0; ++ *((unsigned long *)&__m256d_result[0]) = 0x6056fd4e1a4616c4; ++ __m256d_out = __lasx_xvfrsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ 
*((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 
0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00001bfa000000f9; ++ *((unsigned long *)&__m256d_op0[2]) = 0x000000f900004040; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00001bfa000000f9; ++ *((unsigned long *)&__m256d_op0[0]) = 0x000000f900004040; ++ *((unsigned long *)&__m256d_result[3]) = 0x60183329ceb52cf0; ++ *((unsigned long *)&__m256d_result[2]) = 0x6040392cdaf9b3ff; ++ *((unsigned long *)&__m256d_result[1]) = 0x60183329ceb52cf0; ++ *((unsigned long *)&__m256d_result[0]) = 0x6040392cdaf9b3ff; ++ __m256d_out = __lasx_xvfrsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x3de00103153ff5fb; ++ *((unsigned long *)&__m256d_op0[2]) = 0xbffffffe80000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x3de00103153ff5fb; ++ *((unsigned long *)&__m256d_op0[0]) = 0xbffffffe80000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x40f69fe73c26f4ee; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x40f69fe73c26f4ee; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff8000000000000; ++ __m256d_out = __lasx_xvfrsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256d_result[3]) = 0x606a20bd700e59a3; ++ *((unsigned long *)&__m256d_result[2]) = 0x6066a09e66c5f1bb; ++ *((unsigned long *)&__m256d_result[1]) = 0x606a20bd700e59a3; ++ *((unsigned long *)&__m256d_result[0]) = 0x6066a09e66c5f1bb; ++ __m256d_out = __lasx_xvfrsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrecip_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrecip_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrecip_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x03fc03fc03f803f8; ++ *((unsigned long *)&__m256d_op0[2]) = 0x03fc03fc03f803f8; ++ *((unsigned long *)&__m256d_op0[1]) = 0x03fc03fc03f803f8; ++ *((unsigned long 
*)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7be2468acf15f39c; ++ *((unsigned long *)&__m256d_result[2]) = 0x7be2468acf15f39c; ++ *((unsigned long *)&__m256d_result[1]) = 0x7be2468acf15f39c; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrecip_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrecip_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrecip_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256d_op0[2]) = 0x000000000154dc84; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000089; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000; ++ 
*((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrecip_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrecip_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256d_op0[2]) = 0xd0d8eecf383fdf0d; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256d_op0[0]) = 0xd0d8eecf383fdf0d; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xaf0489001bd4c0c3; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xaf0489001bd4c0c3; ++ __m256d_out = __lasx_xvfrecip_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000a00000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00000000fffff614; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000a00000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000fffff614; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrecip_d 
(__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrecip_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000001e0000001e; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000001e0000001e; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000001e0000001e; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000001e0000001e; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrecip_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xff80000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xff80000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x8060000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x8060000000000000; ++ __m256d_out = __lasx_xvfrecip_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrecip_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrecip_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff; ++ __m256d_out = __lasx_xvfrecip_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrecip_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff; ++ __m256d_out = __lasx_xvfrecip_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_s.c +new file mode 100644 +index 000000000..27df4a27d +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_s.c +@@ -0,0 +1,457 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int 
*)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfsqrt_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfsqrt_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int 
*)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfsqrt_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x0000ff80; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x0000ffff; ++ *((int *)&__m256_result[7]) = 0x7f800000; ++ *((int *)&__m256_result[6]) = 0x7f800000; ++ *((int *)&__m256_result[5]) = 0x7f800000; ++ *((int *)&__m256_result[4]) = 0x60b53246; ++ *((int *)&__m256_result[3]) = 0x7f800000; ++ *((int *)&__m256_result[2]) = 0x7f800000; ++ *((int *)&__m256_result[1]) = 0x7f800000; ++ *((int *)&__m256_result[0]) = 0x60b5054d; ++ __m256_out = __lasx_xvfrsqrt_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0x0060005a; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0x0060005a; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0x5f13ccf5; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0x5f13ccf5; ++ __m256_out = __lasx_xvfrsqrt_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, 
__m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x7f800000; ++ *((int *)&__m256_result[6]) = 0x7f800000; ++ *((int *)&__m256_result[5]) = 0x7f800000; ++ *((int *)&__m256_result[4]) = 0x7f800000; ++ *((int *)&__m256_result[3]) = 0x7f800000; ++ *((int *)&__m256_result[2]) = 0x7f800000; ++ *((int *)&__m256_result[1]) = 0x7f800000; ++ *((int *)&__m256_result[0]) = 0x7f800000; ++ __m256_out = __lasx_xvfrsqrt_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x7f800000; ++ *((int *)&__m256_result[6]) = 0x7f800000; ++ *((int *)&__m256_result[5]) = 0x7f800000; ++ *((int *)&__m256_result[4]) = 0x7f800000; ++ *((int *)&__m256_result[3]) = 0x7f800000; ++ *((int *)&__m256_result[2]) = 0x7f800000; ++ *((int *)&__m256_result[1]) = 0x7f800000; ++ *((int *)&__m256_result[0]) = 0x7f800000; ++ __m256_out = __lasx_xvfrsqrt_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x7f800000; 
++ *((int *)&__m256_result[6]) = 0x7f800000; ++ *((int *)&__m256_result[5]) = 0x7f800000; ++ *((int *)&__m256_result[4]) = 0x7f800000; ++ *((int *)&__m256_result[3]) = 0x7f800000; ++ *((int *)&__m256_result[2]) = 0x7f800000; ++ *((int *)&__m256_result[1]) = 0x7f800000; ++ *((int *)&__m256_result[0]) = 0x7f800000; ++ __m256_out = __lasx_xvfrsqrt_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x7f800000; ++ *((int *)&__m256_result[6]) = 0x7f800000; ++ *((int *)&__m256_result[5]) = 0x7f800000; ++ *((int *)&__m256_result[4]) = 0x7f800000; ++ *((int *)&__m256_result[3]) = 0x7f800000; ++ *((int *)&__m256_result[2]) = 0x7f800000; ++ *((int *)&__m256_result[1]) = 0x7f800000; ++ *((int *)&__m256_result[0]) = 0x7f800000; ++ __m256_out = __lasx_xvfrsqrt_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x7f800000; ++ *((int *)&__m256_result[6]) = 0x7f800000; ++ *((int *)&__m256_result[5]) = 0x7f800000; ++ *((int *)&__m256_result[4]) = 0x7f800000; ++ *((int *)&__m256_result[3]) = 0x7f800000; ++ *((int *)&__m256_result[2]) = 0x7f800000; ++ *((int *)&__m256_result[1]) = 0x7f800000; ++ *((int *)&__m256_result[0]) = 0x7f800000; ++ __m256_out = __lasx_xvfrsqrt_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, 
__m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000002; ++ *((int *)&__m256_op0[4]) = 0x00000008; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000002; ++ *((int *)&__m256_op0[0]) = 0x00000008; ++ *((int *)&__m256_result[7]) = 0x7f800000; ++ *((int *)&__m256_result[6]) = 0x7f800000; ++ *((int *)&__m256_result[5]) = 0x64800000; ++ *((int *)&__m256_result[4]) = 0x64000000; ++ *((int *)&__m256_result[3]) = 0x7f800000; ++ *((int *)&__m256_result[2]) = 0x7f800000; ++ *((int *)&__m256_result[1]) = 0x64800000; ++ *((int *)&__m256_result[0]) = 0x64000000; ++ __m256_out = __lasx_xvfrsqrt_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x000000bd; ++ *((int *)&__m256_op0[4]) = 0xfef907bc; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x000000bd; ++ *((int *)&__m256_op0[0]) = 0xfef907bc; ++ *((int *)&__m256_result[7]) = 0x7f800000; ++ *((int *)&__m256_result[6]) = 0x7f800000; ++ *((int *)&__m256_result[5]) = 0x62d2acee; ++ *((int *)&__m256_result[4]) = 0x7fc00000; ++ *((int *)&__m256_result[3]) = 0x7f800000; ++ *((int *)&__m256_result[2]) = 0x7f800000; ++ *((int *)&__m256_result[1]) = 0x62d2acee; ++ *((int *)&__m256_result[0]) = 0x7fc00000; ++ __m256_out = __lasx_xvfrsqrt_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_result[7]) 
= 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfrsqrt_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x04e8296f; ++ *((int *)&__m256_op0[6]) = 0x18181818; ++ *((int *)&__m256_op0[5]) = 0x132feea9; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x04e8296f; ++ *((int *)&__m256_op0[2]) = 0x18181818; ++ *((int *)&__m256_op0[1]) = 0x132feea9; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x5cbe15f2; ++ *((int *)&__m256_result[6]) = 0x53261036; ++ *((int *)&__m256_result[5]) = 0x559a674d; ++ *((int *)&__m256_result[4]) = 0x7f800000; ++ *((int *)&__m256_result[3]) = 0x5cbe15f2; ++ *((int *)&__m256_result[2]) = 0x53261036; ++ *((int *)&__m256_result[1]) = 0x559a674d; ++ *((int *)&__m256_result[0]) = 0x7f800000; ++ __m256_out = __lasx_xvfrsqrt_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x7f800000; ++ *((int *)&__m256_result[6]) = 0x7f800000; ++ *((int *)&__m256_result[5]) = 0x7f800000; ++ *((int *)&__m256_result[4]) = 0x7f800000; ++ *((int *)&__m256_result[3]) = 0x7f800000; ++ *((int *)&__m256_result[2]) = 0x7f800000; ++ *((int *)&__m256_result[1]) = 0x7f800000; ++ *((int *)&__m256_result[0]) = 0x7f800000; ++ __m256_out = __lasx_xvfrecip_s (__m256_op0); ++ ASSERTEQ_32 
(__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x000000ff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x0000ff00; ++ *((int *)&__m256_result[7]) = 0x7f800000; ++ *((int *)&__m256_result[6]) = 0x7f800000; ++ *((int *)&__m256_result[5]) = 0x7f800000; ++ *((int *)&__m256_result[4]) = 0x7f800000; ++ *((int *)&__m256_result[3]) = 0x7f800000; ++ *((int *)&__m256_result[2]) = 0x7f800000; ++ *((int *)&__m256_result[1]) = 0x7f800000; ++ *((int *)&__m256_result[0]) = 0x7f800000; ++ __m256_out = __lasx_xvfrecip_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfrecip_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int 
*)&__m256_result[7]) = 0x7f800000; ++ *((int *)&__m256_result[6]) = 0x7f800000; ++ *((int *)&__m256_result[5]) = 0x7f800000; ++ *((int *)&__m256_result[4]) = 0x7f800000; ++ *((int *)&__m256_result[3]) = 0x7f800000; ++ *((int *)&__m256_result[2]) = 0x7f800000; ++ *((int *)&__m256_result[1]) = 0x7f800000; ++ *((int *)&__m256_result[0]) = 0x7f800000; ++ __m256_out = __lasx_xvfrecip_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x7f800000; ++ *((int *)&__m256_result[6]) = 0x7f800000; ++ *((int *)&__m256_result[5]) = 0x7f800000; ++ *((int *)&__m256_result[4]) = 0x7f800000; ++ *((int *)&__m256_result[3]) = 0x7f800000; ++ *((int *)&__m256_result[2]) = 0x7f800000; ++ *((int *)&__m256_result[1]) = 0x7f800000; ++ *((int *)&__m256_result[0]) = 0x7f800000; ++ __m256_out = __lasx_xvfrecip_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xfc003802; ++ *((int *)&__m256_op0[6]) = 0xfc000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0xfc00fc00; ++ *((int *)&__m256_op0[3]) = 0xfc003802; ++ *((int *)&__m256_op0[2]) = 0xfc000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0xfc00fc00; ++ *((int *)&__m256_result[7]) = 0x82ff902d; ++ *((int *)&__m256_result[6]) = 0x83000000; ++ *((int *)&__m256_result[5]) = 0x7f800000; ++ *((int *)&__m256_result[4]) = 0x82fe0bd9; ++ *((int *)&__m256_result[3]) = 0x82ff902d; ++ *((int *)&__m256_result[2]) = 0x83000000; ++ *((int *)&__m256_result[1]) = 0x7f800000; ++ *((int *)&__m256_result[0]) = 0x82fe0bd9; ++ __m256_out = __lasx_xvfrecip_s (__m256_op0); 
++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x7f800000; ++ *((int *)&__m256_result[6]) = 0x7f800000; ++ *((int *)&__m256_result[5]) = 0x7f800000; ++ *((int *)&__m256_result[4]) = 0x7f800000; ++ *((int *)&__m256_result[3]) = 0x7f800000; ++ *((int *)&__m256_result[2]) = 0x7f800000; ++ *((int *)&__m256_result[1]) = 0x7f800000; ++ *((int *)&__m256_result[0]) = 0x7f800000; ++ __m256_out = __lasx_xvfrecip_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x7f800000; ++ *((int *)&__m256_result[6]) = 0x7f800000; ++ *((int *)&__m256_result[5]) = 0x7f800000; ++ *((int *)&__m256_result[4]) = 0x7f800000; ++ *((int *)&__m256_result[3]) = 0x7f800000; ++ *((int *)&__m256_result[2]) = 0x7f800000; ++ *((int *)&__m256_result[1]) = 0x7f800000; ++ *((int *)&__m256_result[0]) = 0x7f800000; ++ __m256_out = __lasx_xvfrecip_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xfd02fd02; ++ *((int *)&__m256_op0[6]) = 0xfd02fd02; ++ *((int *)&__m256_op0[5]) = 0xfd02fd02; ++ *((int *)&__m256_op0[4]) = 0xfd02fd02; ++ *((int *)&__m256_op0[3]) = 0xfd02fd02; ++ *((int *)&__m256_op0[2]) = 0xfd02fd02; ++ *((int *)&__m256_op0[1]) = 0xfd02fd02; ++ *((int *)&__m256_op0[0]) = 0xfd02fd02; ++ 
*((int *)&__m256_result[7]) = 0x81fa28e4; ++ *((int *)&__m256_result[6]) = 0x81fa28e4; ++ *((int *)&__m256_result[5]) = 0x81fa28e4; ++ *((int *)&__m256_result[4]) = 0x81fa28e4; ++ *((int *)&__m256_result[3]) = 0x81fa28e4; ++ *((int *)&__m256_result[2]) = 0x81fa28e4; ++ *((int *)&__m256_result[1]) = 0x81fa28e4; ++ *((int *)&__m256_result[0]) = 0x81fa28e4; ++ __m256_out = __lasx_xvfrecip_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ return 0; ++} +-- +2.33.0 + diff --git a/LoongArch-Add-tests-for-ASX-vector-subtraction-instr.patch b/LoongArch-Add-tests-for-ASX-vector-subtraction-instr.patch new file mode 100644 index 0000000000000000000000000000000000000000..eef0890dbc8a9e0b1819badcf093372a2b94df1d --- /dev/null +++ b/LoongArch-Add-tests-for-ASX-vector-subtraction-instr.patch @@ -0,0 +1,4566 @@ +From dcd9959504b5e8a0d9346d9ffb45542c1250c538 Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Tue, 12 Sep 2023 15:21:25 +0800 +Subject: [PATCH 101/124] LoongArch: Add tests for ASX vector subtraction + instructions. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lasx/lasx-xvssub-1.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvssub-2.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvsub.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvsubi.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvsubwev-1.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvsubwev-2.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvsubwod-1.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvsubwod-2.c: New test. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lasx/lasx-xvssub-1.c | 425 +++++++++++ + .../loongarch/vector/lasx/lasx-xvssub-2.c | 695 ++++++++++++++++++ + .../loongarch/vector/lasx/lasx-xvsub.c | 590 +++++++++++++++ + .../loongarch/vector/lasx/lasx-xvsubi.c | 482 ++++++++++++ + .../loongarch/vector/lasx/lasx-xvsubwev-1.c | 530 +++++++++++++ + .../loongarch/vector/lasx/lasx-xvsubwev-2.c | 440 +++++++++++ + .../loongarch/vector/lasx/lasx-xvsubwod-1.c | 695 ++++++++++++++++++ + .../loongarch/vector/lasx/lasx-xvsubwod-2.c | 620 ++++++++++++++++ + 8 files changed, 4477 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsub.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-2.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-1.c +new file mode 100644 +index 000000000..ada72a16a +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-1.c +@@ -0,0 +1,425 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, 
int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000001dc; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000001dc; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff24; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ff24; ++ __m256i_out = __lasx_xvssub_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[2]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[0]) = 0x2020202020202020; ++ __m256i_out = __lasx_xvssub_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000430207f944; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000430207f944; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000bdfef907bc; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000bdfef907bc; ++ __m256i_out = __lasx_xvssub_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101000000010000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101000000010000; ++ 
*((unsigned long *)&__m256i_op1[3]) = 0x0000010101010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101000000010000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000010101010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101000000010000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff80; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffc0; ++ __m256i_out = __lasx_xvssub_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x2b2b2b2b1bd68080; ++ *((unsigned long *)&__m256i_op1[2]) = 0x2a2ad4d4f2d8807e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x2b2b2b2b1bd68080; ++ *((unsigned long *)&__m256i_op1[0]) = 0x2a2ad4d4f2d8807e; ++ *((unsigned long 
*)&__m256i_result[3]) = 0xd4d5d4d5e42a7f80; ++ *((unsigned long *)&__m256i_result[2]) = 0xd5d62b2c0d287f82; ++ *((unsigned long *)&__m256i_result[1]) = 0xd4d5d4d5e42a7f80; ++ *((unsigned long *)&__m256i_result[0]) = 0xd5d62b2c0d287f82; ++ __m256i_out = __lasx_xvssub_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000080000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000080000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000080000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff07b4ffff0707; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000b8070000a787; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff07b4ffff0707; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000b8070000a787; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000504fffff3271; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff47b4ffff5879; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000504fffff3271; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff47b4ffff5879; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffb7650000d496; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001800000018000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffb7650000d496; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001800000018000; ++ __m256i_out = __lasx_xvssub_h 
(__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000fc300000fc40; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000fc300000fc40; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff000003c0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff000003c0; ++ __m256i_out = __lasx_xvssub_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff81001dff9dff9e; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff81001dff9d003b; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff81001dff9dff9e; 
++ *((unsigned long *)&__m256i_op0[0]) = 0xff81001dff9d003b; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xff81001dff9dff9e; ++ *((unsigned long *)&__m256i_result[2]) = 0xff81001dff9d003b; ++ *((unsigned long *)&__m256i_result[1]) = 0xff81001dff9dff9e; ++ *((unsigned long *)&__m256i_result[0]) = 0xff81001dff9d003b; ++ __m256i_out = __lasx_xvssub_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000001fffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000001fffe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000001fffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000001fffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00fd0101; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff00fd0101; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00fd0101; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff00fd0101; ++ __m256i_out = __lasx_xvssub_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvssub_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x40f69fe73c26f4ee; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x40f69fe73c26f4ee; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_result[3]) = 0x40f69fe63c26f4f5; ++ *((unsigned long *)&__m256i_result[2]) = 0x7ff7ffff00000007; ++ *((unsigned long *)&__m256i_result[1]) = 0x40f69fe63c26f4f5; ++ *((unsigned long *)&__m256i_result[0]) = 0x7ff7ffff00000007; ++ __m256i_out = __lasx_xvssub_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffff00ffff8000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffff00ffff8000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000ff00007fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000ff00007fff; ++ __m256i_out = __lasx_xvssub_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000001010800; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000001010800; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffefef800; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffefef800; ++ __m256i_out = __lasx_xvssub_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007f0000007f0060; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007f0000007f0060; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x007f0000007f0060; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x007f0000007f0060; ++ __m256i_out = __lasx_xvssub_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4393a0a5bc606060; ++ *((unsigned long *)&__m256i_op0[2]) = 0x43b32feea9000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4393a0a5bc606060; ++ *((unsigned long *)&__m256i_op0[0]) = 0x43b32feea9000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x3eab77367fff4848; ++ *((unsigned long *)&__m256i_op1[2]) = 0x408480007fff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x3eab77367fff4848; ++ *((unsigned long *)&__m256i_op1[0]) = 0x408480007fff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x04e8296f3c611818; ++ *((unsigned long *)&__m256i_result[2]) = 0x032eafee29010000; ++ *((unsigned long *)&__m256i_result[1]) = 0x04e8296f3c611818; ++ *((unsigned long *)&__m256i_result[0]) = 0x032eafee29010000; ++ __m256i_out = __lasx_xvssub_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0001000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001ff91ff100000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001ff91ff100000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000800080; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000202; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000800080; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000202; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffffff7fff80; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001ff91ff0ffdfe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffffff7fff80; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001ff91ff0ffdfe; ++ __m256i_out = __lasx_xvssub_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvssub_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-2.c +new file mode 100644 +index 000000000..f42523850 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-2.c +@@ -0,0 +1,695 @@ 
++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000007; ++ __m256i_out = __lasx_xvssub_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf7fdd5ffebe1c9e3; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf7fdd5ffebe1c9e3; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000002467db99; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000003e143852; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000002467db99; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000003e143852; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffdb982466; ++ *((unsigned long *)&__m256i_result[2]) = 0xf7fdd5ffadcd9191; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffdb982466; ++ *((unsigned long *)&__m256i_result[0]) = 0xf7fdd5ffadcd9191; ++ __m256i_out = __lasx_xvssub_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff00fe00ff00fe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00fe00ff00fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fef0000ffff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fef0000ffff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000420080000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x5fff5fff607f0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000420080000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x5fff5fff607f0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000f880f87e; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000f880f87e; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffff0000; ++ __m256i_out = __lasx_xvssub_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssub_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000008050501; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0100000100000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000008050501; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0100000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0010511c54440438; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0010511c54440438; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000030b8; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000030b8; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x00000000000030b8; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000030b8; ++ __m256i_out = __lasx_xvssub_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_result[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_result[1]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_result[0]) = 0xff1cff1cff1cff1c; ++ __m256i_out = __lasx_xvssub_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100002000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000808000008080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000808000008081; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000003ff000003ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000003ff000003ff; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffec; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffec; ++ *((unsigned long *)&__m256i_result[3]) = 0x000003ff000003ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000003ff000003ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000003fff3fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3fff3fff3fff4000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000403f3fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0707feb60707b7d0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x45baa7ef6a95a985; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0707feb60707b7d0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x45baa7ef6a95a985; ++ *((unsigned long *)&__m256i_result[3]) = 0x38f7414938f7882f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x38f7414938f78830; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffe0001fffe0003; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffe0001fffe0003; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffe0001fffe0003; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffe00000000; ++ __m256i_out = __lasx_xvssub_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000f0f0f0ef; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf0f0f0f0f0f0f0ef; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000f0f0f0ef; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf0f0f0f0f0f0f0ef; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000180007f7f; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffafaf80000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000180007f7f; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffafaf80000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000070f07170; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000070f0f0ef; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000070f07170; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000070f0f0ef; ++ __m256i_out = 
__lasx_xvssub_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000032; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000003c000000032; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000004e; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ffffffffffffff; ++ __m256i_out = __lasx_xvssub_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0010100000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0010100000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0feff00000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0feff00000000000; ++ __m256i_out = __lasx_xvssub_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x247fe49409620040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x2475cef801f0ffdd; ++ *((unsigned long *)&__m256i_op1[1]) = 0x6580668200fe0002; ++ *((unsigned long *)&__m256i_op1[0]) = 0x419cd5b11c3c5654; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssub_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1010100fefefeff0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0f8f0e8df676f778; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0020000000200000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0020000000200000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffdfffffffdfffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffdfffffffdfffff; ++ __m256i_out = __lasx_xvssub_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0100000001000100; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0100000001000100; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffe8ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffe8ffffffe8; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffe8ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffe8ffffffe8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000c0000005; ++ *((unsigned long *)&__m256i_op1[2]) = 0x21f8c3c4c0000005; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000c0000005; ++ *((unsigned long *)&__m256i_op1[0]) = 0x21f8c3c4c0000005; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssub_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsub.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsub.c +new file mode 100644 +index 000000000..c1de1e8d3 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsub.c +@@ -0,0 +1,590 @@ ++/* { 
dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000009; ++ __m256i_out = __lasx_xvsub_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000010100000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000010100000000; ++ __m256i_out = __lasx_xvsub_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffbe20fc; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000001cc7ee87; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000010bb83239; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000c409ed87; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff00fe00feff02ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff00fe00feff02ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff00fe00feff02ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff00fe00feff02ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0100020001bf1efd; ++ *((unsigned long *)&__m256i_result[2]) = 0x010002001ec8ec88; ++ *((unsigned long *)&__m256i_result[1]) = 0x010002010db9303a; ++ *((unsigned long *)&__m256i_result[0]) = 0x01000200c60aeb88; ++ __m256i_out = __lasx_xvsub_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvsub_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsub_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010200000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010200000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010200000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010200000000; ++ __m256i_out = __lasx_xvsub_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000007e1c7e1c; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x7e00000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000007e1c7e1c; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7e00000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000007e1c7e1c; ++ *((unsigned long *)&__m256i_result[2]) = 0x7e00000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000007e1c7e1c; ++ *((unsigned long *)&__m256i_result[0]) = 0x7e00000000000000; ++ __m256i_out = __lasx_xvsub_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000040004000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000004000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000040004000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000004000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op1[2]) = 0x01ffff4300ffff00; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op1[0]) = 0x01ffff4300ffff00; ++ *((unsigned long *)&__m256i_result[3]) = 0xff00ff003f003f00; ++ *((unsigned long *)&__m256i_result[2]) = 0xff0101fd00010100; ++ *((unsigned long *)&__m256i_result[1]) = 0xff00ff003f003f00; ++ *((unsigned long *)&__m256i_result[0]) = 0xff0101fd00010100; ++ __m256i_out = __lasx_xvsub_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsub_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[3]) = 0xff01ff010000fff9; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff19; ++ *((unsigned long *)&__m256i_result[1]) = 0xff02ff020001fffa; ++ *((unsigned long *)&__m256i_result[0]) = 0x000100010001fffa; ++ __m256i_out = __lasx_xvsub_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsub_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x43d03bfff827ea21; ++ *((unsigned long *)&__m256i_op1[2]) = 0x43dac1f2a3804ff0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x43d03bfff827e9f9; ++ *((unsigned long *)&__m256i_op1[0]) = 0x43e019c657c7d050; ++ *((unsigned long *)&__m256i_result[3]) = 0xbc30c40107d915df; ++ *((unsigned long *)&__m256i_result[2]) = 0xbc263e0e5c80b010; ++ *((unsigned long *)&__m256i_result[1]) = 0xbc30c40107d91607; ++ *((unsigned long *)&__m256i_result[0]) = 0xbc20e63aa8392fb0; ++ __m256i_out = __lasx_xvsub_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsub_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0xff21c241ff21c241; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff21c241ff21c241; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff21c241ff21c241; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff21c241ff21c241; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[3]) = 0xff21c241ff21c241; ++ *((unsigned long *)&__m256i_result[2]) = 0xff21c241ff21c238; ++ *((unsigned long *)&__m256i_result[1]) = 0xff21c241ff21c241; ++ *((unsigned long *)&__m256i_result[0]) = 0xff21c241ff21c238; ++ __m256i_out = __lasx_xvsub_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xf7f8f7f8f7f8f7f8; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xf7f8f7f8f7f8f7f8; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsub_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000e000e000e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000e0000000d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000e000e000e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000e0000000d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000e000e000e; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0x0000000e0000000d; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000e000e000e; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000e0000000d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsub_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x207f207f207f2000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000207f2000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_result[3]) = 0xdf80df80df80dfff; ++ *((unsigned long *)&__m256i_result[2]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffdf80dfff; ++ *((unsigned long *)&__m256i_result[0]) = 0x8080808080808080; ++ __m256i_out = __lasx_xvsub_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long *)&__m256i_op0[2]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long *)&__m256i_op0[0]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xc5c5c5c5c5c5c5c5; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x45c5c5c645c5c5c6; ++ *((unsigned long *)&__m256i_result[1]) = 0xc5c5c5c5c5c5c5c5; ++ *((unsigned long *)&__m256i_result[0]) = 0x45c5c5c645c5c5c6; ++ __m256i_out = __lasx_xvsub_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1f0fdf7f3e3b31d4; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1f0fdf7f3e3b31d4; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xe0f02081c1c4ce2c; ++ *((unsigned long *)&__m256i_result[2]) = 0x8008000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xe0f02081c1c4ce2c; ++ *((unsigned long *)&__m256i_result[0]) = 0x8008000000000000; ++ __m256i_out = __lasx_xvsub_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff3eff3eff3eff3e; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff3eff3eff3eff3e; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xff3eff3eff3eff3e; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xff3eff3eff3eff3e; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsub_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000001c9880; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000001c9880; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffe36780; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffe36780; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000100000001; ++ __m256i_out = __lasx_xvsub_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00b213171dff0606; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00e9a80014ff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00b213171dff0606; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00e9a80014ff0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00b213181dff0607; ++ *((unsigned long *)&__m256i_result[2]) = 0x00e9a80114ff0001; ++ *((unsigned long *)&__m256i_result[1]) = 0x00b213181dff0607; ++ *((unsigned long *)&__m256i_result[0]) = 0x00e9a80114ff0001; ++ __m256i_out = __lasx_xvsub_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000fdfdfe; ++ 
*((unsigned long *)&__m256i_op1[3]) = 0x0001fffe0001fffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001fffe00010000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7ffe0001fffe0001; ++ *((unsigned long *)&__m256i_result[2]) = 0x7ffe0001fffeffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000fdfdfe; ++ __m256i_out = __lasx_xvsub_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffff90ffffff80; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffff90ffffff80; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000006f0000007f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000006f0000007f; ++ __m256i_out = __lasx_xvsub_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8080808080808081; ++ *((unsigned long *)&__m256i_result[1]) = 0x8080808080808081; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsub_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff017e01fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x017e00ff017e00ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff017e01fe; ++ *((unsigned long *)&__m256i_op1[3]) = 0xe1616161e1614e60; ++ *((unsigned long *)&__m256i_op1[2]) = 0xe1616161e1614e60; ++ *((unsigned long *)&__m256i_op1[1]) = 0xe1616161e1614e60; ++ *((unsigned long *)&__m256i_op1[0]) = 0xe1616161e1614e60; ++ *((unsigned long *)&__m256i_result[3]) = 0x1f9d9f9d1f9db29f; ++ *((unsigned long *)&__m256i_result[2]) = 0x1f9d9f9d201cb39e; ++ *((unsigned long *)&__m256i_result[1]) = 0x201c9f9d201cb29f; ++ *((unsigned long *)&__m256i_result[0]) = 0x1f9d9f9d201cb39e; ++ __m256i_out = __lasx_xvsub_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffeffebfb7afb62; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffeffebfb7afb62; ++ *((unsigned long *)&__m256i_op1[3]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xc192181230000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xc192181230000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x4010000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x3e6ce7d9cb7afb62; ++ *((unsigned long *)&__m256i_result[1]) = 0x4010000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x3e6ce7d9cb7afb62; ++ __m256i_out = __lasx_xvsub_d 
(__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsub_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsub_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00000000; ++ __m256i_out = __lasx_xvsub_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffed; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffed; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffed; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffed; ++ __m256i_out = __lasx_xvsub_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0fffffff0fffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0fffffff0fffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0fffffff0fffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0fffffff0fffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x90007fff90008000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0ffffffe90008000; ++ __m256i_out = __lasx_xvsub_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsub_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffff80000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x7fffffff80000000; ++ __m256i_out = __lasx_xvsub_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x05ea05ea05ea05ec; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x05ea05ea05ea05ec; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfa15fa15fa15fa14; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xfa15fa15fa15fa14; ++ __m256i_out = __lasx_xvsub_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsub_q (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0505070804040404; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0504070804040404; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0505070804040404; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0504070804040404; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ff000000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ff000000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0504080804030405; ++ *((unsigned long *)&__m256i_result[2]) = 0x0504060904040305; ++ *((unsigned long *)&__m256i_result[1]) = 0x0504080804030405; ++ *((unsigned long *)&__m256i_result[0]) = 0x0504060904040305; ++ __m256i_out = __lasx_xvsub_q (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000fff8fff8; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ff00fff8ffc0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000fff8fff8; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ff00fff8ffc0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000fff8ff40; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ff0100090040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000fff8ff40; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ff0100090040; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000000b7; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffefff80; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000000b7; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffefff80; ++ __m256i_out = __lasx_xvsub_q (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubi.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubi.c +new file mode 100644 +index 000000000..a3c0de6d3 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubi.c +@@ -0,0 +1,482 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w 
-fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xe9e9e9e9e9e9e9e9; ++ *((unsigned long *)&__m256i_result[2]) = 0xe9e9e9e9e9e9e9e9; ++ *((unsigned long *)&__m256i_result[1]) = 0xe9e9e9e9e9e9e9e9; ++ *((unsigned long *)&__m256i_result[0]) = 0xe9e9e9e9e9e9e9e9; ++ __m256i_out = __lasx_xvsubi_bu (__m256i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[3]) = 0xf9f8f9f8f9f9f900; ++ *((unsigned long *)&__m256i_result[2]) = 0xf9f9f9f9f9f9f9e0; ++ *((unsigned long *)&__m256i_result[1]) = 0xf9f8f9f8f9f9f900; ++ *((unsigned long *)&__m256i_result[0]) = 0xf9f9f9f9f9f9f900; ++ __m256i_out = __lasx_xvsubi_bu (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[1]) = 0x000000000000007f; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xefefefefefefefef; ++ *((unsigned long *)&__m256i_result[2]) = 0xefefefefefefefef; ++ *((unsigned long *)&__m256i_result[1]) = 0xefefefefefefef6e; ++ *((unsigned long *)&__m256i_result[0]) = 0xeeeeeeeeeeeeeeee; ++ __m256i_out = __lasx_xvsubi_bu (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xebebebebebebebeb; ++ *((unsigned long *)&__m256i_result[2]) = 0x6aeaeaeaeaeaeaea; ++ *((unsigned long *)&__m256i_result[1]) = 0xebebebebebebebeb; ++ *((unsigned long *)&__m256i_result[0]) = 0x6aeaeaeaeaeaeaea; ++ __m256i_out = __lasx_xvsubi_bu (__m256i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xf6f6f6f6f6f6f6f6; ++ *((unsigned long *)&__m256i_result[2]) = 0xf6f6f6f6f6f6f6f6; ++ *((unsigned long *)&__m256i_result[1]) = 0xf6f6f6f6f6f6f6f6; ++ *((unsigned long *)&__m256i_result[0]) = 0xf6f6f6f6f6f6f6f6; ++ __m256i_out = __lasx_xvsubi_bu (__m256i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000002a54290; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000002a54290; ++ __m256i_out = __lasx_xvsubi_bu (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xe7e7e7e7e7e7e7e7; ++ *((unsigned long *)&__m256i_result[2]) = 0xe7e7e7e7e7e7e7e7; ++ *((unsigned long *)&__m256i_result[1]) = 0xe7e7e7e7e7e7e7e7; ++ *((unsigned long *)&__m256i_result[0]) = 0xe7e7e7e7e7e7e7e7; ++ __m256i_out = __lasx_xvsubi_bu (__m256i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xefdfefdf00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xefdfefdfefdfefdf; ++ *((unsigned long *)&__m256i_op0[1]) = 0xefdfefdf00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xefdfefdfefdfefdf; ++ *((unsigned long *)&__m256i_result[3]) = 0xdbcbdbcbecececec; ++ *((unsigned long *)&__m256i_result[2]) = 0xdbcbdbcbdbcbdbcb; ++ *((unsigned long *)&__m256i_result[1]) = 0xdbcbdbcbecececec; ++ *((unsigned long *)&__m256i_result[0]) = 0xdbcbdbcbdbcbdbcb; ++ __m256i_out = __lasx_xvsubi_bu (__m256i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0e0d0c0b0e0d0c0b; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0e0d0c0b0e0d0c0b; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0e0d0c0b0e0d0c0b; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0e0d0c0b0e0d0c0b; ++ *((unsigned long *)&__m256i_result[3]) = 0x0a0908070a090807; ++ *((unsigned long *)&__m256i_result[2]) = 0x0a0908070a090807; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0a0908070a090807; ++ *((unsigned long *)&__m256i_result[0]) = 0x0a0908070a090807; ++ __m256i_out = __lasx_xvsubi_bu (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_result[2]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_result[1]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_result[0]) = 0xf9f9f9f9f9f9f9f9; ++ __m256i_out = __lasx_xvsubi_bu (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xf3f3f3f3f3f3f3f3; ++ *((unsigned long *)&__m256i_result[2]) = 0xf2f2f2f2f2f2f2f2; ++ *((unsigned long *)&__m256i_result[1]) = 0xf3f3f3f3f3f3f3f3; ++ *((unsigned long *)&__m256i_result[0]) = 0xf2f2f2f2f2f2f2f2; ++ __m256i_out = __lasx_xvsubi_bu (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xebebebebebebebeb; ++ *((unsigned long *)&__m256i_result[2]) = 0xebebebebebebebeb; ++ *((unsigned long *)&__m256i_result[1]) = 0xebebebebebebebeb; ++ *((unsigned long *)&__m256i_result[0]) = 0xebebebebebebebeb; ++ __m256i_out = __lasx_xvsubi_bu (__m256i_op0, 0x14); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_result[2]) = 0xfefefefefdfdfdfd; ++ *((unsigned long *)&__m256i_result[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_result[0]) = 0xfefefefefdfdfdfd; ++ __m256i_out = __lasx_xvsubi_bu (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xe4e4e4e4e4e4e4e4; ++ *((unsigned long *)&__m256i_result[2]) = 0xe4e4e4e4e4e4e4e4; ++ *((unsigned long *)&__m256i_result[1]) = 0xe4e4e4e4e4e4e4e4; ++ *((unsigned long *)&__m256i_result[0]) = 0xe4e4e4e4e4e4e4e4; ++ __m256i_out = __lasx_xvsubi_bu (__m256i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfff7fff7fff7fff7; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff7fff7fff7fff7; ++ *((unsigned long *)&__m256i_result[1]) = 0xfff7fff7fff7fff7; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff7fff7fff7fff7; ++ __m256i_out = __lasx_xvsubi_hu (__m256i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000022be22be; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x7fff7fffa2bea2be; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000022be22be; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fffa2bea2be; ++ *((unsigned long *)&__m256i_result[3]) = 0xffe1ffe1229f229f; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fe07fe0a29fa29f; ++ *((unsigned long *)&__m256i_result[1]) = 0xffe1ffe1229f229f; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fe07fe0a29fa29f; ++ __m256i_out = __lasx_xvsubi_hu (__m256i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffe5ffe5ffe5ffe5; ++ *((unsigned long *)&__m256i_result[2]) = 0xffe5ffe5ffe5ffe5; ++ *((unsigned long *)&__m256i_result[1]) = 0xffe5ffe5ffe5ffe5; ++ *((unsigned long *)&__m256i_result[0]) = 0xffe5ffe5ffe5ffe5; ++ __m256i_out = __lasx_xvsubi_hu (__m256i_op0, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfff1fff1fff1fff1; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff1fff1fff1fff1; ++ *((unsigned long *)&__m256i_result[1]) = 0xfff1fff1fff1fff1; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff1fff1fff1fff1; ++ __m256i_out = __lasx_xvsubi_hu (__m256i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[3]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffcfffcfffcfffc; ++ __m256i_out = __lasx_xvsubi_hu (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000004fb; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffefffffffef; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffef000004ea; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffefffffffef; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffefffffffef; ++ __m256i_out = __lasx_xvsubi_wu (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffecffffffec; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffecffffffec; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffecffffffec; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffecffffffec; ++ __m256i_out = __lasx_xvsubi_wu (__m256i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000018; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000018; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffff30000000b; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffff3fffffff3; ++ *((unsigned long *)&__m256i_result[1]) = 
0xfffffff30000000b; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffff3fffffff3; ++ __m256i_out = __lasx_xvsubi_wu (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffff5fffffff5; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffff5fffffff5; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffff5fffffff5; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffff5fffffff5; ++ __m256i_out = __lasx_xvsubi_wu (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffe5ffffffe5; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffe5ffffffe5; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffe5ffffffe5; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffe5ffffffe5; ++ __m256i_out = __lasx_xvsubi_wu (__m256i_op0, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffeaffffffea; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffeaffffffea; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffeaffffffea; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffeaffffffea; ++ __m256i_out = __lasx_xvsubi_wu (__m256i_op0, 0x16); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x5d20a0a15d20a0a1; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x5d20a0a15d20a0a1; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x5d20a0895d20a089; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffe8ffffffe8; ++ *((unsigned long *)&__m256i_result[1]) = 0x5d20a0895d20a089; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffe8ffffffe8; ++ __m256i_out = __lasx_xvsubi_wu (__m256i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffe8ffffffe8; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffe8ffffffe8; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffe8ffffffe8; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffe8ffffffe8; ++ __m256i_out = __lasx_xvsubi_wu (__m256i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffcfffffffc; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffcfffffffc; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffcfffffffc; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffcfffffffc; ++ __m256i_out = __lasx_xvsubi_wu (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffeb683007ffd80; ++ *((unsigned long *)&__m256i_op0[2]) = 
0xfffe97c0df5b41cf; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffeb683007ffd80; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffe97c0df5b41cf; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffeb664007ffd61; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffe97a1df5b41b0; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffeb664007ffd61; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffe97a1df5b41b0; ++ __m256i_out = __lasx_xvsubi_wu (__m256i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffe7ffffffe7; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffe7ffffffe7; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffe7ffffffe7; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffe7ffffffe7; ++ __m256i_out = __lasx_xvsubi_wu (__m256i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000400000003ffb; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000400100004001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000400000003ffb; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000400100004001; ++ *((unsigned long *)&__m256i_result[3]) = 0x00003fef00003fea; ++ *((unsigned long *)&__m256i_result[2]) = 0x00003ff000003ff0; ++ *((unsigned long *)&__m256i_result[1]) = 0x00003fef00003fea; ++ *((unsigned long *)&__m256i_result[0]) = 0x00003ff000003ff0; ++ __m256i_out = __lasx_xvsubi_wu (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[3]) = 0xffffffe4ffffffe4; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffe4ffffffe4; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffe4ffffffe4; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffe4ffffffe4; ++ __m256i_out = __lasx_xvsubi_wu (__m256i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffefe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000101; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffefe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000101; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffefb; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffefb; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000000fe; ++ __m256i_out = __lasx_xvsubi_du (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffffc0008001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffffc0008001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffffc0008001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffffc0008001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffffc0007fe9; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffffc0007fe9; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffffc0007fe9; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffffc0007fe9; ++ __m256i_out = __lasx_xvsubi_du (__m256i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffff6; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffff6; ++ *((unsigned long *)&__m256i_result[1]) = 
0xfffffffffffffff6; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffff6; ++ __m256i_out = __lasx_xvsubi_du (__m256i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffee; ++ __m256i_out = __lasx_xvsubi_du (__m256i_op0, 0x12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffe6; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffe6; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffe6; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffe6; ++ __m256i_out = __lasx_xvsubi_du (__m256i_op0, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffe1; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffe1; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffe1; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffe1; ++ __m256i_out = __lasx_xvsubi_du (__m256i_op0, 0x1f); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000100080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000100080; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000006d; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000010006d; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000006d; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000010006d; ++ __m256i_out = __lasx_xvsubi_du (__m256i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffef; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffef; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffee; ++ __m256i_out = __lasx_xvsubi_du (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffff4; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffff4; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffff4; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffff4; ++ __m256i_out = __lasx_xvsubi_du (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffed; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffed; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffed; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffed; ++ __m256i_out = __lasx_xvsubi_du (__m256i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-1.c +new file mode 100644 +index 000000000..caa72ca61 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-1.c +@@ -0,0 +1,530 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000003fff3fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000003fff3fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvsubwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000003f0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000030007; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000003f0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000030007; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000003f0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000030007; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000003f0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000030007; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000504fffff3271; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff47b4ffff5879; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000504fffff3271; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff47b4ffff5879; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffb10001ff8f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001004c0001ff87; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffb10001ff8f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001004c0001ff87; ++ __m256i_out = __lasx_xvsubwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffff7; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffff7; ++ __m256i_out = __lasx_xvsubwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ff02ff80fede; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ff02ff80fede; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000fffe00800022; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000fffe00800022; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvsubwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvsubwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000100040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000100040; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffc0; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000fff0ffc0; ++ 
*((unsigned long *)&__m256i_result[1]) = 0x000000000000ffc0; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fff0ffc0; ++ __m256i_out = __lasx_xvsubwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000001010000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000001010000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffff0000; ++ __m256i_out = __lasx_xvsubwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fff00017fff0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff00017fff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fff00017fff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff00017fff0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffe4ffffffe4; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffe4ffffffe4; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffe4ffffffe4; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffe4ffffffe4; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000001d0000001c; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001d0000001c; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000001d0000001c; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001d0000001c; ++ __m256i_out = __lasx_xvsubwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned 
long *)&__m256i_op0[3]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000010000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000010000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffeff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffeff00000000; ++ __m256i_out = __lasx_xvsubwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffefd; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010203; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffefefffffcfa; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffefefffffefe; ++ __m256i_out = __lasx_xvsubwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000600000006; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000600000006; ++ __m256i_out = __lasx_xvsubwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvsubwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000102; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000fffffffefe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffefd; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000101; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsubwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000003f; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op1[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op1[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op1[0]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000008080809; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000008080809; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000008080809; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000008080809; ++ __m256i_out = __lasx_xvsubwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000300000003; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000300000003; ++ *((unsigned long *)&__m256i_op1[1]) 
= 0x0000000300000003; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000300000003; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffffd; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffffd; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffffd; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffffd; ++ __m256i_out = __lasx_xvsubwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000400000004; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000400000004; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffff1cff1c; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffff1cff18; ++ *((unsigned long *)&__m256i_result[1]) = 
0xffffffffff1cff1c; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffff1cff18; ++ __m256i_out = __lasx_xvsubwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvsubwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000001400; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000003c01ff9; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000003c01ff9; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffec00; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffc3fe007; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffc3fe007; ++ __m256i_out = __lasx_xvsubwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00010000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00010000; ++ __m256i_out = __lasx_xvsubwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000010100000102; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000010100000102; ++ __m256i_out = __lasx_xvsubwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x007fffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007fffff007fffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x007fffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007fffff007fffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00c200c200c200c2; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00c200c200c200bb; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00c200c200c200c2; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00c200c200c200bb; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffbdff3cffbdff44; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffbdff3cffbdff44; ++ __m256i_out = __lasx_xvsubwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-2.c +new file mode 100644 +index 000000000..57d883c04 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-2.c +@@ -0,0 +1,440 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, 
lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000001700170017; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000001700170017; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000001700170017; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000001700170017; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00007f7f00007f7f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00007f7f00007f7f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00007f7f00007f7f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00007f7f00007f7f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000007f0000007f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000007f0000007f; ++ *((unsigned long *)&__m256i_result[1]) = 0xff01ff80ff01ff80; ++ *((unsigned long *)&__m256i_result[0]) = 0xff01ff800000007e; ++ __m256i_out = __lasx_xvsubwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x2020202020202020; ++ *((unsigned 
long *)&__m256i_op0[2]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_op0[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0043030300400300; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0043030300400300; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0043030300400100; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0043030300400100; ++ *((unsigned long *)&__m256i_result[3]) = 0xffdd001dffe00020; ++ *((unsigned long *)&__m256i_result[2]) = 0xffdd001dffe00031; ++ *((unsigned long *)&__m256i_result[1]) = 0xffdd001dffe00020; ++ *((unsigned long *)&__m256i_result[0]) = 0xffdd001dffe00031; ++ __m256i_out = __lasx_xvsubwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000001ffe2000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x001fe020001fe020; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000001ffe2000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x001fe020001fe020; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff0020ff1f001f; ++ *((unsigned long *)&__m256i_result[2]) = 0xffe1ffe0ffe1ffe0; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff0020ff1f001f; ++ *((unsigned long *)&__m256i_result[0]) = 0xffe1ffe0ffe1ffe0; ++ __m256i_out = __lasx_xvsubwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffee00ba; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffee00ba; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000800080008000; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0x80008000fff98000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x80008000fff98000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00fffff500ba; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff00fffff500ba; ++ __m256i_out = __lasx_xvsubwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000004efffe00; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000047000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000004efffe00; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000047000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ff0100ff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff01; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ff0100ff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ff01; ++ __m256i_out = __lasx_xvsubwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xff01ff01ff01ff01; ++ *((unsigned long 
*)&__m256i_result[2]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_result[1]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_result[0]) = 0xff01ff01ff01ff01; ++ __m256i_out = __lasx_xvsubwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000fffc0000fffc; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000fffc0000fffc; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001504f4c4b2361; ++ *((unsigned long *)&__m256i_op0[2]) = 0x303338a48f374969; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001504f4c4b2361; ++ *((unsigned long *)&__m256i_op0[0]) = 0x303338a48f374969; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000f0f0f0f0; ++ *((unsigned long *)&__m256i_op1[2]) = 0xf0f0f0f0f0f0f0f0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000f0f0f0f0; ++ *((unsigned long *)&__m256i_op1[0]) = 0xf0f0f0f0f0f0f0f0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000504fffff3271; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff47b4ffff5879; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000504fffff3271; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff47b4ffff5879; ++ __m256i_out = __lasx_xvsubwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff0000ffff0001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff0000ffff0001; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff0000ffff0001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff0000ffff0001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000f0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000f0; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000f0; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000000f0; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00000000; ++ __m256i_out = __lasx_xvsubwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffefffffefc; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffffffbf4; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffffc; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000006; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000308; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002; ++ __m256i_out = __lasx_xvsubwev_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000010100000102; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000010100000102; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000fffffefd; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fffffefd; ++ __m256i_out = __lasx_xvsubwev_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff8c80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffffe40; ++ *((unsigned long *)&__m256i_op1[3]) = 0x80000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x80000000ffff8c80; ++ *((unsigned long *)&__m256i_op1[1]) = 0x80000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x80000000fff0e400; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x00000000000f1a40; ++ __m256i_out = __lasx_xvsubwev_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000003effe1; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000003effe1; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000003effe1; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000003effe1; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001fffe0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001fffe00010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001fffe0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001fffe00010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001fffe0000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001fffe00010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001fffe0000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001fffe00010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000fffffff7; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fffffff7; ++ __m256i_out = __lasx_xvsubwev_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000fffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000fffe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000fffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000fffe; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffff0002; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffff0002; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffff0002; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffff0002; ++ __m256i_out = __lasx_xvsubwev_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffff0000; ++ __m256i_out = __lasx_xvsubwev_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x5d20a0a15d20a0a1; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x5d20a0a15d20a0a1; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffff00ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000010000000001; ++ __m256i_out = __lasx_xvsubwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffeffffff00; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffeffffff00; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000100; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000100; ++ __m256i_out = __lasx_xvsubwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0040000000000003; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0040000000000003; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvsubwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-1.c +new file mode 100644 +index 000000000..1687729d3 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-1.c +@@ -0,0 +1,695 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1c1b1a191c1b1a19; ++ *((unsigned 
long *)&__m256i_op1[2]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op1[0]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_result[3]) = 0xffe4ffe6ffe5ffe6; ++ *((unsigned long *)&__m256i_result[2]) = 0xffe4ffe6ffe5ffe6; ++ *((unsigned long *)&__m256i_result[1]) = 0xffe4ffe6ffe5ffe6; ++ *((unsigned long *)&__m256i_result[0]) = 0xffe4ffe6ffe5ffe6; ++ __m256i_out = __lasx_xvsubwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000010001; ++ __m256i_out = __lasx_xvsubwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvsubwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x017e01fe01fe01fe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0586060601fe0202; ++ *((unsigned long *)&__m256i_op1[1]) = 0x017e01fe01fe0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0586060601fe0004; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffbfffafffffffe; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffbfffaffff0000; ++ __m256i_out = __lasx_xvsubwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xefdfefdf00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xefdfefdfefdfefdf; ++ *((unsigned long *)&__m256i_op0[1]) = 0xefdfefdf00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xefdfefdfefdfefdf; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffefffef00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffefffefffefffef; ++ *((unsigned long *)&__m256i_result[1]) = 0xffefffef00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffefffefffefffef; ++ __m256i_out = __lasx_xvsubwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000003ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvsubwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffff01; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsubwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffff90ffffff80; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffff90ffffff80; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ 
__m256i_out = __lasx_xvsubwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffff6; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffff6; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffff6; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffff6; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsubwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffe00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffe00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100008000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100007fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100008000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100007fff; ++ __m256i_out = __lasx_xvsubwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000080000009; ++ *((unsigned long *)&__m256i_op0[2]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000080000009; ++ *((unsigned long *)&__m256i_op0[0]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffff8000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000043efffff8000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffff8000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000043efffff8000; ++ __m256i_out = __lasx_xvsubwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x003f60041f636003; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x003f60041f636003; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000003f00001f63; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000003f00001f63; ++ __m256i_out = __lasx_xvsubwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000400080ffc080; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000400080ffc080; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff80ff; ++ *((unsigned long *)&__m256i_result[1]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffff80ff; ++ __m256i_out = __lasx_xvsubwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000b7; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffefff80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000b7; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffefff80; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xc3030000ff800000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xc3030000ff800000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000008000000080; ++ *((unsigned long *)&__m256i_result[2]) = 0x00003cfc0000006f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000008000000080; ++ *((unsigned long *)&__m256i_result[0]) = 0x00003cfc0000006f; ++ __m256i_out = __lasx_xvsubwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffff6361; ++ *((unsigned long *)&__m256i_op1[2]) = 0x4d0a902890b800dc; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffff6361; ++ *((unsigned long *)&__m256i_op1[0]) = 0x4d0a902890b800dc; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffb2f600006f48; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffb2f600006f48; ++ __m256i_out = __lasx_xvsubwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvsubwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001fffe0001fffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001fffe0001fffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000001fffe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000001fffe; ++ __m256i_out = __lasx_xvsubwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000060000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000060000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000020202020; ++ __m256i_out = __lasx_xvsubwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsubwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000fff8ff40; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ff0100090040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000fff8ff40; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ff0100090040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000000000b7; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffefff80; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000000000b7; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffefff80; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff02; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ff02; ++ __m256i_out = __lasx_xvsubwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000700000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000700000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000700000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000700000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffe00; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffe00; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffe00; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffe00; ++ __m256i_out = __lasx_xvsubwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1e17ffffd0fc6772; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1e17ffffebf6ded2; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1e17ffffd0fc6772; ++ *((unsigned long *)&__m256i_op1[0]) = 0x1e17ffffebf6ded2; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xe1e800002f03988d; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xe1e800002f03988d; ++ __m256i_out = __lasx_xvsubwod_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x9cffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x9cffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x6300000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x6300000000000001; ++ __m256i_out = __lasx_xvsubwod_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0808080808080808; 
++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000808; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xf7f7f7f7f7f7f7f8; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfff1fff1fff1fff1; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff1fff1fff1fff1; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfff1fff1fff1fff1; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfff1fff1fff1fff1; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000e000e000e000e; ++ 
*((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000e000e000e000e; ++ __m256i_out = __lasx_xvsubwod_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0a0a000000000a0a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0a0a0a0a00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0a0a000000000a0a; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0a0a0a0a00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0a0a000000000a0a; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0a0a000000000a0a; ++ __m256i_out = __lasx_xvsubwod_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000; ++ __m256i_out = __lasx_xvsubwod_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned 
long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-2.c +new file mode 100644 +index 000000000..8d6ed92a1 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-2.c +@@ -0,0 +1,620 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_result[3]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_result[2]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_result[1]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_result[0]) = 0xff01ff01ff01ff01; ++ __m256i_out = __lasx_xvsubwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000020001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffcc8000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000007dfdff4b; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xff01ff3400000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ff83ff01; ++ __m256i_out = __lasx_xvsubwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ff010000ff01; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ff010000ff01; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ff010000ff01; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ff010000ff01; ++ __m256i_out = __lasx_xvsubwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0xefdfefdf00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xefdfefdfefdfefdf; ++ *((unsigned long *)&__m256i_op0[1]) = 0xefdfefdf00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xefdfefdfefdfefdf; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfff0fff0ff01ff01; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff0fff0fff0fff0; ++ *((unsigned long *)&__m256i_result[1]) = 0xfff0fff0ff01ff01; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff0fff0fff0fff0; ++ __m256i_out = __lasx_xvsubwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001fffe0001fffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001fffe0001fffe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001fffe0001fffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001fffe0001fffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0xdf80df80df80df80; ++ *((unsigned long *)&__m256i_op1[2]) = 0xdfc2df80df80df87; ++ *((unsigned long *)&__m256i_op1[1]) = 0xdf80df80df80df80; ++ *((unsigned long *)&__m256i_op1[0]) = 0xdfc2df80df80df87; ++ *((unsigned long *)&__m256i_result[3]) = 0xff21ff21ff21ff21; ++ *((unsigned long *)&__m256i_result[2]) = 0xff21ff21ff21ff21; ++ *((unsigned long *)&__m256i_result[1]) = 0xff21ff21ff21ff21; ++ *((unsigned long *)&__m256i_result[0]) = 0xff21ff21ff21ff21; ++ __m256i_out = __lasx_xvsubwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4079808280057efe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007ffcfcfd020202; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x004000800080007e; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000fc00fd0002; ++ __m256i_out = __lasx_xvsubwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[3]) = 0xff01ff0100000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xff01ff0100000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xff01ff0100000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xff01ff0100000000; ++ __m256i_out = __lasx_xvsubwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ffe00007f000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff000100000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000007fff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff7fff00007f00; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff000100007fff; ++ __m256i_out = 
__lasx_xvsubwod_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000050504c4c2362; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000b8f81b8c840e4; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000050504c4c2362; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000b8f81b8c840e4; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffb3b4; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffff5ffff4738; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffb3b4; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffff5ffff4738; ++ __m256i_out = __lasx_xvsubwod_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00009fff9ffffd80; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff20010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00009fff9ffffd80; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff20010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00002080df5b41cf; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00002080df5b41cf; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000009fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff40a6; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000009fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffff40a6; ++ __m256i_out = __lasx_xvsubwod_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00007fffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0x00007fffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffff8001; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffff8001; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000001; ++ __m256i_out = __lasx_xvsubwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x020afefb08140000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0003fffc00060000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff00ffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff0001ff02; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff020afefc; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000003fefd; ++ __m256i_out = __lasx_xvsubwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1514151415141514; ++ *((unsigned long *)&__m256i_op1[2]) = 0x151415141514e335; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1514151415141514; ++ *((unsigned long *)&__m256i_op1[0]) = 0x151415141514e335; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000e9ece9ec; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000e9ece9ec; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000e9ece9ec; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000e9ece9ec; ++ __m256i_out = __lasx_xvsubwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0008000800080008; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000c005e000c0029; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0004005600040020; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0002000000020000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000300000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0002000000020000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000300000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000060008; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000c005b; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffe0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000040053; ++ __m256i_out = __lasx_xvsubwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvsubwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvsubwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf7f8f7f8f800f800; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00003f784000ff80; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf7f8f7f84000fff9; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00003f784000ff80; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000f7f8f7f8; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000003f78; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000f7f8f7f8; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000003f78; ++ __m256i_out = __lasx_xvsubwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7000700070007000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7000700070007000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000070007000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7000700070007000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0xffffffff8fff9000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff8fff9000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff8fff9000; ++ __m256i_out = __lasx_xvsubwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0xc848c848c848c848; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8848c848c848c848; ++ *((unsigned long *)&__m256i_op1[1]) = 0xc848c848c848c848; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8848c848c848c848; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff37b737b8; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff77b737b8; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff37b737b8; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff77b737b8; ++ __m256i_out = __lasx_xvsubwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x457db03e457db03e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x457db03e45a87310; ++ *((unsigned long *)&__m256i_op0[1]) = 0x457db03e457db03e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x457db03e45a87310; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000457db03e; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff457db03f; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000457db03e; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff457db03f; ++ __m256i_out = 
__lasx_xvsubwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvsubwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvsubwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000001; ++ __m256i_out = __lasx_xvsubwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000050504c4c2362; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000b2673a90896a4; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000050504c4c2362; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000b2673a90896a4; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffafafb3b3dc9d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffafafb3b3dc9d; ++ __m256i_out = __lasx_xvsubwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000008050501; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0100000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000008050501; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0100000100000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000008050501; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000008050501; ++ __m256i_out = __lasx_xvsubwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000001fff000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000029170; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000001fff000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000029170; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000001fff000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000001fff000; ++ __m256i_out = __lasx_xvsubwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000090b0906; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +-- +2.33.0 + diff --git a/LoongArch-Add-tests-for-ASX-vector-xvabsd-xvavg-xvav.patch b/LoongArch-Add-tests-for-ASX-vector-xvabsd-xvavg-xvav.patch new file mode 100644 index 0000000000000000000000000000000000000000..bef950c0efa2d615f5cb79658d380b80e5daa892 --- /dev/null +++ b/LoongArch-Add-tests-for-ASX-vector-xvabsd-xvavg-xvav.patch @@ -0,0 +1,5595 @@ +From 02a3c7b1dc6b66bad2d7eca396176cb9fd731a79 Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Tue, 12 Sep 2023 16:42:49 +0800 +Subject: [PATCH 115/124] LoongArch: Add tests for ASX vector + xvabsd/xvavg/xvavgr/xvbsll/xvbsrl/xvneg/ xvsat instructions. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lasx/lasx-xvabsd-1.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvabsd-2.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvavg-1.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvavg-2.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvavgr-1.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvavgr-2.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvbsll_v.c: New test. 
+ * gcc.target/loongarch/vector/lasx/lasx-xvbsrl_v.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvneg.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvsat-1.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvsat-2.c: New test. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lasx/lasx-xvabsd-1.c | 485 +++++++++++ + .../loongarch/vector/lasx/lasx-xvabsd-2.c | 650 +++++++++++++++ + .../loongarch/vector/lasx/lasx-xvavg-1.c | 680 ++++++++++++++++ + .../loongarch/vector/lasx/lasx-xvavg-2.c | 560 +++++++++++++ + .../loongarch/vector/lasx/lasx-xvavgr-1.c | 770 ++++++++++++++++++ + .../loongarch/vector/lasx/lasx-xvavgr-2.c | 650 +++++++++++++++ + .../loongarch/vector/lasx/lasx-xvbsll_v.c | 130 +++ + .../loongarch/vector/lasx/lasx-xvbsrl_v.c | 64 ++ + .../loongarch/vector/lasx/lasx-xvneg.c | 526 ++++++++++++ + .../loongarch/vector/lasx/lasx-xvsat-1.c | 537 ++++++++++++ + .../loongarch/vector/lasx/lasx-xvsat-2.c | 427 ++++++++++ + 11 files changed, 5479 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsll_v.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsrl_v.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvneg.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-2.c + +diff --git 
a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-1.c +new file mode 100644 +index 000000000..41fae32df +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-1.c +@@ -0,0 +1,485 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x34598d0fd19314cb; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1820939b2280fa86; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4a1c269b8e892a3a; ++ *((unsigned long *)&__m256i_op0[0]) = 0x063f2bb758abc664; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffc0fcffffcf83; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000288a00003c1c; ++ *((unsigned long *)&__m256i_result[3]) = 0x3459730f2f6d1435; ++ *((unsigned long *)&__m256i_result[2]) = 0x19212d61237f2b03; ++ *((unsigned long *)&__m256i_result[1]) = 0x4a1c266572772a3a; ++ *((unsigned long *)&__m256i_result[0]) = 0x063f032d58557648; ++ __m256i_out = __lasx_xvabsd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1cfd000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1cfd000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1cfd000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x1cfd000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000007f017f01; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000007f017f01; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000050504c4c2362; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000b2673a90896a4; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000050504c4c2362; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000b2673a90896a4; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[2]) = 0xd0d8eecf383fdf0d; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[0]) = 0xd0d8eecf383fdf0d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001504f4c4b2361; ++ *((unsigned long *)&__m256i_result[2]) = 0x303338a48f374969; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001504f4c4b2361; ++ *((unsigned long *)&__m256i_result[0]) = 0x303338a48f374969; ++ __m256i_out = __lasx_xvabsd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002; ++ __m256i_out = __lasx_xvabsd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x807c7fffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x80817fff00810000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x807c7fffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x80817fff00810000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0002000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0002000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x80767f0101050101; ++ *((unsigned long *)&__m256i_result[2]) = 0x80817f01007f0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x80767f0101050101; ++ *((unsigned long *)&__m256i_result[0]) = 0x80817f01007f0000; ++ __m256i_out = __lasx_xvabsd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x437fe01fe01fe020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x437fe01fe01fe020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x037fe01f001fe020; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x037fe01f001fe020; ++ *((unsigned long *)&__m256i_result[3]) = 0x437f201f201f2020; ++ *((unsigned long *)&__m256i_result[2]) = 0x037f201f001f2020; ++ *((unsigned long *)&__m256i_result[1]) = 0x437f201f201f2020; ++ *((unsigned long *)&__m256i_result[0]) = 0x037f201f001f2020; ++ __m256i_out = __lasx_xvabsd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1fa0000000080000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1fa0000000080000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000010000000100; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000010000000100; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x1f60010000080100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x1f60010000080100; ++ __m256i_out = __lasx_xvabsd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000002780; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000002780; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op1[2]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op1[0]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000010100020103; ++ *((unsigned long *)&__m256i_result[2]) = 0x040f040f040b236d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000010100020103; ++ *((unsigned long *)&__m256i_result[0]) = 0x040f040f040b236d; ++ __m256i_out = __lasx_xvabsd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000100010000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100010000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000100010000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100010000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100010000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100010080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100010000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100010080; ++ __m256i_out = __lasx_xvabsd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000044444443; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000073333333; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000044444443; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000073333333; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000044444443; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000073333333; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000044444443; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000073333333; ++ __m256i_out = __lasx_xvabsd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000050fd00000101; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0x000040c100000101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000050fd00000101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000040c100000101; ++ *((unsigned long *)&__m256i_result[3]) = 0x000050fd00000101; ++ *((unsigned long *)&__m256i_result[2]) = 0x000040c100000101; ++ *((unsigned long *)&__m256i_result[1]) = 0x000050fd00000101; ++ *((unsigned long *)&__m256i_result[0]) = 0x000040c100000101; ++ __m256i_out = __lasx_xvabsd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x01ffff4300fffeff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfe0000bcff000100; ++ *((unsigned long *)&__m256i_op1[1]) = 0x01ffff4300fffeff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfe0000bcff000100; ++ *((unsigned long *)&__m256i_result[3]) = 0x81ff00bd80ff0101; ++ *((unsigned long *)&__m256i_result[2]) = 0x01ff00bd00ff0101; ++ *((unsigned long *)&__m256i_result[1]) = 0x81ff00bd80ff0101; ++ *((unsigned long *)&__m256i_result[0]) = 0x01ff00bd00ff0101; ++ __m256i_out = __lasx_xvabsd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00003fef00003fea; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00003ff000003ff0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00003fef00003fea; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00003ff000003ff0; ++ *((unsigned long *)&__m256i_result[3]) = 0x00003fea00013feb; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x00003fe900014022; ++ *((unsigned long *)&__m256i_result[1]) = 0x00003fea00013feb; ++ *((unsigned long *)&__m256i_result[0]) = 0x00003fe900014022; ++ __m256i_out = __lasx_xvabsd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0cc08723ff900001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xcc9b89f2f6cef440; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0cc08723006fffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x3364760e09310bc0; ++ __m256i_out = __lasx_xvabsd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000000; ++ __m256i_out = __lasx_xvabsd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000017f0000017d; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000017f0000017f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000017f0000017d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000017f0000017f; ++ __m256i_out = __lasx_xvabsd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00fe01f000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00fe01f000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xbf800000bf800000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xd662fa0000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xbf800000bf800000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xd6ef750000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x417e01f040800000; ++ *((unsigned long *)&__m256i_result[2]) = 0x299d060000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x417e01f040800000; ++ *((unsigned long *)&__m256i_result[0]) = 0x29108b0000000000; ++ __m256i_out = __lasx_xvabsd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000001700170017; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000001700170017; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001700170017; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001700170017; ++ __m256i_out = __lasx_xvabsd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffa0078fffa0074; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffa0078fffa0074; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffb79fb74; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffb79fb74; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m256i_result[3]) = 0x000100010485048a; ++ *((unsigned long *)&__m256i_result[2]) = 0x0005ff870005ff86; ++ *((unsigned long *)&__m256i_result[1]) = 0x000100010485048a; ++ *((unsigned long *)&__m256i_result[0]) = 0x0005ff870005ff86; ++ __m256i_out = __lasx_xvabsd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000010001; ++ __m256i_out = __lasx_xvabsd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000400000003ffb; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000400100004001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000400000003ffb; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000400100004001; ++ __m256i_out = 
__lasx_xvabsd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffe00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000200000000; ++ __m256i_out = __lasx_xvabsd_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00000000; ++ __m256i_out = __lasx_xvabsd_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-2.c +new file 
mode 100644 +index 000000000..bd7a9069d +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-2.c +@@ -0,0 +1,650 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x2020000020200000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2020000020200000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0008000001010000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101000001010000; ++ *((unsigned long *)&__m256i_op1[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x2020000020200000; ++ *((unsigned long *)&__m256i_result[2]) = 0x2020000020200000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0008000001010000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101000001010000; ++ __m256i_out = __lasx_xvabsd_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000100010485048a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0005ff870005ff86; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000100010485048a; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0005ff870005ff86; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffecffffffec; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffecffffffec; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffecffffffec; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffecffffffec; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffeffebfb7afb62; ++ 
*((unsigned long *)&__m256i_result[2]) = 0xfffa0065fffa0066; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffeffebfb7afb62; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffa0065fffa0066; ++ __m256i_out = __lasx_xvabsd_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000009; ++ __m256i_out = __lasx_xvabsd_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvabsd_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fff7fff05407fff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fff7fff05407fff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000003fff3fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000003fff3fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x400040003abf4000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000003fff3fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x400040003abf4000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000003fff3fff; ++ __m256i_out = __lasx_xvabsd_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0408040800008003; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0408040800008003; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000fff80800; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0408040800008003; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x04080408fff87803; ++ __m256i_out = __lasx_xvabsd_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0606060606060606; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0606060606060606; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0606060606060606; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0606060606060606; ++ 
*((unsigned long *)&__m256i_result[3]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_result[2]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_result[1]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_result[0]) = 0xf9f9f9f9f9f9f9f9; ++ __m256i_out = __lasx_xvabsd_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff0000ffff0001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff0000ffff0001; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff0000ffff0001; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff0000ffff0001; ++ __m256i_out = 
__lasx_xvabsd_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff00ff003f003f00; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff0101fd00010100; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff00ff003f003f00; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff0101fd00010100; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff00ff003f003f00; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff0101fd00010100; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff00ff003f003f00; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff0101fd00010100; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000a0008; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000a0008; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffff5fff7; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffff5fff7; ++ __m256i_out = __lasx_xvabsd_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000001010000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000001010000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000001010000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000001010000; ++ __m256i_out = __lasx_xvabsd_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000001400000014; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000001400000014; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000001400000014; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000001400000014; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000001400000014; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001400000014; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000001400000014; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001400000014; ++ __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfbba01c0003f7e3f; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffc6cc05c64d960e; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfbd884e7003f7e3f; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff874dc687870000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfbba01c0003f7e3f; ++ *((unsigned long *)&__m256i_result[2]) = 0xffc6cc05c64d960e; ++ *((unsigned long *)&__m256i_result[1]) = 0xfbd884e7003f7e3f; ++ *((unsigned long *)&__m256i_result[0]) = 0xff874dc687870000; ++ __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x80000000ff800000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x80000000ff800000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x800000007fff0001; ++ *((unsigned long *)&__m256i_result[2]) = 0x80000000ff7f0001; ++ *((unsigned long *)&__m256i_result[1]) = 0x800000007fff0001; ++ *((unsigned long *)&__m256i_result[0]) = 0x80000000ff7f0001; ++ __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_result[2]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_result[1]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_result[0]) = 0x01fe01fe01fe01fe; ++ __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000010000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000010000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000010000080040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000010000080040; ++ __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffffd; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffffe; ++ __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff000c0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00040000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff000c0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff00040000; ++ __m256i_out = __lasx_xvabsd_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000033; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000033; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00003f3f00004040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00003f3f00004040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00003f3f0000400d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00003f3f0000400d; ++ __m256i_out = __lasx_xvabsd_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffff88; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000064; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000000000f0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffe98; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000064; ++ __m256i_out = __lasx_xvabsd_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x001fffffffe00000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x001fffffffe00000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7f80ffffff808000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7f80ffffff808000; ++ *((unsigned long *)&__m256i_result[3]) = 0x001fffffffe00000; ++ *((unsigned long *)&__m256i_result[2]) = 0x007f0000007f7fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x001fffffffe00000; ++ *((unsigned long *)&__m256i_result[0]) = 0x007f0000007f7fff; ++ __m256i_out = __lasx_xvabsd_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-1.c +new file mode 100644 +index 000000000..5ce31ebbd +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-1.c +@@ -0,0 +1,680 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, 
fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000001f; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000001f; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000001f; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000001f; ++ __m256i_out = __lasx_xvavg_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000100da000100fd; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001ffe20001fefd; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001009a000100fd; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001ff640001fefd; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000edff00fffd; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000fff10000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000cdff00fffd; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ff320000ffff; ++ __m256i_out = __lasx_xvavg_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffefefeff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff295329; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffefefeff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff295329; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00fe01f000010000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000c40086; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00fe01f000010000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000c40086; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffe00f7ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffff629d7; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffe00f7ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffff629d7; ++ __m256i_out = __lasx_xvavg_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xa020202020202020; ++ *((unsigned long *)&__m256i_op0[2]) = 0xa020202020206431; ++ *((unsigned long *)&__m256i_op0[1]) = 0xa020202020202020; ++ *((unsigned long *)&__m256i_op0[0]) = 0xa020202020206431; ++ *((unsigned long *)&__m256i_op1[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xd010101010101010; ++ *((unsigned long *)&__m256i_result[2]) = 0xd010101010103218; ++ *((unsigned long *)&__m256i_result[1]) = 0xd010101010101010; ++ *((unsigned long *)&__m256i_result[0]) = 0xd010101010103218; ++ __m256i_out = __lasx_xvavg_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff00000000; ++ 
*((unsigned long *)&__m256i_result[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00000000; ++ __m256i_out = __lasx_xvavg_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000800200027; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000800200028; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000800200027; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000800000008; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000400100013; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000400100014; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000400100013; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000400000004; ++ __m256i_out = __lasx_xvavg_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffefffffffeff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffefffffffeff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffefffffffeff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffefffffffeff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvavg_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00010003; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0080000200000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00010003; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff00010002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0080000200000002; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff00010002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00010002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0080000200000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00010002; ++ __m256i_out = __lasx_xvavg_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[3]) = 0x0008000800080008; ++ *((unsigned long *)&__m256i_result[2]) = 0x0008000800080008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0008000800080008; ++ *((unsigned long *)&__m256i_result[0]) = 0x0008000800080008; ++ __m256i_out = __lasx_xvavg_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x4040404040404040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[2]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[0]) = 0x2020202020202020; ++ __m256i_out = __lasx_xvavg_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op1[2]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op1[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op1[0]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[3]) = 0x1010101010101013; ++ *((unsigned long *)&__m256i_result[2]) = 0x1010101010101010; ++ *((unsigned long *)&__m256i_result[1]) = 0x1010101010101013; ++ *((unsigned long *)&__m256i_result[0]) = 0x1010101010101010; ++ __m256i_out = __lasx_xvavg_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvavg_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000006170; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000006170; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000030b8; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000030b8; ++ __m256i_out = __lasx_xvavg_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_result[3]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_result[2]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_result[1]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_result[0]) = 0x0202010202020102; ++ __m256i_out = 
__lasx_xvavg_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvavg_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000001010100; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000405; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000001010100; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000405; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000800080; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000202; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000800080; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000202; ++ __m256i_out = __lasx_xvavg_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001000000010; ++ __m256i_out = __lasx_xvavg_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000a000a000a000a; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0004000400040004; ++ *((unsigned long *)&__m256i_result[2]) = 0x0004000500040005; ++ *((unsigned long *)&__m256i_result[1]) = 0x0004000400040004; ++ *((unsigned long *)&__m256i_result[0]) = 0x0004000500040005; ++ __m256i_out = __lasx_xvavg_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8b1414140e0e0e0e; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00d6c1c830160048; ++ *((unsigned long *)&__m256i_op1[1]) = 0x36722a7e66972cd6; ++ *((unsigned long *)&__m256i_op1[0]) = 0xe3aebaf4df958004; ++ *((unsigned long *)&__m256i_result[3]) = 0xc58a0a0a07070706; ++ *((unsigned long *)&__m256i_result[2]) = 0x006b60e4180b0023; ++ *((unsigned long *)&__m256i_result[1]) = 0x1b39153f334b966a; ++ *((unsigned long *)&__m256i_result[0]) = 0xf1d75d79efcac002; ++ __m256i_out = __lasx_xvavg_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvavg_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007fff00007fff; ++ __m256i_out = __lasx_xvavg_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffe00000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffe00000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffff00000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffff00000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x007fffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x007fffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x003fffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x003fffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvavg_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00007fff7fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007fff7fffffff; ++ __m256i_out = __lasx_xvavg_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0080808080808080; ++ *((unsigned long *)&__m256i_result[2]) = 0x0080808080808080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0080808100808080; ++ *((unsigned long *)&__m256i_result[0]) = 0x0080808000808080; ++ __m256i_out = __lasx_xvavg_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1111111111111111; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1111111111111111; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1111111111111111; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1111111111111111; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0888888888888888; ++ *((unsigned long *)&__m256i_result[2]) = 0x0888888888888888; ++ *((unsigned long *)&__m256i_result[1]) = 0x0888888888888888; ++ *((unsigned long *)&__m256i_result[0]) = 0x0888888888888888; ++ __m256i_out = __lasx_xvavg_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007fffffff; ++ __m256i_out = __lasx_xvavg_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4010000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3e6ce7d9cb7afb62; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4010000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3e6ce7d9cb7afb62; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x2008000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x1f3673ece5bd7db1; ++ *((unsigned long *)&__m256i_result[1]) = 0x2008000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x1f3673ece5bd7db1; ++ __m256i_out = __lasx_xvavg_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvavg_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000400000003fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000400000003fff; ++ __m256i_out = __lasx_xvavg_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000040404000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000040404000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000020202000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000020202000; ++ __m256i_out = __lasx_xvavg_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[3]) = 0xfffffffffc01fc01; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000003fc03bbc; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffc01fc01; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000003fc03bbc; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffe00fe00; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000001fe01dde; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffe00fe00; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000001fe01dde; ++ __m256i_out = __lasx_xvavg_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000100080; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000100080; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000080040; ++ __m256i_out = __lasx_xvavg_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0040000000000003; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0040000000000003; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000003; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0020000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0020000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvavg_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-2.c +new file mode 100644 +index 000000000..d04e42753 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-2.c +@@ -0,0 +1,560 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1d1d1d1d1d1d1d1d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1d1d1d1d1d1d1d1d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x61d849f0c0794ced; ++ *((unsigned long *)&__m256i_op0[0]) = 0xe75278c187b20039; ++ *((unsigned long *)&__m256i_op1[3]) = 0xf90c0c0c00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0ca40c0c0c0c0cc0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0c0c0c0c0cb60cc0; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfbe0b80c960c96d0; ++ *((unsigned long *)&__m256i_result[3]) = 0x8b1414140e0e0e0e; ++ *((unsigned long *)&__m256i_result[2]) = 0x146014141414146e; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x36722a7e66972cd6; ++ *((unsigned long *)&__m256i_result[0]) = 0xf19998668e5f4b84; ++ __m256i_out = __lasx_xvavg_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000ffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ff00fff0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000007f7f; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000007f7f7f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000007f7f; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007f007f78; ++ __m256i_out = __lasx_xvavg_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff02ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffffffff0100; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00fefffeff02ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00030006fa05f20e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00030081bd80f90e; ++ *((unsigned long *)&__m256i_result[3]) = 0x00007f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_result[2]) = 0x00010003fc827a86; ++ *((unsigned long *)&__m256i_result[1]) = 0x00007f7f7f7f0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f017fc0ddbf7d86; ++ __m256i_out = __lasx_xvavg_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101000101010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101000101010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101000101010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101000101010001; ++ __m256i_out = __lasx_xvavg_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvavg_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x007f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_result[2]) = 0x7f7f7f7f00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x007f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f7f7f7f00000000; ++ __m256i_out = __lasx_xvavg_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000007f00000022; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000007f00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000003f00000011; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000003f00000000; ++ __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x3fff3fff3fff3fff; ++ __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff7fff7fff; ++ __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffbfffafffffffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffbfffaffff0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00fe01fc01fe01fc; ++ *((unsigned long *)&__m256i_op1[2]) = 0x012c002c001c0006; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00fe01fc01fe0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x012c002c001c000a; ++ *((unsigned long *)&__m256i_result[3]) = 0x807e80fd80fe80fd; ++ *((unsigned long *)&__m256i_result[2]) = 0x80938013800d8002; ++ *((unsigned long *)&__m256i_result[1]) = 0x807e80fd80fe0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x80938013800d0005; ++ __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0002000200020002; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000800080008000; ++ __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007fff7fff; ++ __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ff810011; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ff810011; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x3fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x3fff7fffffc08008; ++ *((unsigned long *)&__m256i_result[1]) = 0x3fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x3fff7fffffc08008; ++ __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x80000000ff800000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x80000000ff800000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x800000007fff0001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x80000000ff7f0001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x800000007fff0001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x80000000ff7f0001; ++ *((unsigned long *)&__m256i_result[3]) = 0x800000007fff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x80000000ff7f0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x800000007fff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x80000000ff7f0000; ++ __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffc6ffc6003a003a; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffc6ffc6003a003a; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fe37fe3001d001d; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fe37fe3001d001d; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff7fff0000; ++ __m256i_out = 
__lasx_xvavg_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000004; ++ __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000fc38fc38; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000fc38fc38; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000007e1c7e1c; ++ *((unsigned long *)&__m256i_result[2]) = 0x7e00000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000007e1c7e1c; ++ *((unsigned long *)&__m256i_result[0]) = 0x7e00000000000000; ++ __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000fffe00800022; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000fffe00800022; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000003ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00007fff00400011; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000008001ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00007fff00400011; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007fff7fff; ++ __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0a09080706050403; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0a09080706050403; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0504840303028201; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0504840303028201; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000200; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000200; ++ __m256i_out = __lasx_xvavg_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000005000000020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000005000000020; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000002800000010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000002800000010; ++ __m256i_out = __lasx_xvavg_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x40efffe000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x40efffe000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff00fe00feff02ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff00fe00feff02ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff00fe00feff02ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff00fe00feff02ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_result[2]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_result[1]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f807f007f7f817f; ++ __m256i_out = __lasx_xvavg_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x111ebb784f9c4100; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1c386546809f3b50; ++ *((unsigned long *)&__m256i_op1[1]) = 0x111ebb784f9bf1ac; ++ *((unsigned long *)&__m256i_op1[0]) = 0x21f6050d955d3f68; ++ *((unsigned long *)&__m256i_result[3]) = 0x088f5dbc27ce2080; ++ *((unsigned long *)&__m256i_result[2]) = 0x161c32a2c04f9da7; ++ *((unsigned long *)&__m256i_result[1]) = 0x088f5dbc27cdf8d6; ++ *((unsigned long *)&__m256i_result[0]) = 0x10fb02864aae9fb4; ++ __m256i_out = __lasx_xvavg_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffefffefffeffee; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffe0000fffe0012; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffefffefffeffee; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffe0000fffe0012; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffefffefffeffee; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffe0000fffe0012; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffefffefffeffee; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffe0000fffe0012; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffefffefffeffee; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffe0000fffe0012; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffefffefffeffee; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffe0000fffe0012; ++ __m256i_out = __lasx_xvavg_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000001; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvavg_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000800080008000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000800080008000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000800080008000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000800080008000; ++ __m256i_out = __lasx_xvavg_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000004444; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00007bbb0000f777; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000004444; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00007bbb0000f777; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000002222; ++ *((unsigned long *)&__m256i_result[2]) = 0x00003ddd80007bbb; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000002222; ++ *((unsigned long *)&__m256i_result[0]) = 0x00003ddd80007bbb; ++ __m256i_out = __lasx_xvavg_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000f0f0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000f0f0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000007878; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000007878; ++ __m256i_out = __lasx_xvavg_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvavg_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000001e00000000; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000f00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-1.c +new file mode 100644 +index 000000000..37b78aa1b +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-1.c +@@ -0,0 +1,770 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x5555555536aaaaac; ++ *((unsigned long *)&__m256i_op0[2]) = 0x55555555aaaaaaac; ++ *((unsigned long *)&__m256i_op0[1]) = 0x5555555536aaaaac; ++ *((unsigned long *)&__m256i_op0[0]) = 0x55555555aaaaaaac; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff39ffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0xffffffff39ffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x2b2b2b2b1bd5d5d6; ++ *((unsigned long *)&__m256i_result[2]) = 0x2a2a2a2af2d5d5d6; ++ *((unsigned long *)&__m256i_result[1]) = 0x2b2b2b2b1bd5d5d6; ++ *((unsigned long *)&__m256i_result[0]) = 0x2a2a2a2af2d5d5d6; ++ __m256i_out = __lasx_xvavgr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000000c0; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000c0; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000000c0; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000000c0; ++ __m256i_out = __lasx_xvavgr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0020002000400040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0020002000400040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0020002000400040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0020002000400040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0010001000200020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0010001000200020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0010001000200020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0010001000200020; ++ __m256i_out = __lasx_xvavgr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000003; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvavgr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fe36364661af18f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fe363637fe36363; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0x7fe36364661af18f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fe363637fe36363; ++ *((unsigned long *)&__m256i_result[3]) = 0x40f23232330df9c8; ++ *((unsigned long *)&__m256i_result[2]) = 0x40f2323240f23232; ++ *((unsigned long *)&__m256i_result[1]) = 0x40f23232330df9c8; ++ *((unsigned long *)&__m256i_result[0]) = 0x40f2323240f23232; ++ __m256i_out = __lasx_xvavgr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001000100800000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100c00000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff0ff000000000f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000f00f000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfff0ff000000000f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000f00f000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00f8000000000008; ++ *((unsigned long *)&__m256i_result[2]) = 0x000800f800000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x00f8000000000008; ++ *((unsigned long *)&__m256i_result[0]) = 0x000800f800000000; ++ __m256i_out = __lasx_xvavgr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000090b0906; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000005060503; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000073737; ++ __m256i_out = __lasx_xvavgr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xdff8000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xdff8000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xdff8000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xdff8000000000000; ++ __m256i_out = __lasx_xvavgr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ff7f0000ff7f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ff7f0000ff7f; ++ __m256i_out = __lasx_xvavgr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x4000000000000000; ++ __m256i_out = __lasx_xvavgr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000fd0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000fd0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000007f0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000007f0000; ++ __m256i_out = __lasx_xvavgr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xbf00bf00bf00bf00; ++ *((unsigned long *)&__m256i_op0[2]) = 0xbf84bf00bf00bf0e; ++ *((unsigned long *)&__m256i_op0[1]) = 0xbf00bf00bf00bf00; ++ *((unsigned long *)&__m256i_op0[0]) = 0xbf84bf00bf00bf0e; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xdf80df80df80df80; ++ *((unsigned long *)&__m256i_result[2]) = 0xdfc2df80df80df87; ++ *((unsigned long *)&__m256i_result[1]) = 0xdf80df80df80df80; ++ *((unsigned long *)&__m256i_result[0]) = 0xdfc2df80df80df87; ++ __m256i_out = __lasx_xvavgr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xbf00bf00bf00bf00; ++ *((unsigned long *)&__m256i_op0[2]) = 0xbf84bf00bf00bf0e; ++ *((unsigned long *)&__m256i_op0[1]) = 0xbf00bf00bf00bf00; ++ *((unsigned long *)&__m256i_op0[0]) = 0xbf84bf00bf00bf0e; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00003f3f00003f3f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00003f3f00004040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00003f3f00003f3f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00003f3f00004040; ++ *((unsigned long *)&__m256i_result[3]) = 0xdf80ff20df80ff20; ++ *((unsigned long 
*)&__m256i_result[2]) = 0xdfc2ff20df80ffa7; ++ *((unsigned long *)&__m256i_result[1]) = 0xdf80ff20df80ff20; ++ *((unsigned long *)&__m256i_result[0]) = 0xdfc2ff20df80ffa7; ++ __m256i_out = __lasx_xvavgr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000080000009; ++ *((unsigned long *)&__m256i_op0[2]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000080000009; ++ *((unsigned long *)&__m256i_op0[0]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000c0000005; ++ *((unsigned long *)&__m256i_result[2]) = 0x21f8c3c4c0000005; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000c0000005; ++ *((unsigned long *)&__m256i_result[0]) = 0x21f8c3c4c0000005; ++ __m256i_out = __lasx_xvavgr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op0[2]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op0[1]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op0[0]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xc848c848c848c848; ++ *((unsigned long *)&__m256i_result[2]) = 0x8848c848c848c848; ++ *((unsigned long *)&__m256i_result[1]) = 0xc848c848c848c848; ++ *((unsigned long *)&__m256i_result[0]) = 0x8848c848c848c848; ++ __m256i_out = __lasx_xvavgr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000ff800000ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000ff800000ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000ff800000ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000ff800000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x4000c08000000080; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000080c000c080; ++ *((unsigned long *)&__m256i_result[1]) = 0x4000c08000000080; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000080c000c080; ++ __m256i_out = __lasx_xvavgr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x3fffffff3fffc000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x3fffffff3fffc000; ++ __m256i_out = __lasx_xvavgr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x007fffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x007fffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x003fffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x003fffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvavgr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x003fffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x003fffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x001fffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x001fffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvavgr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000007ffffffce; ++ __m256i_out = __lasx_xvavgr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000808081; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000808081; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000808081; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000808081; ++ __m256i_out = 
__lasx_xvavgr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000f18080010000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000f18080010000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000078c0c0008000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000078c0c0008000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfebdff3eff3dff52; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfebdff3eff3dff52; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfebdff3eff3dff52; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfebdff3eff3dff52; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1086658a18ba3594; ++ *((unsigned long *)&__m256i_op1[2]) = 0x160fe9f000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1086658a18ba3594; ++ *((unsigned long *)&__m256i_op1[0]) = 0x160fe9f000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x07a232640bfc1a73; ++ *((unsigned long *)&__m256i_result[2]) = 0x0a66f497ff9effa9; ++ *((unsigned long *)&__m256i_result[1]) = 0x07a232640bfc1a73; ++ *((unsigned long *)&__m256i_result[0]) = 0x0a66f497ff9effa9; ++ __m256i_out = __lasx_xvavgr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffefefeff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff295329; ++ *((unsigned long *)&__m256i_op0[1]) = 
0xfffffffffefefeff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff295329; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00fe01f000010000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000c40086; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00fe01f000010000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000c40086; ++ *((unsigned long *)&__m256i_result[3]) = 0x007f00f8ff7fff80; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000fff6a9d8; ++ *((unsigned long *)&__m256i_result[1]) = 0x007f00f8ff7fff80; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fff6a9d8; ++ __m256i_out = __lasx_xvavgr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ffe7ffe7ffe7ffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007ffe7ffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ffe7ffe7ffe8000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000807e7ffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7ffe7ffe7ffe7ffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000007ffe7ffe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7ffe7ffe7ffe8000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000807e7ffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x7ffe7ffe7ffe7ffe; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007ffe7ffe; ++ *((unsigned long *)&__m256i_result[1]) = 0x7ffe7ffe7ffe8000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000807e7ffe; ++ __m256i_out = __lasx_xvavgr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00fe00fe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff00fe00fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00fe00fe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00fe00fe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x007f8080007f007f; ++ *((unsigned long *)&__m256i_result[2]) = 0x007f8080007f007f; ++ *((unsigned long *)&__m256i_result[1]) = 0x007f8080007f007f; ++ *((unsigned long *)&__m256i_result[0]) = 0x007f8080007f007f; ++ __m256i_out = __lasx_xvavgr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffc6ffc6003a003a; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffc6ffc6003a003a; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffe37fe3001d001d; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff8000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffe37fe3001d001d; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffff8000; ++ __m256i_out = __lasx_xvavgr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000f00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000700000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000081; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000004a00000040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000004a0000002a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000004a00000040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000004a0000002a; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000fffffffefffe; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff7fffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000fffffffefffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000002500000020; ++ *((unsigned long *)&__m256i_result[2]) = 0x00008024ffff8014; ++ *((unsigned long *)&__m256i_result[1]) = 0xffc0002500000020; ++ *((unsigned long *)&__m256i_result[0]) = 0x00008024ffff8014; ++ __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000001a00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000001a00; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000001a00; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000001a00; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000001a00; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000001a00; ++ __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x000023a20000a121; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000179e0000951d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000023a20000a121; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000179e0000951d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000010000000100; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000010000000100; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000010000000100; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000010000000100; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000125100005111; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000c4f00004b0f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000125100005111; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000c4f00004b0f; ++ __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000800000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000080008001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000080008001; ++ __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000457d607d; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff457d607f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000457d607d; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff457d607f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000022beb03f; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffa2beb040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000022beb03f; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffa2beb040; ++ __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000005858585a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000005858585a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000005858585a; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000005858585a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000023a300003fff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000023a300003fef; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000023a300003fff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000023a300003fef; ++ *((unsigned long *)&__m256i_result[3]) = 0x000011d1ac2c4c2d; ++ *((unsigned long *)&__m256i_result[2]) = 0x000011d1ac2c4c25; ++ *((unsigned long *)&__m256i_result[1]) = 0x000011d1ac2c4c2d; ++ *((unsigned long *)&__m256i_result[0]) = 0x000011d1ac2c4c25; ++ __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-2.c +new file mode 100644 +index 000000000..3944a6ac0 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-2.c +@@ -0,0 +1,650 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_result[2]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_result[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_result[0]) = 0x8080808080808080; ++ __m256i_out = __lasx_xvavgr_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvavgr_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_result[2]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_result[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_result[0]) = 0x8080808080808080; ++ __m256i_out = __lasx_xvavgr_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000018803100188; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000018803100188; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000014402080144; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000014402080144; ++ __m256i_out = __lasx_xvavgr_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000086fe0000403e; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000403e00004040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000086fe0000403e; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000403e00004040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000437f0000201f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000201f00002020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000437f0000201f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000201f00002020; ++ __m256i_out = __lasx_xvavgr_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 
0x0fffffff0fffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x90007fff90008000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0ffffffe90008000; ++ *((unsigned long *)&__m256i_result[3]) = 0x87ffffff87ffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xc880bfffc880c080; ++ *((unsigned long *)&__m256i_result[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_result[0]) = 0x87ffffffc880c080; ++ __m256i_out = __lasx_xvavgr_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00fe01f000010000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000c40086; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00fe01f000010000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000c40086; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000082a54290; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000028aa700; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000082a54290; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000002a54287; ++ *((unsigned long *)&__m256i_result[3]) = 0x007f00f841532148; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000001a753c3; ++ *((unsigned long *)&__m256i_result[1]) = 0x007f00f841532148; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000001b52187; ++ __m256i_out = __lasx_xvavgr_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000004444; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00007bbb0000f777; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000004444; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00007bbb0000f777; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000002222; ++ 
*((unsigned long *)&__m256i_result[2]) = 0x00003dde00007bbc; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000002222; ++ *((unsigned long *)&__m256i_result[0]) = 0x00003dde00007bbc; ++ __m256i_out = __lasx_xvavgr_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_result[3]) = 0x4ffc3f783fc040c0; ++ *((unsigned long *)&__m256i_result[2]) = 0x3fc03f803fc040c0; ++ *((unsigned long *)&__m256i_result[1]) = 0x4ffc3f783fc040c0; ++ *((unsigned long *)&__m256i_result[0]) = 0x3fc03f803fc040c0; ++ __m256i_out = __lasx_xvavgr_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0505070804040404; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0504070804040404; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0505070804040404; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0504070804040404; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0283038402020202; ++ *((unsigned long *)&__m256i_result[2]) = 0x0282038402020202; ++ *((unsigned long *)&__m256i_result[1]) = 0x0283038402020202; ++ *((unsigned long *)&__m256i_result[0]) = 0x0282038402020202; ++ __m256i_out = __lasx_xvavgr_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1010101010001000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x101010100000000e; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0889088908810881; ++ *((unsigned long *)&__m256i_result[2]) = 0x0081010000810100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0889088900810088; ++ *((unsigned long *)&__m256i_result[0]) = 0x0081010000810100; ++ __m256i_out = __lasx_xvavgr_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7c00000880008000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7c00000880008000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000001d001d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000080008000; ++ *((unsigned long *)&__m256i_result[2]) = 0x3e00000440004000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000080008000; ++ *((unsigned long *)&__m256i_result[0]) = 0x3e000004400f400f; ++ __m256i_out = __lasx_xvavgr_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0100000001000100; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0100000001000100; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x3abb3abbbabababa; ++ *((unsigned long *)&__m256i_result[2]) = 0x0080000000800080; ++ *((unsigned long *)&__m256i_result[1]) = 0x3abb3abbbabababa; ++ *((unsigned long *)&__m256i_result[0]) = 0x0080000000800080; ++ __m256i_out = __lasx_xvavgr_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xc0008000c0008000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xc0008000c0008000; ++ __m256i_out = 
__lasx_xvavgr_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffe000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00010000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100020001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000fffffffffffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8b1414140e0e0e0e; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00d6c1c830160048; ++ *((unsigned long *)&__m256i_op1[1]) = 0x36722a7e66972cd6; ++ *((unsigned long *)&__m256i_op1[0]) = 0xe3aebaf4df958004; ++ *((unsigned long *)&__m256i_result[3]) = 0xc5890a0a07070707; ++ *((unsigned long *)&__m256i_result[2]) = 0x006be0e4180b8024; ++ *((unsigned long *)&__m256i_result[1]) = 0x1b399540334c966c; ++ *((unsigned long *)&__m256i_result[0]) = 0x71d7dd7aefcac001; ++ __m256i_out = __lasx_xvavgr_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000808080; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000080404040; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000; ++ __m256i_out = __lasx_xvavgr_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0007000700070007; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0007000700070007; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0007000700070007; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0007000700070007; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000e000e000e000e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000e000e000e000e; ++ *((unsigned long *)&__m256i_result[3]) = 0x0003800400038004; ++ *((unsigned long *)&__m256i_result[2]) = 0x000a800b000a800b; ++ *((unsigned long *)&__m256i_result[1]) = 0x0003800400038004; ++ *((unsigned long *)&__m256i_result[0]) = 0x000a800b000a800b; ++ __m256i_out = __lasx_xvavgr_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00f9f9f900000002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00f9f9f900000002; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x007cfcfd80000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x007cfcfd80000001; ++ __m256i_out = __lasx_xvavgr_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff8001ffff8001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff8001ffff8001; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fff800000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffc0017fffc001; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fff800000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffc0017fffc001; ++ __m256i_out = __lasx_xvavgr_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0080000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0080000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000000a; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000000a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0040000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_result[1]) = 0x0040000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000005; ++ __m256i_out = __lasx_xvavgr_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x372e9d75e8aab100; ++ *((unsigned long *)&__m256i_op0[2]) = 
0xc5c085372cfabfba; ++ *((unsigned long *)&__m256i_op0[1]) = 0x31730b5beb7c99f5; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0658f2dc0eb21e3c; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000501e99b; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000109973de7; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000001020f22; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000001890b7a39; ++ *((unsigned long *)&__m256i_result[3]) = 0x1b974ebaf6d64d4e; ++ *((unsigned long *)&__m256i_result[2]) = 0x62e0429c1b48fed1; ++ *((unsigned long *)&__m256i_result[1]) = 0x18b985adf63f548c; ++ *((unsigned long *)&__m256i_result[0]) = 0x032c796ecbdecc3b; ++ __m256i_out = __lasx_xvavgr_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc600000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc600000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x6300000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x6300000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3a2a3a2a3a2a3a2a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3a2a3a2a3aaa45aa; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3a553f7f7a2a3a2a; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3a2a3a2a3aaa45aa; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff017e01fe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x017e00ff017e00ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff017e01fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x1d949d949d949d95; ++ *((unsigned long *)&__m256i_result[2]) = 0x1d949d949e1423d4; ++ *((unsigned long *)&__m256i_result[1]) = 0x1de9a03f3dd41d95; ++ *((unsigned long *)&__m256i_result[0]) = 0x1d949d949e1423d4; ++ __m256i_out = __lasx_xvavgr_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000083f95466; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010100005400; ++ *((unsigned long *)&__m256i_op1[3]) = 0x001e001ea1bfa1bf; ++ *((unsigned long *)&__m256i_op1[2]) = 0x001e001e83e5422e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x001e001ea1bfa1bf; ++ *((unsigned long *)&__m256i_op1[0]) = 0x011f011f0244420e; ++ *((unsigned long *)&__m256i_result[3]) = 0x000f000fd0dfd0df; ++ *((unsigned long *)&__m256i_result[2]) = 0x000f000f83ef4b4a; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x000f000fd0dfd0df; ++ *((unsigned long *)&__m256i_result[0]) = 0x0110011001224b07; ++ __m256i_out = __lasx_xvavgr_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000030000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000030000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000018002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000018002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002; ++ __m256i_out = __lasx_xvavgr_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x223d76f09f3881ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x3870ca8d013e76a0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x223d76f09f37e357; ++ *((unsigned long *)&__m256i_op1[0]) = 0x43ec0a1b2aba7ed0; ++ *((unsigned long *)&__m256i_result[3]) = 0x111ebb784f9c4100; ++ *((unsigned long *)&__m256i_result[2]) = 0x1c386546809f3b50; ++ *((unsigned long *)&__m256i_result[1]) = 0x111ebb784f9bf1ac; ++ *((unsigned long *)&__m256i_result[0]) = 0x21f6050d955d3f68; ++ __m256i_out = __lasx_xvavgr_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000840100000000; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0xbffebffec0fe0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000840100000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xbffebffec0fe0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000420080000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x5fff5fff607f0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000420080000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x5fff5fff607f0000; ++ __m256i_out = __lasx_xvavgr_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvavgr_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000457d607d; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff457d607f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000457d607d; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff457d607f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000022beb03f; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x7fffffffa2beb040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000022beb03f; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffa2beb040; ++ __m256i_out = __lasx_xvavgr_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsll_v.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsll_v.c +new file mode 100644 +index 000000000..3c1a8b8e6 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsll_v.c +@@ -0,0 +1,130 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x555555553f800000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x555555553f800000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00555555553f8000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00555555553f8000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbsll_v (__m256i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbsll_v (__m256i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbsll_v (__m256i_op0, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000030000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000030000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbsll_v (__m256i_op0, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_op0[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x2020202020206431; ++ *((unsigned long *)&__m256i_result[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[2]) = 0x2020643100000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[0]) = 0x2020643100000000; ++ __m256i_out = __lasx_xvbsll_v (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000050504c4c2362; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000b2673a90896a4; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000050504c4c2362; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000b2673a90896a4; ++ *((unsigned long *)&__m256i_result[3]) = 0xa90896a400000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xa90896a400000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbsll_v (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x003f003f003f0040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x003f003f003f0040; ++ *((unsigned long *)&__m256i_result[3]) = 0x00003f003f003f00; ++ *((unsigned long *)&__m256i_result[2]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00003f003f003f00; ++ *((unsigned long *)&__m256i_result[0]) = 0x4000000000000000; ++ __m256i_out = __lasx_xvbsll_v (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbsll_v (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbsll_v (__m256i_op0, 0x10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbsll_v (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsrl_v.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsrl_v.c +new file mode 100644 +index 000000000..340f7691b +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsrl_v.c +@@ -0,0 +1,64 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ 
__m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000007d0d0d0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000007d0d0d0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000007d0d0d00000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000007d0d0d00000; ++ __m256i_out = __lasx_xvbsrl_v (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x01fffffffe000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x01fffffffe000000; ++ __m256i_out = __lasx_xvbsrl_v (__m256i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000018803100188; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000018803100188; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvbsrl_v (__m256i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbsrl_v (__m256i_op0, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvneg.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvneg.c +new file mode 100644 +index 000000000..3cd1626d4 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvneg.c +@@ -0,0 +1,526 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x002e4db200000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x000315ac0000d658; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00735278007cf94c; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0003ed8800031b38; ++ *((unsigned long *)&__m256i_result[3]) = 0xffd1b24e00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffcea54ffff29a8; ++ *((unsigned long *)&__m256i_result[1]) = 0xff8cad88ff8306b4; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffc1278fffce4c8; ++ __m256i_out = __lasx_xvneg_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000ffff8000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x06f880008000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x800080008000b8f1; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000010180000101; ++ *((unsigned long *)&__m256i_result[2]) = 0xfa08800080000101; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[0]) = 0x800080008000480f; ++ __m256i_out = __lasx_xvneg_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvneg_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffefffffefc; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned 
long *)&__m256i_result[3]) = 0x0101010101010102; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010201010204; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010102; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010102; ++ __m256i_out = __lasx_xvneg_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffefd; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010203; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvneg_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff8c80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fff0e400; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000007380; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ 
*((unsigned long *)&__m256i_result[0]) = 0x00000000000f1c00; ++ __m256i_out = __lasx_xvneg_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvneg_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned 
long *)&__m256i_op0[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000800000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000800000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000800000000000; ++ __m256i_out = __lasx_xvneg_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000000; ++ __m256i_out = __lasx_xvneg_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0081000100810001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0081000100810001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0081000100810001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0081000100810001; ++ __m256i_out = __lasx_xvneg_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvneg_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x223d76f09f3881ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3870ca8d013e76a0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x223d76f09f37e357; ++ *((unsigned long *)&__m256i_op0[0]) = 0x43ec0a1b2aba7ed0; ++ *((unsigned long *)&__m256i_result[3]) = 0xdec38a1061c87f01; ++ *((unsigned long *)&__m256i_result[2]) = 0xc8903673ffc28a60; ++ *((unsigned long *)&__m256i_result[1]) = 0xdec38a1061c91da9; ++ *((unsigned long *)&__m256i_result[0]) = 0xbd14f6e5d6468230; ++ __m256i_out = __lasx_xvneg_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000007e8080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000001fdda7dc4; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000007e8080; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000001fdda7dc4; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ff827f80; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0226823c; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ff827f80; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0226823c; ++ __m256i_out = __lasx_xvneg_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000180000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000180000001; ++ __m256i_out = __lasx_xvneg_w 
(__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000001; ++ __m256i_out = __lasx_xvneg_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000f000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000f000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff1000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff1000000000000; ++ __m256i_out = __lasx_xvneg_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff8000ffa3; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000008000165a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff8000ffa3; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000008000165a; ++ *((unsigned long 
*)&__m256i_result[3]) = 0xffff00017fff005d; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007fffe9a6; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff00017fff005d; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007fffe9a6; ++ __m256i_out = __lasx_xvneg_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xf7f7f7f7f7f7f7f8; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xf7f7f7f7f7f7f7f8; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000ffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000ffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffff0100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[0]) = 0xffffff0100000001; ++ __m256i_out = __lasx_xvneg_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0100004300000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0100004300000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_result[2]) = 0xff0000bd00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_result[0]) = 0xff0000bd00000000; ++ __m256i_out = __lasx_xvneg_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned 
long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000010000080040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000010000080040; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000fff8ffc0; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ff00fff8ffc0; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000fff8ffc0; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ff00fff8ffc0; ++ __m256i_out = __lasx_xvneg_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000010001; ++ __m256i_out = __lasx_xvneg_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000497fe0000080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000683fe0000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000497fe0000080; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000683fe0000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffb6811fffff80; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff97c120000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffb6811fffff80; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff97c120000000; ++ __m256i_out = __lasx_xvneg_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfefefefefdfdfdfd; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfefefefefdfdfdfd; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010202020203; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010201010102; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010202020203; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010201010102; ++ __m256i_out = __lasx_xvneg_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000032; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000032; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffce; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffce; ++ __m256i_out = __lasx_xvneg_w 
(__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00007fde00007fd4; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00007fe000007fe0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00007fde00007fd4; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00007fe000007fe0; ++ *((unsigned long *)&__m256i_result[3]) = 0x000081220000812c; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000812000008120; ++ *((unsigned long *)&__m256i_result[1]) = 0x000081220000812c; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000812000008120; ++ __m256i_out = __lasx_xvneg_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000002780; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000002780; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffd880; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffd880; ++ __m256i_out = __lasx_xvneg_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-1.c 
b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-1.c +new file mode 100644 +index 000000000..b4ac50271 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-1.c +@@ -0,0 +1,537 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000800000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000800080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc9d8080067f50020; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc70000020000c000; ++ *((unsigned long *)&__m256i_result[3]) = 0xf000f00000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000f000f0000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xf0f008000ff5000f; ++ *((unsigned long *)&__m256i_result[0]) = 0xf00000020000f000; ++ __m256i_out = __lasx_xvsat_b (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ff80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000fff8; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 
0x000000000000ffff; ++ __m256i_out = __lasx_xvsat_b (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_b (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000e000e000e000e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000e000e000e000e; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000e000e000e000e; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000e000e000e000e; ++ __m256i_out = __lasx_xvsat_b (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsat_b (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_b (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_b (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00003fea00013fec; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00003fe50001c013; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00003fea00013fec; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00003fe50001c013; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000ff0000ff00; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000ff0000ff00; ++ __m256i_out = __lasx_xvsat_b (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000399400003994; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000399400003994; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000399400003994; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x0000399400003994; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000fff00000fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000fff00000fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000fff00000fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000fff00000fff; ++ __m256i_out = __lasx_xvsat_h (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsat_h (__m256i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_h (__m256i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfebdff3eff3dff52; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfebdff3eff3dff52; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfebdff3eff3dff52; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfebdff3eff3dff52; ++ *((unsigned long *)&__m256i_result[3]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long *)&__m256i_result[2]) = 
0xffc0ffc0ffc0ffc0; ++ *((unsigned long *)&__m256i_result[1]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long *)&__m256i_result[0]) = 0xffc0ffc0ffc0ffc0; ++ __m256i_out = __lasx_xvsat_h (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_h (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_h (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00002df900001700; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffe05ffffe911; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00002df900001700; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffe05ffffe911; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000300000003; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffcfffffffc; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000300000003; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffcfffffffc; ++ __m256i_out = 
__lasx_xvsat_h (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_h (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffff0001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffdd97dc4; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffff0001; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffdd97dc4; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffff0001; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffdd97dc4; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffff0001; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffdd97dc4; ++ __m256i_out = __lasx_xvsat_h (__m256i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsat_h (__m256i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[2]) = 0x0000014402080144; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000014402080144; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000007f007f007f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000007f007f007f; ++ __m256i_out = __lasx_xvsat_h (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_h (__m256i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_h (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000ffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000ffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000003fffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000003fffff; ++ __m256i_out = __lasx_xvsat_h (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0002000200000022; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0049004200000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000007f00000022; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000007f00000000; ++ __m256i_out = __lasx_xvsat_w (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffefffffefd; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffefffffefd; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsat_w (__m256i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_w (__m256i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00c200c200c200c2; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00c200c200c200bb; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00c200c200c200c2; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00c200c200c200bb; ++ *((unsigned long *)&__m256i_result[3]) = 0x007fffff007fffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x007fffff007fffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x007fffff007fffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x007fffff007fffff; ++ __m256i_out = __lasx_xvsat_w (__m256i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001ffff0001ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001ffff0001ffff; ++ __m256i_out = __lasx_xvsat_w (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000080000001000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000080000000800; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000080000001000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000080000000800; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000f0000000f; ++ __m256i_out = __lasx_xvsat_w (__m256i_op0, 0x4); 
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000050504c4c2362; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000b8f81b8c840e4; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000050504c4c2362; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000b8f81b8c840e4; ++ *((unsigned long *)&__m256i_result[3]) = 0x000007ff000007ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000007fffffff800; ++ *((unsigned long *)&__m256i_result[1]) = 0x000007ff000007ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000007fffffff800; ++ __m256i_out = __lasx_xvsat_w (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_w (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_w (__m256i_op0, 0x12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_w (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_w (__m256i_op0, 0x10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_w (__m256i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_w (__m256i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x97541c5897541c58; ++ *((unsigned long *)&__m256i_op0[2]) = 0x97541c5897541c58; ++ *((unsigned long *)&__m256i_op0[1]) = 0x97541c5897541c58; ++ *((unsigned long *)&__m256i_op0[0]) = 0x97541c5897541c58; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffc00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffc00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffc00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffc00000000; ++ __m256i_out = __lasx_xvsat_d (__m256i_op0, 0x22); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffff605a; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffff605a; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffff605a; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffff605a; ++ __m256i_out = __lasx_xvsat_d (__m256i_op0, 0x2d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x001175f10e4330e8; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff8f0842ff29211e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffff8d9ffa7103d; ++ *((unsigned long *)&__m256i_result[3]) = 0x001175f10e4330e8; ++ *((unsigned long *)&__m256i_result[2]) = 0xff8f0842ff29211e; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffff8d9ffa7103d; ++ __m256i_out = __lasx_xvsat_d (__m256i_op0, 0x39); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvsat_d (__m256i_op0, 0x21); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffe000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffe000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffe000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffe000000000000; ++ __m256i_out = __lasx_xvsat_d (__m256i_op0, 0x31); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000001ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000001ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000001ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000001ffffffff; ++ __m256i_out = __lasx_xvsat_d (__m256i_op0, 0x21); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_d (__m256i_op0, 0x3d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_d (__m256i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x498000804843ffe0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4980008068400000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x498000804843ffe0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4980008068400000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0fffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0fffffffffffffff; ++ __m256i_out = __lasx_xvsat_d (__m256i_op0, 0x3c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffe97c020010001; ++ 
*((unsigned long *)&__m256i_op0[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffff8; ++ __m256i_out = __lasx_xvsat_d (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000002c21ffeff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc0000000c0000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000002c21ffeff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc0000000c0000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffff8; ++ __m256i_out = __lasx_xvsat_d (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_d (__m256i_op0, 0x32); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-2.c +new file mode 100644 +index 000000000..e5ee89deb +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-2.c 
+@@ -0,0 +1,427 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x3f3f3f3f3f3f3f3f; ++ *((unsigned long *)&__m256i_result[2]) = 0x3f3f3f3f3f3f3f3f; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000003f3f3f3f; ++ *((unsigned long *)&__m256i_result[0]) = 0x3f3f3f3f00000000; ++ __m256i_out = __lasx_xvsat_bu (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_bu (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvsat_bu (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000001ffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000001ffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000017f7f7f; ++ *((unsigned long *)&__m256i_result[2]) = 0x7f00000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000017f7f7f; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f00000000000000; ++ __m256i_out = __lasx_xvsat_bu (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000700000007; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0007ffff0007ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000700000007; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0007ffff0007ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000700000007; ++ *((unsigned long *)&__m256i_result[2]) = 0x00071f1f00071f1f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000700000007; ++ *((unsigned long *)&__m256i_result[0]) = 0x00071f1f00071f1f; ++ __m256i_out = __lasx_xvsat_bu (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000d6d6d; ++ __m256i_out = __lasx_xvsat_bu (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000003fff3fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000003fff3fff; ++ __m256i_out = __lasx_xvsat_hu (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_op0[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_result[3]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m256i_result[2]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m256i_result[1]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m256i_result[0]) = 0x001f001f001f001f; ++ __m256i_out = __lasx_xvsat_hu (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00001fff00001fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00001fff00001fff; ++ *((unsigned 
long *)&__m256i_result[1]) = 0x00001fff00001fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00001fff00001fff; ++ __m256i_out = __lasx_xvsat_hu (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_hu (__m256i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000003f003f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000003f003f; ++ __m256i_out = __lasx_xvsat_hu (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc0090000c0200060; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc0090000c0200060; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x007f0000007f0060; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x007f0000007f0060; ++ __m256i_out = __lasx_xvsat_hu (__m256i_op0, 
0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3eab77367fff4848; ++ *((unsigned long *)&__m256i_op0[2]) = 0x408480007fff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3eab77367fff4848; ++ *((unsigned long *)&__m256i_op0[0]) = 0x408480007fff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0003000300030003; ++ *((unsigned long *)&__m256i_result[2]) = 0x0003000300030000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0003000300030003; ++ *((unsigned long *)&__m256i_result[0]) = 0x0003000300030000; ++ __m256i_out = __lasx_xvsat_hu (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000001fff000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000029170; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000001fff000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000029170; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000001ff03ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000203ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000001ff03ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000203ff; ++ __m256i_out = __lasx_xvsat_hu (__m256i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_hu (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_hu (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_wu (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_wu (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_wu (__m256i_op0, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_wu (__m256i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_wu (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_wu (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x457db03e457db03e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x457db03e45a87310; ++ *((unsigned long *)&__m256i_op0[1]) = 0x457db03e457db03e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x457db03e45a87310; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000f0000000f; ++ __m256i_out = __lasx_xvsat_wu (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000077fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000003ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_wu (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00003fe000000000; ++ __m256i_out = __lasx_xvsat_wu (__m256i_op0, 0x15); 
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000fffcfffc; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000fffcfffc; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000fffcfffc; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffcfffc; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000003fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000003fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000003fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000003fff; ++ __m256i_out = __lasx_xvsat_wu (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7efefefe80ffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7efefefe80ffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0fffffff0fffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0fffffff0fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0fffffff0fffffff; ++ __m256i_out = __lasx_xvsat_wu (__m256i_op0, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffe000ffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffe000ffffffff08; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffe000ffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffe000ffffffff08; ++ *((unsigned long *)&__m256i_result[3]) = 0x0fffffff0fffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0fffffff0fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0fffffff0fffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0fffffff0fffffff; ++ __m256i_out = __lasx_xvsat_wu (__m256i_op0, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000003ffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000003ffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000003ffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000003ffffffffff; ++ __m256i_out = __lasx_xvsat_du (__m256i_op0, 0x29); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_du (__m256i_op0, 0x34); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001ffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001ffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000007; ++ __m256i_out = __lasx_xvsat_du (__m256i_op0, 0x30); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x247fe49409620040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2475cef801f0ffdd; ++ *((unsigned long *)&__m256i_op0[1]) = 0x6580668200fe0002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x419cd5b11c3c5654; ++ *((unsigned 
long *)&__m256i_result[3]) = 0x247fe49409620040; ++ *((unsigned long *)&__m256i_result[2]) = 0x2475cef801f0ffdd; ++ *((unsigned long *)&__m256i_result[1]) = 0x6580668200fe0002; ++ *((unsigned long *)&__m256i_result[0]) = 0x419cd5b11c3c5654; ++ __m256i_out = __lasx_xvsat_du (__m256i_op0, 0x3f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_du (__m256i_op0, 0x22); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff800080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000001ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000001ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000001ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000001ff; ++ __m256i_out = __lasx_xvsat_du (__m256i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1fa0000000080000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1fa0000000080000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000007fffff; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000007fffff; ++ __m256i_out = __lasx_xvsat_du (__m256i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000017f00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00007f7f03030000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000017f00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007f7f03030000; ++ __m256i_out = __lasx_xvsat_du (__m256i_op0, 0x37); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_du (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +-- +2.33.0 + diff --git a/LoongArch-Add-tests-for-ASX-vector-xvadd-xvadda-xvad.patch b/LoongArch-Add-tests-for-ASX-vector-xvadd-xvadda-xvad.patch new file mode 100644 index 0000000000000000000000000000000000000000..6c06f9abe40adbdd058cf5bcbeaf65678e7a4523 --- /dev/null +++ b/LoongArch-Add-tests-for-ASX-vector-xvadd-xvadda-xvad.patch @@ -0,0 +1,6368 @@ +From a6d51c0d69572f800f63c3215b7de6665024104c Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Tue, 12 Sep 2023 15:15:40 +0800 +Subject: [PATCH 099/124] LoongArch: Add tests for ASX vector + 
xvadd/xvadda/xvaddi/xvaddwev/ xvaddwodxvsadd instructions. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lasx/lasx-xvadd.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvadda.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvaddi.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvaddwev-1.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvaddwev-2.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvaddwev-3.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvaddwod-1.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvaddwod-2.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvaddwod-3.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvsadd-1.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvsadd-2.c: New test. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lasx/lasx-xvadd.c | 725 ++++++++++++++++ + .../loongarch/vector/lasx/lasx-xvadda.c | 785 ++++++++++++++++++ + .../loongarch/vector/lasx/lasx-xvaddi.c | 427 ++++++++++ + .../loongarch/vector/lasx/lasx-xvaddwev-1.c | 740 +++++++++++++++++ + .../loongarch/vector/lasx/lasx-xvaddwev-2.c | 485 +++++++++++ + .../loongarch/vector/lasx/lasx-xvaddwev-3.c | 515 ++++++++++++ + .../loongarch/vector/lasx/lasx-xvaddwod-1.c | 530 ++++++++++++ + .../loongarch/vector/lasx/lasx-xvaddwod-2.c | 560 +++++++++++++ + .../loongarch/vector/lasx/lasx-xvaddwod-3.c | 485 +++++++++++ + .../loongarch/vector/lasx/lasx-xvsadd-1.c | 650 +++++++++++++++ + .../loongarch/vector/lasx/lasx-xvsadd-2.c | 350 ++++++++ + 11 files changed, 6252 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadd.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadda.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-1.c + create mode 100644 
gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-2.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadd.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadd.c +new file mode 100644 +index 000000000..293295723 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadd.c +@@ -0,0 +1,725 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvadd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffefefffffefe; ++ __m256i_out = __lasx_xvadd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvadd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x41cfe01dde000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x41cfe01dde000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x41cfe01dde000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x41cfe01dde000000; ++ __m256i_out = __lasx_xvadd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000004000000040; ++ __m256i_out = __lasx_xvadd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffefffefffefffe; ++ __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000200; ++ __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x7f8000007f7fffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7f8000007f7fffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7f8000007f7fffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f8000007f7fffff; ++ __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffff900000800; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffff900000800; ++ __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00b213171dff0606; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00e9a80014ff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00b213171dff0606; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00e9a80014ff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00b213171dff0606; ++ *((unsigned long *)&__m256i_result[2]) = 0x00e9a80014ff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00b213171dff0606; ++ *((unsigned long *)&__m256i_result[0]) = 0x00e9a80014ff0000; ++ __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000956a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000004efffe00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000956a; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000004efffe00; ++ 
*((unsigned long *)&__m256i_op1[3]) = 0x007fffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xb500000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x007fffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xb500000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x007fffffffff9569; ++ *((unsigned long *)&__m256i_result[2]) = 0xb50000004efffe00; ++ *((unsigned long *)&__m256i_result[1]) = 0x007fffffffff9569; ++ *((unsigned long *)&__m256i_result[0]) = 0xb50000004efffe00; ++ __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ff0100ff0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ff01; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ff0100ff0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ff01; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ff0100ff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff01; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ff0100ff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ff01; ++ __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000ffff8000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x06f880008000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x800080008000b8f1; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000010180000101; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfa08800080000101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x800080008000480f; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0001010000010100; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101000000010100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000000010000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000000010100; ++ __m256i_out = __lasx_xvadd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ffffffffff605a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ffffffffff605a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ffffffffff605a; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ffffffffff605a; ++ __m256i_out = __lasx_xvadd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x5555555536aaaaac; ++ *((unsigned long *)&__m256i_op0[2]) = 0x55555555aaaaaaac; ++ *((unsigned long *)&__m256i_op0[1]) = 0x5555555536aaaaac; ++ *((unsigned long *)&__m256i_op0[0]) = 0x55555555aaaaaaac; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x5555555536aaaaac; ++ *((unsigned long *)&__m256i_result[2]) = 0x55555555aaaaaaac; ++ *((unsigned long *)&__m256i_result[1]) = 0x5555555536aaaaac; ++ *((unsigned long *)&__m256i_result[0]) = 0x55555555aaaaaaac; ++ __m256i_out = __lasx_xvadd_w 
(__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000154dc84; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000089; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000154dc84; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000089; ++ __m256i_out = __lasx_xvadd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000014402080144; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x0000014402080144; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000014402080144; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000014402080144; ++ __m256i_out = __lasx_xvadd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000800000008; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000800000008; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000800000008; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000800000008; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000800000008; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000800000008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000800000008; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000800000008; ++ __m256i_out = __lasx_xvadd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_result[2]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_result[1]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_result[0]) = 0x0202010202020102; ++ __m256i_out = __lasx_xvadd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000003fff3fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3fff3fff3fff4000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000403f3fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000003fff3fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x3fff3fff3fff4000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000403f3fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x7ffe7ffe7ffe7ffe; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007ffe7ffe; ++ *((unsigned long *)&__m256i_result[1]) = 0x7ffe7ffe7ffe8000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000807e7ffe; ++ __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x6161616161616161; ++ *((unsigned long *)&__m256i_op0[2]) = 0x6161616161616161; ++ *((unsigned long *)&__m256i_op0[1]) = 0x6161616161616161; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6161616161616161; ++ *((unsigned long *)&__m256i_op1[3]) = 0x6161616161616161; ++ *((unsigned long *)&__m256i_op1[2]) = 0x6161616161616161; ++ *((unsigned long *)&__m256i_op1[1]) = 0x6161616161616161; ++ *((unsigned long *)&__m256i_op1[0]) = 0x6161616161616161; ++ *((unsigned long *)&__m256i_result[3]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256i_result[2]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256i_result[1]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long 
*)&__m256i_result[0]) = 0xc2c2c2c2c2c2c2c2; ++ __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_result[3]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_result[2]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_result[1]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f807f007f7f817f; ++ __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff0020001d001f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffff0020001d001f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff0020001d001f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff0020001d001f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000800200027; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000800200028; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000800200027; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000800200028; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000006040190d; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0x000000006040190d; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000860601934; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000800200028; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000860601934; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000800200028; ++ __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000000d; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000000d; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000000d; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000000d; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000010001; ++ __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadd_q (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000045; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000d0005; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000045; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000d0005; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000045; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000d0005; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000045; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000d0005; ++ __m256i_out = __lasx_xvadd_q (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x001f001f001f001f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000c0000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000040000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0020001f001f001e; ++ *((unsigned long *)&__m256i_result[2]) = 0x001f001fc01f001f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0020001f001f001e; ++ *((unsigned long *)&__m256i_result[0]) = 0x001f001f401f001f; ++ __m256i_out = __lasx_xvadd_q (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7ffeffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff7ffeffffffff; ++ __m256i_out = __lasx_xvadd_q (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x800000ff800000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x800000ff800000ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[2]) = 0x800000ff800000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[0]) = 0x800000ff800000ff; ++ __m256i_out = __lasx_xvadd_q (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000010000080040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000010000080040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000010000080040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000010000080040; ++ __m256i_out = __lasx_xvadd_q (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0xfffffffeffff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffeffff0000; ++ __m256i_out = __lasx_xvadd_q (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadda.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadda.c +new file mode 100644 +index 000000000..d6b57d1cd +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadda.c +@@ -0,0 +1,785 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000008000000080; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000008000000080; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101008000000080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000200000002; ++ *((unsigned long
*)&__m256i_result[0]) = 0x0101008000000080; ++ __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000402000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000402000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000402000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000402000000; ++ __m256i_out = __lasx_xvadda_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffeffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100010102; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000102; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000102; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xefefefefefefefef; ++ *((unsigned long *)&__m256i_op0[2]) = 0xefefefefefefefef; ++ *((unsigned long *)&__m256i_op0[1]) = 0xefefefefefefef6e; ++ *((unsigned long *)&__m256i_op0[0]) = 0xeeeeeeeeeeeeeeee; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x1010101010101012; ++ *((unsigned long *)&__m256i_result[2]) = 0x1010101010101012; ++ *((unsigned long *)&__m256i_result[1]) = 0x1010101010101093; ++ *((unsigned long *)&__m256i_result[0]) = 0x1111111111111113; ++ __m256i_out = __lasx_xvadda_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff0000000000000; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0110000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0110000000000080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0110000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0110000000000080; ++ __m256i_out = __lasx_xvadda_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x1000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x1000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x1000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x1000000000000000; ++ __m256i_out = __lasx_xvadda_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1fe01e0000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1fe01e0000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x1fe01e0000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x1fe01e0000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xce7ffffffffffffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x6300000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xce7ffffffffffffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x6300000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x327f010101010102; ++ *((unsigned long *)&__m256i_result[2]) = 0x6300000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x327f010101010102; ++ *((unsigned long *)&__m256i_result[0]) = 0x6300000000000000; ++ __m256i_out = __lasx_xvadda_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff5556aaaa; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff5556aaaa; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0006ffff0004ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0002000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0006ffff0004ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0002000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0006ffff0004ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00020000aaa95556; ++ *((unsigned long *)&__m256i_result[1]) = 0x0006ffff0004ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00020000aaa95556; ++ __m256i_out = __lasx_xvadda_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0006000000040000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvadda_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xdb801b6d0962003f; ++ *((unsigned long *)&__m256i_op1[2]) = 0xdb8a3109fe0f0024; ++ *((unsigned long *)&__m256i_op1[1]) = 0x9a7f997fff01ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xbe632a4f1c3c5653; ++ *((unsigned long *)&__m256i_result[3]) = 0x247fe49409620040; ++ *((unsigned long *)&__m256i_result[2]) = 0x2475cef801f0ffdd; ++ *((unsigned long *)&__m256i_result[1]) = 0x6580668200fe0002; ++ *((unsigned long *)&__m256i_result[0]) = 0x419cd5b11c3c5654; ++ __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x017e01fe01fe01fe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0586060601fe0202; ++ *((unsigned long *)&__m256i_op0[1]) = 0x017e01fe01fe0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0586060601fe0004; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001000000010000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0010001000100001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000000010000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0010001000100001; ++ *((unsigned long *)&__m256i_result[3]) = 0x017f01fe01ff01fe; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x05960616020e0203; ++ *((unsigned long *)&__m256i_result[1]) = 0x017f01fe01ff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x05960616020e0005; ++ __m256i_out = __lasx_xvadda_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000045; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000d0005; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000045; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000d0005; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010146; ++ *((unsigned long *)&__m256i_result[2]) = 0x01010101010e0106; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010146; ++ *((unsigned long *)&__m256i_result[0]) = 0x01010101010e0106; ++ __m256i_out = __lasx_xvadda_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0010000000100000; ++ __m256i_out = __lasx_xvadda_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffb79fb74; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffb79fb74; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000010486048c; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000006; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000010486048c; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000006; ++ __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000010000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010000; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000020000; ++ __m256i_out = __lasx_xvadda_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000004411; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000004411; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff000c0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff00040000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x00010001000c4411; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100044411; ++ __m256i_out = __lasx_xvadda_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000002000000018; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000002000000019; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000200000001e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000002000000019; ++ *((unsigned long *)&__m256i_op1[3]) = 0x223d76f09f3881ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x3870ca8d013e76a0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x223d76f09f37e357; ++ *((unsigned long *)&__m256i_op1[0]) = 0x43ec0a1b2aba7ed0; ++ *((unsigned long *)&__m256i_result[3]) = 0x223d771060c77e19; ++ *((unsigned long *)&__m256i_result[2]) = 0x3870caad013e76b9; ++ *((unsigned long *)&__m256i_result[1]) = 0x223d771060c81cc7; ++ *((unsigned long *)&__m256i_result[0]) = 0x43ec0a3b2aba7ee9; ++ __m256i_out = 
__lasx_xvadda_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xdbcbdbcbecececec; ++ *((unsigned long *)&__m256i_op0[2]) = 0xdbcbdbcb0000dbcb; ++ *((unsigned long *)&__m256i_op0[1]) = 0xdbcbdbcbecececec; ++ *((unsigned long *)&__m256i_op0[0]) = 0xdbcbdbcb0000dbcb; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x2535253514141414; ++ *((unsigned long *)&__m256i_result[2]) = 0x2535253500002535; ++ *((unsigned long *)&__m256i_result[1]) = 0x2535253514141414; ++ *((unsigned long *)&__m256i_result[0]) = 0x2535253500002535; ++ __m256i_out = __lasx_xvadda_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0020000f0000000f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0010000f0000000f; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0020000f0000000f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0010000f0000000f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0020000f0000000f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0010000f0000000f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0020000f0000000f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0010000f0000000f; ++ __m256i_out = __lasx_xvadda_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000504f00002361; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff8f81000040e4; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000504f00002361; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff8f81000040e4; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000007ff000007ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000007ff00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000007ff000007ff; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0x000007ff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000584e00002b60; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000787dffffbf1c; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000584e00002b60; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000787dffffbf1c; ++ __m256i_out = __lasx_xvadda_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffe00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffe00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010200000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010200000000; ++ __m256i_out = __lasx_xvadda_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fef010000010100; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fef010000010100; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fef010000010100; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x7fef010000010100; ++ __m256i_out = __lasx_xvadda_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000001fe; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x40b2bf4d30313031; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fffa2bea2be; ++ *((unsigned long *)&__m256i_op0[1]) = 0x40b2bf4d30313031; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fffa2bea2be; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x40b240b330313031; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff5d425d42; ++ *((unsigned long *)&__m256i_result[1]) = 0x40b240b330313031; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff5d425d42; ++ __m256i_out = __lasx_xvadda_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000100040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000100040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000100040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000100040; ++ __m256i_out = __lasx_xvadda_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000100040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000100040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000100080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000100080; ++ __m256i_out = __lasx_xvadda_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff896099cbdbfff1; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc987ffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff896099cbdbfff1; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc987ffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00769f673424000f; ++ *((unsigned long *)&__m256i_result[2]) = 0x3678000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x00769f673424000f; ++ *((unsigned long *)&__m256i_result[0]) = 0x3678000100000001; ++ __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvadda_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvadda_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000500000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000700000032; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000500000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000700000032; ++ __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00003feec0108022; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00003fe9c015802c; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00003feec0108022; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00003fe9c015802c; ++ *((unsigned long *)&__m256i_result[3]) = 0x00007f124010c022; ++ *((unsigned long *)&__m256i_result[2]) = 0x00007f174015c02c; ++ *((unsigned long *)&__m256i_result[1]) = 0x00007f124010c022; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007f174015c02c; ++ __m256i_out = __lasx_xvadda_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfa15fa15fa15fa14; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 
0xfa15fa15fa15fa14; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x05ea05ea05ea05ec; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x05ea05ea05ea05ec; ++ __m256i_out = __lasx_xvadda_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000010101010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101000000010000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000010101010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101000000010000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000020202020202; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101000000010000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000020202020202; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101000000010000; ++ __m256i_out = __lasx_xvadda_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddi.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddi.c +new file mode 100644 +index 000000000..054bf6e55 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddi.c +@@ -0,0 +1,427 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, 
__m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x44bb2cd3a35c2fd0; ++ *((unsigned long *)&__m256i_op0[0]) = 0xca355ba46a95e31c; ++ *((unsigned long *)&__m256i_result[3]) = 0x1d1d1d1d1d1d1d1d; ++ *((unsigned long *)&__m256i_result[2]) = 0x1d1d1d1d1d1d1d1d; ++ *((unsigned long *)&__m256i_result[1]) = 0x61d849f0c0794ced; ++ *((unsigned long *)&__m256i_result[0]) = 0xe75278c187b20039; ++ __m256i_out = __lasx_xvaddi_bu (__m256i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffbf7f7fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffe651bfff; ++ *((unsigned long *)&__m256i_result[3]) = 0x1d1d1d1d1d1d1d1d; ++ *((unsigned long *)&__m256i_result[2]) = 0x1d1d1d1ddd9d9d1d; ++ *((unsigned long *)&__m256i_result[1]) = 0x1d1d1d1d1d1d1d1d; ++ *((unsigned long *)&__m256i_result[0]) = 0x1d1d1d1d046fdd1d; ++ __m256i_out = __lasx_xvaddi_bu (__m256i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x1515151515151515; ++ *((unsigned long *)&__m256i_result[2]) = 0x1515151515151515; ++ *((unsigned long *)&__m256i_result[1]) = 0x1515151515151515; ++ *((unsigned long *)&__m256i_result[0]) = 0x1515151515151515; ++ __m256i_out = __lasx_xvaddi_bu (__m256i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x1818181818181818; ++ *((unsigned long *)&__m256i_result[2]) = 0x1818181818181818; ++ *((unsigned long *)&__m256i_result[1]) = 0x1818181818181818; ++ *((unsigned long *)&__m256i_result[0]) = 0x1818181818181818; ++ __m256i_out = __lasx_xvaddi_bu (__m256i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00007fff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00007fff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0202020202020202; ++ *((unsigned long *)&__m256i_result[2]) = 0x0202810102020202; ++ *((unsigned long *)&__m256i_result[1]) = 0x0202020202020202; ++ *((unsigned long *)&__m256i_result[0]) = 0x0202810102020202; ++ __m256i_out = __lasx_xvaddi_bu (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0909090909090909; ++ *((unsigned long *)&__m256i_result[2]) = 0x0909090909090909; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0909090909090909; ++ *((unsigned long *)&__m256i_result[0]) = 0x0909090909090909; ++ __m256i_out = __lasx_xvaddi_bu (__m256i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff00ffce20; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00ffce20; ++ *((unsigned long *)&__m256i_result[3]) = 0x1514151415141514; ++ *((unsigned long *)&__m256i_result[2]) = 0x151415141514e335; ++ *((unsigned long *)&__m256i_result[1]) = 0x1514151415141514; ++ *((unsigned long *)&__m256i_result[0]) = 0x151415141514e335; ++ __m256i_out = __lasx_xvaddi_bu (__m256i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0606060606060606; ++ *((unsigned long *)&__m256i_result[2]) = 0x0606060606060606; ++ *((unsigned long *)&__m256i_result[1]) = 0x0606060606060606; ++ *((unsigned long *)&__m256i_result[0]) = 0x0606060606060606; ++ __m256i_out = __lasx_xvaddi_bu (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x1212121212121212; ++ *((unsigned long *)&__m256i_result[2]) = 0x1212121212121212; ++ *((unsigned long *)&__m256i_result[1]) = 0x1212121212121212; ++ *((unsigned long *)&__m256i_result[0]) = 0x1212121212121212; ++ __m256i_out = __lasx_xvaddi_bu (__m256i_op0, 
0x12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[2]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[0]) = 0x0808080808080808; ++ __m256i_out = __lasx_xvaddi_bu (__m256i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000001200000012; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000001200000012; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000001200000012; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000001200000012; ++ *((unsigned long *)&__m256i_result[3]) = 0x1a1a1a2c1a1a1a2c; ++ *((unsigned long *)&__m256i_result[2]) = 0x1a1a1a2c1a1a1a2c; ++ *((unsigned long *)&__m256i_result[1]) = 0x1a1a1a2c1a1a1a2c; ++ *((unsigned long *)&__m256i_result[0]) = 0x1a1a1a2c1a1a1a2c; ++ __m256i_out = __lasx_xvaddi_bu (__m256i_op0, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x1d1d1d1e1d1d1d1e; ++ *((unsigned long *)&__m256i_result[2]) = 0x1d1d1d1e1d1d1d1e; ++ *((unsigned long *)&__m256i_result[1]) = 0x1d1d1d1e1d1d1d1e; ++ *((unsigned long *)&__m256i_result[0]) = 0x1d1d1d1e1d1d1d1e; ++ __m256i_out = __lasx_xvaddi_bu (__m256i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x5980000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5980000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0002000200020002; ++ *((unsigned long *)&__m256i_result[2]) = 0x5982000200020002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0002000200020002; ++ *((unsigned long *)&__m256i_result[0]) = 0x5982000200020002; ++ __m256i_out = __lasx_xvaddi_hu (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_result[3]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m256i_result[2]) = 0x001f001f02c442af; ++ *((unsigned long *)&__m256i_result[1]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m256i_result[0]) = 0x001f001f02c442af; ++ __m256i_out = __lasx_xvaddi_hu (__m256i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[2]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[0]) = 0x0010001000100010; ++ __m256i_out = __lasx_xvaddi_hu (__m256i_op0, 0x10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x807e80fd80fe80fd; ++ *((unsigned long *)&__m256i_op0[2]) = 0x80938013800d8002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x807e80fd80fe0000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x80938013800d0005; ++ *((unsigned long *)&__m256i_result[3]) = 0x8091811081118110; ++ *((unsigned long *)&__m256i_result[2]) = 0x80a6802680208015; ++ *((unsigned long *)&__m256i_result[1]) = 0x8091811081110013; ++ *((unsigned long *)&__m256i_result[0]) = 0x80a6802680200018; ++ __m256i_out = __lasx_xvaddi_hu (__m256i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000003f00390035; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8015003f0006001f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000003f00390035; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8015003f0006001f; ++ *((unsigned long *)&__m256i_result[3]) = 0x000b004a00440040; ++ *((unsigned long *)&__m256i_result[2]) = 0x8020004a0011002a; ++ *((unsigned long *)&__m256i_result[1]) = 0x000b004a00440040; ++ *((unsigned long *)&__m256i_result[0]) = 0x8020004a0011002a; ++ __m256i_out = __lasx_xvaddi_hu (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0016001600160016; ++ *((unsigned long *)&__m256i_result[2]) = 0x0016001600160016; ++ *((unsigned long *)&__m256i_result[1]) = 0x0016001600160016; ++ *((unsigned long *)&__m256i_result[0]) = 0x0016001600160016; ++ __m256i_out = __lasx_xvaddi_hu (__m256i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xa1a1a1a1a1a1a1a1; ++ *((unsigned long *)&__m256i_op0[2]) = 0xa1a1a1a15e5e5e5e; ++ *((unsigned long *)&__m256i_op0[1]) = 0xa1a1a1a1a1a1a1a1; ++ *((unsigned long *)&__m256i_op0[0]) = 0xa1a1a1a15e5e5e5e; ++ *((unsigned long *)&__m256i_result[3]) = 0xa1bfa1bfa1bfa1bf; ++ *((unsigned long *)&__m256i_result[2]) = 0xa1bfa1bf5e7c5e7c; ++ 
*((unsigned long *)&__m256i_result[1]) = 0xa1bfa1bfa1bfa1bf; ++ *((unsigned long *)&__m256i_result[0]) = 0xa1bfa1bf5e7c5e7c; ++ __m256i_out = __lasx_xvaddi_hu (__m256i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000100080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000100080; ++ *((unsigned long *)&__m256i_result[3]) = 0x001a001a001a009a; ++ *((unsigned long *)&__m256i_result[2]) = 0x001a001a002a009a; ++ *((unsigned long *)&__m256i_result[1]) = 0x001a001a001a009a; ++ *((unsigned long *)&__m256i_result[0]) = 0x001a001a002a009a; ++ __m256i_out = __lasx_xvaddi_hu (__m256i_op0, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x001c001c001c001c; ++ *((unsigned long *)&__m256i_result[2]) = 0x001c001c001c001c; ++ *((unsigned long *)&__m256i_result[1]) = 0x001c001c001c001c; ++ *((unsigned long *)&__m256i_result[0]) = 0x001c001c001d001d; ++ __m256i_out = __lasx_xvaddi_hu (__m256i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7200000072000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7200000072000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7200000072000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7200000072000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x721e001e721e001e; ++ *((unsigned long *)&__m256i_result[2]) = 0x721e001e721e001e; ++ *((unsigned long *)&__m256i_result[1]) = 0x721e001e721e001e; ++ *((unsigned long *)&__m256i_result[0]) = 0x721e001e721e001e; ++ __m256i_out = 
__lasx_xvaddi_hu (__m256i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000001900000019; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001900000019; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000001900000019; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001900000019; ++ __m256i_out = __lasx_xvaddi_wu (__m256i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000600000006; ++ __m256i_out = __lasx_xvaddi_wu (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000001900000019; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001a0000001a; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000001900000019; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001a0000001a; ++ __m256i_out = __lasx_xvaddi_wu (__m256i_op0, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000001900000019; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001900000019; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000001900000019; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001900000019; ++ __m256i_out = __lasx_xvaddi_wu (__m256i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddi_wu (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000001d0000001d; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001d0000001d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000001d0000001d; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001d0000001d; ++ __m256i_out = __lasx_xvaddi_wu (__m256i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000600000006; ++ __m256i_out = __lasx_xvaddi_wu (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000001fffd; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000001fffd; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000700020004; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000700020004; ++ __m256i_out = __lasx_xvaddi_wu (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000800000008; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000800000008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000800000008; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000800000008; ++ __m256i_out = __lasx_xvaddi_wu (__m256i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000019410000e69a; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf259905a0c126604; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000883a00000f20; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6d3c2d3aa1c82947; ++ *((unsigned long *)&__m256i_result[3]) = 0x000019410000e6aa; ++ *((unsigned long *)&__m256i_result[2]) = 
0xf259905a0c126614; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000883a00000f30; ++ *((unsigned long *)&__m256i_result[0]) = 0x6d3c2d3aa1c82957; ++ __m256i_out = __lasx_xvaddi_du (__m256i_op0, 0x10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000000d; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000000d; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000000d; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000000d; ++ __m256i_out = __lasx_xvaddi_du (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff0fff0ff01ff01; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff0fff0fff0fff0; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfff0fff0ff01ff01; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff0fff0fff0fff0; ++ *((unsigned long *)&__m256i_result[3]) = 0xfff0fff0ff01ff14; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff0fff0fff10003; ++ *((unsigned long *)&__m256i_result[1]) = 0xfff0fff0ff01ff14; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff0fff0fff10003; ++ __m256i_out = __lasx_xvaddi_du (__m256i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000504fffff3271; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff47b4ffff5879; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000504fffff3271; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff47b4ffff5879; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000504fffff3271; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff47b4ffff5879; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000504fffff3271; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff47b4ffff5879; ++ 
__m256i_out = __lasx_xvaddi_du (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000008; ++ __m256i_out = __lasx_xvaddi_du (__m256i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0fffffff0fffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0fffffff0fffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0fffffff0fffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0fffffff0fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0fffffff10000006; ++ *((unsigned long *)&__m256i_result[2]) = 0x0fffffff10000006; ++ *((unsigned long *)&__m256i_result[1]) = 0x0fffffff10000006; ++ *((unsigned long *)&__m256i_result[0]) = 0x0fffffff10000006; ++ __m256i_out = __lasx_xvaddi_du (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-1.c +new file mode 100644 +index 000000000..70f3bf783 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-1.c +@@ -0,0 +1,740 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, 
__m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x98111cca98111cca; ++ *((unsigned long *)&__m256i_op0[2]) = 0x98111cca98111cca; ++ *((unsigned long *)&__m256i_op0[1]) = 0x98111cca98111cca; ++ *((unsigned long *)&__m256i_op0[0]) = 0x98111cca98111cca; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffeffffff88; ++ *((unsigned long *)&__m256i_op1[2]) = 0x61e0000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffeffffff88; ++ *((unsigned long *)&__m256i_op1[0]) = 0x61e0000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0010ffc80010ff52; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff1ffca0011ffcb; ++ *((unsigned long *)&__m256i_result[1]) = 0x0010ffc80010ff52; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff1ffca0011ffcb; ++ __m256i_out = __lasx_xvaddwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffe90ffffff80; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffe90ffffff80; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffff90ffffff80; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffff90ffffff80; ++ __m256i_out = __lasx_xvaddwev_h_b 
(__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000005; ++ __m256i_out = __lasx_xvaddwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffefffefffefffe; ++ __m256i_out = __lasx_xvaddwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000023; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000023; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000023; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000023; ++ __m256i_out = __lasx_xvaddwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x01c601c6fe3afe3a; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x01c601c6fe3afe3a; 
++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffc6ffc6003a003a; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffc6ffc6003a003a; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvaddwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000011; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000011; ++ __m256i_out = __lasx_xvaddwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x457db03e457db03e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x457db03e45a87310; ++ *((unsigned long *)&__m256i_op0[1]) = 0x457db03e457db03e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x457db03e45a87310; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x007d003e007d003e; ++ *((unsigned long *)&__m256i_result[2]) = 0x007d003effa80010; ++ *((unsigned long *)&__m256i_result[1]) = 0x007d003e007d003e; ++ 
*((unsigned long *)&__m256i_result[0]) = 0x007d003effa80010; ++ __m256i_out = __lasx_xvaddwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvaddwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x386000003df80000; ++ *((unsigned 
long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x386000003df80000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0c6a240000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0c6a240000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ca0000fff80000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ca0000fff80000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x372e9d75e8aab100; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5464fbfc416b9f71; ++ *((unsigned long *)&__m256i_op0[1]) = 0x31730b5beb7c99f5; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0d8264202b8ea3f0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x80c72fcd40fb3bc0; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0x84bd087966d4ace0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x26aa68b274dc1322; ++ *((unsigned long *)&__m256i_op1[0]) = 0xe072db2bb9d4cd40; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffcd42ffffecc0; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000475ffff4c51; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000740dffffad17; ++ *((unsigned long *)&__m256i_result[0]) = 0x00003f4bffff7130; ++ __m256i_out = __lasx_xvaddwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ff80; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000468600007f79; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000f3280000dfff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000022; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffff80; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000468600008078; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffff328ffffe021; ++ __m256i_out = __lasx_xvaddwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x98111cca98111cca; ++ *((unsigned long *)&__m256i_op0[2]) = 0x98111cca98111cca; ++ *((unsigned long *)&__m256i_op0[1]) = 0x98111cca98111cca; ++ *((unsigned long *)&__m256i_op0[0]) = 0x98111cca98111cca; ++ *((unsigned long *)&__m256i_op1[3]) = 0x98111cca98111cca; ++ *((unsigned long *)&__m256i_op1[2]) = 0x98111cca98111cca; ++ *((unsigned long *)&__m256i_op1[1]) = 0x98111cca98111cca; ++ *((unsigned long *)&__m256i_op1[0]) = 0x98111cca98111cca; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000399400003994; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000399400003994; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000399400003994; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000399400003994; ++ __m256i_out = __lasx_xvaddwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xe161616161614f61; ++ *((unsigned long *)&__m256i_op1[2]) = 0xe161616161614f61; ++ *((unsigned long *)&__m256i_op1[1]) = 0xe161616161614f61; ++ *((unsigned long *)&__m256i_op1[0]) = 0xe161616161614f61; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000616100004f61; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000616100004f61; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000616100004f61; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000616100004f61; ++ __m256i_out = __lasx_xvaddwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvaddwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000050504c4c2362; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000b8f81b8c840e4; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000050504c4c2362; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000b8f81b8c840e4; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000504f00002361; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff8f81000040e4; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000504f00002361; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff8f81000040e4; ++ __m256i_out = __lasx_xvaddwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000012; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000012; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000012; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000012; ++ __m256i_out = __lasx_xvaddwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffa3; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000165a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffa3; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x000000000000165a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x40b240b330313031; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff7fff5d425d42; ++ *((unsigned long *)&__m256i_op1[1]) = 0x40b240b330313031; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fff7fff5d425d42; ++ *((unsigned long *)&__m256i_result[3]) = 0x000040b200002fd4; ++ *((unsigned long *)&__m256i_result[2]) = 0x00007fff0000739c; ++ *((unsigned long *)&__m256i_result[1]) = 0x000040b200002fd4; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007fff0000739c; ++ __m256i_out = __lasx_xvaddwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00003fef00003fea; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00003ff000003ff0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00003fef00003fea; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00003ff000003ff0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00003fef00003fea; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00003ff000003ff0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00003fef00003fea; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00003ff000003ff0; 
++ *((unsigned long *)&__m256i_result[3]) = 0x00007fde00007fd4; ++ *((unsigned long *)&__m256i_result[2]) = 0x00007fe000007fe0; ++ *((unsigned long *)&__m256i_result[1]) = 0x00007fde00007fd4; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007fe000007fe0; ++ __m256i_out = __lasx_xvaddwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000002e2100; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000001000010; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000012e2110; ++ 
__m256i_out = __lasx_xvaddwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000483800; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000483800; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000583800; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000100000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000583800; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000100000; ++ __m256i_out = __lasx_xvaddwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000044444443; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7bbbbbbbf7777778; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000044444443; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7bbbbbbbf7777778; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000007bbbbbbb; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000007bbbbbbb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000044444443; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000073333333; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000044444443; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000073333333; ++ __m256i_out = __lasx_xvaddwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000010000; ++ __m256i_out = __lasx_xvaddwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x007f807f007e8080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007f807f007e806f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x007f807f007e8080; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007f807f007e806f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000023; ++ *((unsigned long *)&__m256i_op1[1]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000023; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000007e8080; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000007e8092; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000007e8080; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000007e8092; ++ __m256i_out = __lasx_xvaddwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000062d4; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000006338; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0010000100000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0010000100000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0010000100000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0010000100000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff800080000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff800080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff80000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff80000000; ++ __m256i_out = __lasx_xvaddwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfe01fe01fc01fc01; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000003fc03bbc; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfe01fe01fc01fc01; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000003fc03bbc; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffc01fc01; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000003fc03bbc; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffc01fc01; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000003fc03bbc; ++ __m256i_out = __lasx_xvaddwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1b9763952fc4c101; ++ *((unsigned long *)&__m256i_op1[2]) = 0xe37affb42fc05f69; ++ *((unsigned long *)&__m256i_op1[1]) = 0x18b988e64facb558; ++ *((unsigned long *)&__m256i_op1[0]) = 0xe5fb66c81da8e5bb; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xe37affb42fc05f69; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x65fb66c81da8e5ba; ++ __m256i_out = __lasx_xvaddwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1010101010101012; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1010101010101012; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1010101010101093; ++ *((unsigned long *)&__m256i_op1[0]) = 0x1111111111111113; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x1010101110101011; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x1111111211111112; ++ __m256i_out = __lasx_xvaddwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x5980000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x5980000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[2]) = 0x5980000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x5980000000000000; ++ __m256i_out = __lasx_xvaddwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffefe00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffefe00000000; ++ __m256i_out = __lasx_xvaddwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvaddwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000002800000010; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000002800000010; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff000c0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff00040000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff0127000c0010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff012700040010; ++ __m256i_out = __lasx_xvaddwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc0008000c0008000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0xc0008000c0008000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xc0008000c0008000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xc0008000c0008000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x8001000180010000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x8001000180010000; ++ __m256i_out = __lasx_xvaddwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff800200000002; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff800200000002; ++ __m256i_out = __lasx_xvaddwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000020000000200; ++ __m256i_out = __lasx_xvaddwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffe97c020010001; ++ __m256i_out = __lasx_xvaddwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000001e001e001e0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000001e001e001e0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-2.c +new file mode 100644 +index 000000000..22528a14f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-2.c +@@ -0,0 +1,485 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0010100000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0010100000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0010000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0010000000000000; ++ __m256i_out = __lasx_xvaddwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffc0003fffa0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x01fb010201f900ff; ++ __m256i_out = __lasx_xvaddwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000005554; ++ *((unsigned long *)&__m256i_op1[2]) = 0xaaaa0000aaacfffe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000005554; ++ *((unsigned long *)&__m256i_op1[0]) = 0xaaaa0000aaacfffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000054; ++ *((unsigned long *)&__m256i_result[2]) = 0x00aa000000ac00fe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000054; ++ *((unsigned long *)&__m256i_result[0]) = 0x00aa000000ac00fe; ++ __m256i_out = __lasx_xvaddwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x017f01fe01ff01fe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x05960616020e0203; ++ *((unsigned long *)&__m256i_op0[1]) = 0x017f01fe01ff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x05960616020e0005; ++ *((unsigned long *)&__m256i_op1[3]) = 0x017f01fe01ff01fe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x05960616020e0203; ++ *((unsigned long *)&__m256i_op1[1]) = 0x017f01fe01ff0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x05960616020e0005; ++ *((unsigned long *)&__m256i_result[3]) = 0x00fe01fc01fe01fc; ++ *((unsigned long *)&__m256i_result[2]) = 0x012c002c001c0006; ++ *((unsigned long *)&__m256i_result[1]) = 0x00fe01fc01fe0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x012c002c001c000a; ++ __m256i_out = __lasx_xvaddwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xd207e90001fb16ef; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc8eab25698f97e90; ++ *((unsigned long *)&__m256i_op0[1]) = 0xd207e90001fb16ef; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc8eab25698f97e90; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_result[3]) = 0x0007000000fb00ef; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ea005600f90090; ++ *((unsigned long *)&__m256i_result[1]) = 0x0007000000fb00ef; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ea005600f90090; ++ __m256i_out = __lasx_xvaddwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffc03b1fc5e050; ++ *((unsigned long *)&__m256i_op0[2]) = 0x6a9e3fa2603a2000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffc03b1fc5e050; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6a9e3fa2603a2000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffc03fffffffc0; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffc00000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffc03fffffffc0; ++ *((unsigned long *)&__m256i_op1[0]) = 
0xffffc00000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x01fe007a01c40110; ++ *((unsigned long *)&__m256i_result[2]) = 0x019d00a2003a0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x01fe007a01c40110; ++ *((unsigned long *)&__m256i_result[0]) = 0x019d00a2003a0000; ++ __m256i_out = __lasx_xvaddwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fe36364661af18f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fe363637fe36363; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fe36364661af18f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fe363637fe36363; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00e30064001a008f; ++ *((unsigned long *)&__m256i_result[2]) = 0x00e3006300e30063; ++ *((unsigned long *)&__m256i_result[1]) = 0x00e30064001a008f; ++ *((unsigned long *)&__m256i_result[0]) = 0x00e3006300e30063; ++ __m256i_out = __lasx_xvaddwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000013; ++ __m256i_out = __lasx_xvaddwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000010000685e; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000020a4ffffbe4f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000010000685e; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000020a4ffffbe4f; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000010000005e; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000a400ff004f; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000010000005e; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000a400ff004f; ++ __m256i_out = __lasx_xvaddwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0002ffff00020002; ++ *((unsigned long *)&__m256i_op0[2]) = 0x04f504f104f504f5; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0002ffff00020002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x04f504f104f504f5; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000200ff00020002; ++ *((unsigned long *)&__m256i_result[2]) = 0x00f500f100f500f5; ++ *((unsigned long *)&__m256i_result[1]) = 0x000200ff00020002; ++ *((unsigned long *)&__m256i_result[0]) = 0x00f500f100f500f5; ++ __m256i_out = __lasx_xvaddwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000019410000e69a; ++ *((unsigned long *)&__m256i_op0[2]) = 
0xf259905a0c126604; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000883a00000f20; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6d3c2d3aa1c82947; ++ *((unsigned long *)&__m256i_op1[3]) = 0x372e9d75e8aab100; ++ *((unsigned long *)&__m256i_op1[2]) = 0xc5c085372cfabfba; ++ *((unsigned long *)&__m256i_op1[1]) = 0x31730b5beb7c99f5; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0658f2dc0eb21e3c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000b6b60001979a; ++ *((unsigned long *)&__m256i_result[2]) = 0x00011591000125be; ++ *((unsigned long *)&__m256i_result[1]) = 0x000093950000a915; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001201600004783; ++ __m256i_out = __lasx_xvaddwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffff6ff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffff6ff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000f6ff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000f6ff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8011ffee804c004c; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00faff0500c3ff3c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x80f900f980780078; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0057ffa800ceff31; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ff00; ++ *((unsigned long *)&__m256i_op1[2]) = 
0xff000000ff000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ff000000ff00; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff000000ff000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffee0000ff4c; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ff050000ff3c; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000fff90000ff78; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffa80000ff31; ++ __m256i_out = __lasx_xvaddwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000fffc7f7f; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffc000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000fffc7f7f; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffc000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8001b0b1b4b5dd9f; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8001b0b1b4b5dd9f; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000b0b100015d1e; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001fffe0001bfff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000b0b100015d1e; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001fffe0001bfff; ++ __m256i_out = __lasx_xvaddwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000fe200000fe1f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000fe200000fe1f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffc0ffc1; ++ *((unsigned long *)&__m256i_op0[2]) = 0x003f00000000003f; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffc0ffc1; ++ *((unsigned long *)&__m256i_op0[0]) = 0x003f00000000003f; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001fffe0001ffc0; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0001003e; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001fffe0001ffc0; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0001003e; ++ __m256i_out = __lasx_xvaddwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0020010101610000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0061200000610000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0020010101610000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0061200000610000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000101000000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00011fff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000101000000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00011fff0000ffff; ++ __m256i_out = __lasx_xvaddwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; 
++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvaddwev_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000013ffffffec; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000013ffffebd8; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000013ffffffec; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000013ffffebd8; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffec; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffebd8; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffec; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffebd8; ++ __m256i_out = __lasx_xvaddwev_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000c0007; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000c0007; ++ *((unsigned long *)&__m256i_op1[3]) = 0x3abb3abbbabababa; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0080000000800080; ++ *((unsigned long *)&__m256i_op1[1]) = 0x3abb3abbbabababa; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0080000000800080; ++ *((unsigned 
long *)&__m256i_result[3]) = 0x00000000babababa; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000008c0087; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000babababa; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000008c0087; ++ __m256i_out = __lasx_xvaddwev_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040; ++ __m256i_out = 
__lasx_xvaddwev_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvaddwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000003ff000003ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000003ff000003ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000a00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000010000000a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000a00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000010000000a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000010000000a; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000010000000a; ++ __m256i_out = __lasx_xvaddwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8060000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8060000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff0000ffffffff; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff0000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x805f0000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x805f0000ffffffff; ++ __m256i_out = __lasx_xvaddwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfe01fe010000fd02; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000003fc03fc0; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfe01fe010000fd02; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000003fc03fc0; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfe01fe010000fd02; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000003fc03fc0; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfe01fe010000fd02; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000003fc03fc0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007f807f80; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007f807f80; ++ __m256i_out = __lasx_xvaddwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xff1cff1cff1cff1c; ++ __m256i_out = __lasx_xvaddwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-3.c +new file mode 100644 +index 000000000..38a0a53d7 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-3.c +@@ -0,0 +1,515 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op1[0]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_result[3]) = 0x0036003200360032; ++ *((unsigned long *)&__m256i_result[2]) = 0x0036003200360032; ++ *((unsigned long *)&__m256i_result[1]) = 0x0036003200360032; ++ *((unsigned long *)&__m256i_result[0]) = 0x0036003200360032; ++ 
__m256i_out = __lasx_xvaddwev_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000170017; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000170017; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000170017; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000170017; ++ __m256i_out = __lasx_xvaddwev_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001fffe0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001fffe00010001; ++ 
*((unsigned long *)&__m256i_op0[1]) = 0x0001fffe0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001fffe00010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000100fe000000ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000100fe00010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x000100fe000000ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000100fe00010001; ++ __m256i_out = __lasx_xvaddwev_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff0020001d001f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff0020001d001f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned 
long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvaddwev_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x6100000800060005; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5ee1c073b800c916; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5ff00007fff9fff3; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0209fefb08140000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0003fffc00060000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000800000004; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000bf6e0000c916; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000030000fff3; ++ __m256i_out = __lasx_xvaddwev_w_hu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000b004a00440040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8020004a0011002a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000b004a00440040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8020004a0011002a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000004a00000040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000004a0000002a; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000004a00000040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000004a0000002a; ++ __m256i_out = __lasx_xvaddwev_w_hu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00001fff00001fff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00001fff00001fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00001fff00001fff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00001fff00001fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00001fff00001fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00001fff00001fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00001fff00001fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00001fff00001fff; ++ __m256i_out = __lasx_xvaddwev_w_hu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvaddwev_w_hu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000080800000808; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000080800000808; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_w_hu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x001a001a001a009a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x001a001a002a009a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x001a001a001a009a; ++ *((unsigned long *)&__m256i_op0[0]) = 0x001a001a002a009a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000001a000000da; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001a000000da; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000001a000000da; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001a000000da; ++ __m256i_out = __lasx_xvaddwev_w_hu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvaddwev_w_hu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000007ffffffce; ++ __m256i_out = __lasx_xvaddwev_w_hu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000000e7; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x00000000000001fe; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000001ce; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000001fe; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000001ce; ++ __m256i_out = __lasx_xvaddwev_w_hu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000ff0000; ++ __m256i_out = __lasx_xvaddwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007fffffff; ++ __m256i_out = 
__lasx_xvaddwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000010000; ++ __m256i_out = __lasx_xvaddwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001000000010000; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff000000010000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8011ffae800c000c; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00baff050083ff3c; ++ *((unsigned long *)&__m256i_op1[1]) = 0x80b900b980380038; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0017ffa8008eff31; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff800c000c; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000084ff3c; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff80380038; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000008fff31; ++ __m256i_out = __lasx_xvaddwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000001001f001e; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000001001f001e; ++ __m256i_out = __lasx_xvaddwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000ff00ff; ++ __m256i_out = __lasx_xvaddwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff10000fff10000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff10000fff10000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100f000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100f000ff; ++ __m256i_out = __lasx_xvaddwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff88ff88ff880000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff88ff88ff880000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff88ff88ff880000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff88ff88ff880000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffc0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000fff0ffc0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000ffc0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000fff0ffc0; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ff88ffc0; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ff78ffc0; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x00000000ff88ffc0; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ff78ffc0; ++ __m256i_out = __lasx_xvaddwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000016e00; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000016e00; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000016e00; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000016e00; ++ __m256i_out = __lasx_xvaddwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffff1cff1c; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffff1cff1c; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffff1cff1c; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffff1cff1c; ++ __m256i_out = __lasx_xvaddwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0xffffffffffffff1f; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffeff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffff1f; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffeff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0010ffc80010ff52; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff1ffca0011ffcb; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0010ffc80010ff52; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfff1ffca0011ffcb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff1ffca0011feca; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff1ffca0011feca; ++ __m256i_out = __lasx_xvaddwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000002; ++ __m256i_out = __lasx_xvaddwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000007fff0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000007fff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000017fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000017fff; ++ __m256i_out = __lasx_xvaddwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000004000000040; ++ __m256i_out = 
__lasx_xvaddwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-1.c +new file mode 100644 +index 000000000..a4dc565e9 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-1.c +@@ -0,0 +1,530 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x9240000000008025; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffff24affff8025; ++ *((unsigned long *)&__m256i_op0[1]) = 0xb2c0000000008006; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffb341ffff8006; ++ *((unsigned long *)&__m256i_op1[3]) = 0x9240000000008025; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffff24affff8025; ++ *((unsigned long *)&__m256i_op1[1]) = 0xb2c0000000008006; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffb341ffff8006; ++ *((unsigned long *)&__m256i_result[3]) = 0xff2400000000ff00; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffeffe4fffeff00; ++ *((unsigned long *)&__m256i_result[1]) = 0xff6400000000ff00; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffeff66fffeff00; ++ __m256i_out = __lasx_xvaddwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ 
*((unsigned long *)&__m256i_op0[3]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff04ff00ff00ff00; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff04ff00ff00ff00; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffefffefffefffe; ++ __m256i_out = __lasx_xvaddwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffe0000fffe0002; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffe0000fffe0002; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff0000fffeffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff0000fffeffff; ++ __m256i_out = __lasx_xvaddwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffff00ffffff00; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffff00ffffff00; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffc0003fffc0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffc0003fffc0; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff0000ffff; ++ __m256i_out = __lasx_xvaddwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff7ffeffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff7ffeffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000fc300000fc40; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000fc300000fc40; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x007f007bfffffffb; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x007f007bfffffffb; ++ __m256i_out = __lasx_xvaddwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000201220001011c; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000201220001011c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvaddwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffe0ffe000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fa0001fff808000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffe0ffe000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fa0001fff808000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x007f0000ffffff80; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x007f0000ffffff80; ++ __m256i_out = 
__lasx_xvaddwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op1[2]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op1[0]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0004000400040004; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0004000400040004; ++ __m256i_out = __lasx_xvaddwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00007ff000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00007ff000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00007ff000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvaddwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000ff0000; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000001fe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000001fe; ++ __m256i_out = __lasx_xvaddwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000d24; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000d24; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[3]) = 0x4ffc3f7800000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x3fc03f6400000000; ++ *((unsigned long *)&__m256i_op1[1]) 
= 0x4ffc3f7800000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x3fc03f6400000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000050fd00000101; ++ *((unsigned long *)&__m256i_result[2]) = 0x000040c100000101; ++ *((unsigned long *)&__m256i_result[1]) = 0x000050fd00000101; ++ *((unsigned long *)&__m256i_result[0]) = 0x000040c100000101; ++ __m256i_out = __lasx_xvaddwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvaddwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000c9; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000c9; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvaddwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000800080008000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000800080008000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000800080008000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000800080008000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000007fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000007fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000007fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000007fff; ++ __m256i_out = __lasx_xvaddwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvaddwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000006d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000400008; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000006d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000400008; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000800000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000800080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc9d8080067f50020; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc70000020000c000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7ffffffffffff7ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffe06df0d7; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7ffffffffffff7ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffbe8b470f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00007ffffffff7ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x49d8080067f4f81f; ++ __m256i_out = __lasx_xvaddwod_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ff80; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff80; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvaddwod_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffff605a; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffff605a; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffebeb8; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffebeb8; ++ __m256i_out = __lasx_xvaddwod_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1111111111111111; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1111111111111111; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1111111111111111; ++ *((unsigned long *)&__m256i_op1[0]) = 0x1111111111111111; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x1111111111111111; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x1111111111111111; ++ __m256i_out = __lasx_xvaddwod_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffeffffffdd; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffdc; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvaddwod_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000d6d6d; ++ __m256i_out = __lasx_xvaddwod_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-2.c +new file mode 100644 +index 000000000..a2fbe9ed0 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-2.c +@@ -0,0 +1,560 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ffe00007f000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff017e01fe; ++ *((unsigned long *)&__m256i_result[1]) = 0x017e00ff017e00ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff017e01fe; ++ __m256i_out = __lasx_xvaddwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xb70036db12c4007e; ++ *((unsigned long *)&__m256i_op0[2]) = 0xb7146213fc1e0049; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000fefe02fffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xb71c413b199d04b5; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00b7003600120000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00b7006200fc0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000fe00ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00b7004100190004; ++ __m256i_out = __lasx_xvaddwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000007aff7c00; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffd017d00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000007aff7c00; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffd017d00; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000008e7c00; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000067751500; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000008e7c00; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000067751500; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000007a00f8; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff01640092; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000007a00f8; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff01640092; ++ __m256i_out = __lasx_xvaddwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffa0078fffa0074; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffa0078fffa0074; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff000000ff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[0]) = 
0x00ff000000ff0000; ++ __m256i_out = __lasx_xvaddwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff00ff00ff; ++ __m256i_out = __lasx_xvaddwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff008000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff008000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff008000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff008000000000; ++ __m256i_out = __lasx_xvaddwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffff0020; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff8001ffff0001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffff0020; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff8001ffff0001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff008000ff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff008000ff0000; ++ __m256i_out = __lasx_xvaddwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 
0xffff000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00000000; ++ __m256i_out = __lasx_xvaddwod_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000800000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000804000004141; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00017fff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00007fff0000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000044444443; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7bbbbbbbf7777778; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000044444443; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7bbbbbbbf7777778; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000004444; ++ *((unsigned long *)&__m256i_result[2]) = 0x00007bbb0000f777; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000004444; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007bbb0000f777; ++ __m256i_out = __lasx_xvaddwod_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4010000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3e6ce7d9cb7afb62; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4010000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3e6ce7d9cb7afb62; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000401000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00003e6c0000cb7a; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000401000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00003e6c0000cb7a; ++ __m256i_out = __lasx_xvaddwod_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3aadec4f6c7975b1; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3abac5447fffca89; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3aadec4f6c7975b1; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3abac5447fffca89; ++ *((unsigned long *)&__m256i_op1[3]) = 0x3aadec4f6c7975b1; ++ *((unsigned long *)&__m256i_op1[2]) = 0x3abac5447fffca89; ++ *((unsigned long *)&__m256i_op1[1]) = 0x3aadec4f6c7975b1; ++ *((unsigned long *)&__m256i_op1[0]) = 0x3abac5447fffca89; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000755a0000d8f2; ++ *((unsigned long *)&__m256i_result[2]) = 0x000075740000fffe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000755a0000d8f2; ++ *((unsigned long *)&__m256i_result[0]) = 0x000075740000fffe; ++ __m256i_out = __lasx_xvaddwod_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffee00ba; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffee00ba; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffee; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffee; ++ __m256i_out = __lasx_xvaddwod_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op1[3]) = 0x9ffffd8020010001; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffff9fffffff9; ++ *((unsigned long *)&__m256i_op1[1]) = 0x9ffffd8020010001; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffff9fffffff9; ++ *((unsigned long *)&__m256i_result[3]) = 0x00009fff00002001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00009fff00002001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvaddwod_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvaddwod_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000001a00000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000900000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000001a00000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000900000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[3]) = 0x000000000000001a; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000001a; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000009; ++ __m256i_out = __lasx_xvaddwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000800000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000800000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000800000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000800000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000800000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000800000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000800000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000800000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100010000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100010000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100010000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100010000; ++ __m256i_out = 
__lasx_xvaddwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvaddwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000003f0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000030007; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000003f0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000030007; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00153f1594ea02ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffffffff0100; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff15c1ea95ea02ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000153f15; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ff15c1ea; ++ __m256i_out = __lasx_xvaddwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff040000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0xffff040000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00fe00fe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff00fe00fe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00fe00fe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff00fe00fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100fe04ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100fe04ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000ff00ff; ++ __m256i_out = __lasx_xvaddwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvaddwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000003ffff; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvaddwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00f9f9f900000002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00f9f9f900000002; ++ *((unsigned long *)&__m256i_op1[3]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_op1[2]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_op1[1]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_op1[0]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000f9f9f9f9; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000faf3f3f2; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000f9f9f9f9; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000faf3f3f2; ++ __m256i_out = __lasx_xvaddwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000001fff000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000029170; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000001fff000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000029170; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffff7fffffff; ++ __m256i_out = __lasx_xvaddwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0xc3f0c3f0c3f0c3f0; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc3f0c3f0c3f0c3f0; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc3f0c3f0c3f0c3f0; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc3f0c3f0c3f0c3f0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xc3f0c3f0c3f0c3f0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xc3f0c3f0c3f0c3f0; ++ __m256i_out = __lasx_xvaddwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_op1[3]) = 0xe7e7e7e7e7e7e7e7; ++ *((unsigned long *)&__m256i_op1[2]) = 0xe7e7e7e7e7e7e7e7; ++ *((unsigned long *)&__m256i_op1[1]) = 0xe7e7e7e7e7e7e7e7; ++ *((unsigned long *)&__m256i_op1[0]) = 0xe7e7e7e7e7e7e7e7; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0xe6e8e6e8e6e8d719; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0xe6e8e6e8e6e8d719; ++ __m256i_out = __lasx_xvaddwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000800400010006d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000800400010006d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000800400010006d; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000800400010006d; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000003fffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000003fffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvaddwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-3.c +new file mode 100644 +index 000000000..8c98fc4be +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-3.c +@@ -0,0 +1,485 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, 
fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvaddwod_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000033e87ef1; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000002e2100; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000033007e; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000021; ++ __m256i_out = __lasx_xvaddwod_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned 
long *)&__m256i_op0[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_result[3]) = 0x0020002000400040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0020002000400040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0020002000400040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0020002000400040; ++ __m256i_out = __lasx_xvaddwod_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000f000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000f000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvaddwod_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x007fc0083fc7c007; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x007fc0083fc7c007; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffc0003fffc0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffc0003fffc0; ++ __m256i_out = __lasx_xvaddwod_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffdbbbcf; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffb8579f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffdbbbcf; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffb8579f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000ff00bb; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000ff0057; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000ff00bb; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000ff0057; ++ __m256i_out = __lasx_xvaddwod_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff00ff00ff; ++ __m256i_out = __lasx_xvaddwod_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000005060503; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000073737; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000050007; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000039; ++ __m256i_out = __lasx_xvaddwod_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ff80; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000468600007f79; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000f3280000dfff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000007070707; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0102040000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000020100; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0703020000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000707; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000010200000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000070300000000; ++ __m256i_out = 
__lasx_xvaddwod_w_hu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00007fffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00007fffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_w_hu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff800000000000; ++ __m256i_out = __lasx_xvaddwod_w_hu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7c007c007c007c00; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001000000010000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000100640000ff92; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000000010000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000100640000ff92; ++ *((unsigned long *)&__m256i_result[3]) = 0x00007c0100007c01; ++ *((unsigned long *)&__m256i_result[2]) = 0x00007c0100007c00; ++ *((unsigned long *)&__m256i_result[1]) = 0x00007c0100007c01; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007c0100007c00; ++ __m256i_out = __lasx_xvaddwod_w_hu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_w_hu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000001ffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfe00000000000000; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0x0000000001ffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000001ff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffe0000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000001ff; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffe0000000000; ++ __m256i_out = __lasx_xvaddwod_w_hu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_w_hu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvaddwod_w_hu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000048; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000007d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000048; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000007d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000800400010006d; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000800400010006d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000800000010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000800000010; ++ __m256i_out = __lasx_xvaddwod_w_hu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000fffe00009fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000fffe00002001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000fffe00009fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000fffe00002001; ++ __m256i_out = __lasx_xvaddwod_w_hu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000027; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000027; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_w_hu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001000100010080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010080; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_w_hu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000007f00; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff7ffe7fffeffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffd84900000849; ++ *((unsigned long *)&__m256i_op0[0]) = 0x07fffc670800f086; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x311d9b643ec1fe01; ++ *((unsigned long *)&__m256i_op1[0]) = 0x344ade20fe00fd01; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000007f00; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x311d73ad3ec2064a; ++ __m256i_out = __lasx_xvaddwod_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff80cbfffffdf8; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000081500000104; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffa4fffffffd; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000700000002; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff80cbfffffdf8; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffa4fffffffd; ++ __m256i_out = __lasx_xvaddwod_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000008050501; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0100000100000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000008050501; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0100000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000008050501; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000008050501; ++ __m256i_out = __lasx_xvaddwod_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x01fe007a01c40110; ++ *((unsigned long *)&__m256i_op0[2]) = 0x019d00a20039fff9; ++ *((unsigned long *)&__m256i_op0[1]) = 0x01fe007a01c40110; ++ *((unsigned long *)&__m256i_op0[0]) = 0x019d00a2003a0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000003ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x01fe007a01c40110; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x01fe007a01c40110; ++ __m256i_out = 
__lasx_xvaddwod_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x001ffffe00200000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x001ffffe00200000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff0020001d001f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffff0020001d001f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff0020001d001f; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff0020001d001f; ++ __m256i_out = __lasx_xvaddwod_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000fef0ff0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000fef0ff0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000400080ffc080; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000080ff0080; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000400080ffc080; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000080ff0080; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000400080ffc080; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000400080ffc080; ++ __m256i_out = __lasx_xvaddwod_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x007f010000000100; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x007f010000000100; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-1.c +new file mode 100644 +index 000000000..2a4f29b50 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-1.c +@@ -0,0 +1,650 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, 
__m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsadd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvsadd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsadd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x003f003f003f003f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x003f003f003f003f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xff3eff3eff3eff3e; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xff3eff3eff3eff3e; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsadd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xa020202020202020; ++ *((unsigned long *)&__m256i_op1[2]) = 0xa020202020206431; ++ *((unsigned long *)&__m256i_op1[1]) = 0xa020202020202020; ++ *((unsigned long *)&__m256i_op1[0]) = 0xa020202020206431; ++ *((unsigned long *)&__m256i_result[3]) = 0xa020202020202020; ++ *((unsigned long *)&__m256i_result[2]) = 0xa020202020206431; ++ *((unsigned long *)&__m256i_result[1]) = 0xa020202020202020; ++ *((unsigned long *)&__m256i_result[0]) = 0xa020202020206431; ++ __m256i_out = __lasx_xvsadd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffee0000ff4c; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ff050000ff3c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000fff90000ff78; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffa80000ff31; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffee0000ff4c; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ff050000ff3c; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000fff90000ff78; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffa80000ff31; ++ __m256i_out = __lasx_xvsadd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ff810011; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0x00000000ff810011; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ff810011; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ff810011; ++ __m256i_out = __lasx_xvsadd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffff00ffffff00; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffff00ffffff00; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000200000008; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000200000008; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffff00ffffff00; ++ *((unsigned long *)&__m256i_result[2]) = 0xff00000200000008; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffff00ffffff00; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00000200000008; ++ __m256i_out = __lasx_xvsadd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsadd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7efefefe80ffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7efefefe80ffffff; ++ __m256i_out = __lasx_xvsadd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsadd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00007fde00007fd4; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x00007fe000007fe0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00007fde00007fd4; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00007fe000007fe0; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff7eddffff7ed3; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff7edfffff7edf; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff7eddffff7ed3; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff7edfffff7edf; ++ __m256i_out = __lasx_xvsadd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffc81aca; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000003a0a9512; ++ *((unsigned long *)&__m256i_op0[1]) = 0x280ac9da313863f4; ++ *((unsigned long *)&__m256i_op0[0]) = 0xe032c739adcc6bbd; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffe000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff00010000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000100020001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000fffffffffffe; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffdffffffc81aca; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff3a0b9512; ++ *((unsigned long *)&__m256i_result[1]) = 0x280bc9db313a63f5; ++ *((unsigned long *)&__m256i_result[0]) = 0xe032c738adcb6bbb; ++ __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1fe01e0000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1fe01e0000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x1fe01e0100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x1fe01e0100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000fffffffa; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000fffffffa; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000fffffffa; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fffffffa; ++ __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0xf800d0d8ffffeecf; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000383fffffdf0d; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf800d0d8ffffeecf; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000383fffffdf0d; ++ *((unsigned long *)&__m256i_op1[3]) = 0xf000f000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xf000f000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xe800c0d8fffeeece; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff383efffedf0c; ++ *((unsigned long *)&__m256i_result[1]) = 0xe800c0d8fffeeece; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff383efffedf0c; ++ __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff0000ffff0001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff0000ffff0001; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff0000ffff0001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff0000ffff0001; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffe0000fffe0002; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffe0000fffe0002; ++ __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7575757575757575; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_result[3]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_result[2]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_result[1]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_result[0]) = 0x7575757575757575; ++ __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfc003802fc000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000fc00fc00; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfc003802fc000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000fc00fc00; ++ *((unsigned long *)&__m256i_result[3]) = 0xfc003802fc000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x00000000fc00fc00; ++ *((unsigned long *)&__m256i_result[1]) = 0xfc003802fc000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fc00fc00; ++ __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000008; ++ __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000007b00f9007e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000007b00f9007e; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000007b00f9007e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000007b00f9007e; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000f601f200fc; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000f601f200fc; ++ __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000007f00000022; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000007f00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000007f00000022; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000007f00000000; ++ __m256i_out = __lasx_xvsadd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsadd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_result[2]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_result[1]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_result[0]) = 0x1c1b1a191c1b1a19; ++ __m256i_out = __lasx_xvsadd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0002ff80ffb70000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffb7ff80ffd0ffd8; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00010000002fff9e; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffb5ff80ffd0ffd8; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0002ff80ffb70000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffb7ff80ffd0ffd8; ++ *((unsigned long *)&__m256i_result[1]) = 0x00010000002fff9e; ++ *((unsigned long *)&__m256i_result[0]) = 0xffb5ff80ffd0ffd8; ++ __m256i_out = __lasx_xvsadd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff800000000000; ++ __m256i_out = 
__lasx_xvsadd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000008; ++ __m256i_out = __lasx_xvsadd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000003fff3fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3fff3fff3fff4000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000403f3fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000003fff3fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x3fff3fff3fff4000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000403f3fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x7ffe7ffe7ffe7ffe; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007ffe7ffe; ++ *((unsigned long *)&__m256i_result[1]) = 0x7ffe7ffe7ffe8000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000807e7ffe; ++ __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x6161616161616161; ++ *((unsigned long *)&__m256i_op0[2]) = 0x6161616161616161; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x6161616161616161; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6161616161616161; ++ *((unsigned long *)&__m256i_op1[3]) = 0x6161616161616161; ++ *((unsigned long *)&__m256i_op1[2]) = 0x6161616161616161; ++ *((unsigned long *)&__m256i_op1[1]) = 0x6161616161616161; ++ *((unsigned long *)&__m256i_op1[0]) = 0x6161616161616161; ++ *((unsigned long *)&__m256i_result[3]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256i_result[2]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256i_result[1]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256i_result[0]) = 0xc2c2c2c2c2c2c2c2; ++ __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x9ff87ef07f7f817f; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_result[3]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_result[2]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_result[1]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f807f007f7f817f; ++ __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff0020001d001f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffff0020001d001f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff0020001d001f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff0020001d001f; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000800200027; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000800200028; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000800200027; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000800200028; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000006040190d; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000006040190d; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000860601934; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000800200028; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000860601934; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000800200028; ++ __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000000d; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000000d; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000000d; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000000d; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000010001; ++ __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-2.c +new file mode 100644 +index 000000000..a3afc9811 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-2.c +@@ -0,0 +1,350 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000001700080; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000001700080; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000001700080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000001700080; ++ __m256i_out = __lasx_xvsadd_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsadd_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsadd_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsadd_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_result[3]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = 
__lasx_xvsadd_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_result[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_result[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_result[0]) = 0xf7f7f7f7f7f7f7f7; ++ __m256i_out = __lasx_xvsadd_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000200000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000200000000; ++ __m256i_out = __lasx_xvsadd_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsadd_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsadd_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x43d03bfff827ea21; ++ *((unsigned long *)&__m256i_op0[2]) = 0x43dac1f2a3804ff0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x43d03bfff827e9f9; ++ *((unsigned long *)&__m256i_op0[0]) = 0x43e019c657c7d050; ++ *((unsigned long *)&__m256i_op1[3]) = 0x43d03bfff827ea21; ++ *((unsigned long *)&__m256i_op1[2]) = 0x43dac1f2a3804ff0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x43d03bfff827e9f9; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0x43e019c657c7d050; ++ *((unsigned long *)&__m256i_result[3]) = 0x86ff76ffff4eff42; ++ *((unsigned long *)&__m256i_result[2]) = 0x86ffffffffff9eff; ++ *((unsigned long *)&__m256i_result[1]) = 0x86ff76ffff4effff; ++ *((unsigned long *)&__m256i_result[0]) = 0x86ff32ffaeffffa0; ++ __m256i_out = __lasx_xvsadd_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff8910ffff7e01; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff3573ffff8960; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffff8910ffff1ca9; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffff5e5ffff8130; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff8910ffff7e01; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff3573ffff8960; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff8910ffff1ca9; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffff5e5ffff8130; ++ __m256i_out = __lasx_xvsadd_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000f90; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000f90; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000f90; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000f90; ++ __m256i_out = __lasx_xvsadd_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsadd_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsadd_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffe200000020; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000fffe00008000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffe200000020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000fffe00008000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_result[3]) = 0x7575ffff75757595; ++ *((unsigned long *)&__m256i_result[2]) = 0x7575ffff7575f575; ++ *((unsigned long *)&__m256i_result[1]) = 0x7575ffff75757595; ++ *((unsigned long *)&__m256i_result[0]) = 0x7575ffff7575f575; ++ __m256i_out = __lasx_xvsadd_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000f0f0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000f0f0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000f0f0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000f0f0; ++ __m256i_out = __lasx_xvsadd_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000001400; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000003c01ff9; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000003c01ff9; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000001400; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000003c01ff9; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000003c01ff9; ++ __m256i_out = __lasx_xvsadd_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long *)&__m256i_op0[2]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long *)&__m256i_op0[0]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long *)&__m256i_result[2]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long *)&__m256i_result[1]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long *)&__m256i_result[0]) = 0x45c5c5c545c5c5c5; ++ __m256i_out = __lasx_xvsadd_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000e7; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000007; ++ __m256i_out = __lasx_xvsadd_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000080; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000080; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000080; ++ __m256i_out = __lasx_xvsadd_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvsadd_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsadd_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +-- +2.33.0 + diff --git a/LoongArch-Add-tests-for-ASX-vector-xvand-xvandi-xvan.patch b/LoongArch-Add-tests-for-ASX-vector-xvand-xvandi-xvan.patch new file mode 100644 index 0000000000000000000000000000000000000000..3b8855b896519c258d162297a728eb2a85317627 --- /dev/null +++ b/LoongArch-Add-tests-for-ASX-vector-xvand-xvandi-xvan.patch @@ -0,0 +1,1854 @@ +From ceef99197d4db1d34e5c8aeae2b5492d831685d0 Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Tue, 12 Sep 2023 15:42:34 +0800 +Subject: [PATCH 105/124] LoongArch: Add tests for ASX vector + xvand/xvandi/xvandn/xvor/xvori/ xvnor/xvnori/xvxor/xvxori instructions. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lasx/lasx-xvand.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvandi.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvandn.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvnor.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvnori.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvor.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvori.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvorn.c: New test. 
+ * gcc.target/loongarch/vector/lasx/lasx-xvxor.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvxori.c: New test. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lasx/lasx-xvand.c | 155 +++++++++++ + .../loongarch/vector/lasx/lasx-xvandi.c | 196 ++++++++++++++ + .../loongarch/vector/lasx/lasx-xvandn.c | 125 +++++++++ + .../loongarch/vector/lasx/lasx-xvnor.c | 170 ++++++++++++ + .../loongarch/vector/lasx/lasx-xvnori.c | 152 +++++++++++ + .../loongarch/vector/lasx/lasx-xvor.c | 215 +++++++++++++++ + .../loongarch/vector/lasx/lasx-xvori.c | 141 ++++++++++ + .../loongarch/vector/lasx/lasx-xvorn.c | 245 ++++++++++++++++++ + .../loongarch/vector/lasx/lasx-xvxor.c | 185 +++++++++++++ + .../loongarch/vector/lasx/lasx-xvxori.c | 163 ++++++++++++ + 10 files changed, 1747 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvand.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandn.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnor.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnori.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvor.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvori.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvorn.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxor.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxori.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvand.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvand.c +new file mode 100644 +index 000000000..e485786dd +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvand.c +@@ -0,0 +1,155 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w 
-fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvand_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvand_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvand_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvand_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvand_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvand_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfefee00000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 
0xfefee00000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[3]) = 0xfefee00000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfefee00000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvand_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000004843ffdff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000004843ffdff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvand_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff1cff1cff1cff1c; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000001c; ++ __m256i_out = __lasx_xvand_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandi.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandi.c +new file mode 100644 +index 000000000..26cddc53a +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandi.c +@@ -0,0 +1,196 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvandi_b (__m256i_op0, 0xe2); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000505; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvandi_b (__m256i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x001175f10e4330e8; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff8f0842ff29211e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffff8d9ffa7103d; ++ *((unsigned long *)&__m256i_result[3]) = 0x001151510a431048; ++ *((unsigned long *)&__m256i_result[2]) = 0x5b0b08425b09011a; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x5b5b58595b031019; ++ __m256i_out = __lasx_xvandi_b (__m256i_op0, 0x5b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_result[3]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_result[2]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_result[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_result[0]) = 0x0400040004000400; ++ __m256i_out = __lasx_xvandi_b (__m256i_op0, 0x2d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[2]) = 
0xfffffff900000003; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffff900000003; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x3f3f3f3900000003; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x3f3f3f3900000003; ++ __m256i_out = __lasx_xvandi_b (__m256i_op0, 0x3f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xbabababababababa; ++ *((unsigned long *)&__m256i_result[2]) = 0xbabababababababa; ++ *((unsigned long *)&__m256i_result[1]) = 0xbabababababababa; ++ *((unsigned long *)&__m256i_result[0]) = 0xbabababababababa; ++ __m256i_out = __lasx_xvandi_b (__m256i_op0, 0xba); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00ffffff00ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00ffffff00ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[2]) = 0x4000404040004040; ++ *((unsigned long *)&__m256i_result[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[0]) = 0x4000404040004040; ++ __m256i_out = __lasx_xvandi_b (__m256i_op0, 0x40); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvandi_b (__m256i_op0, 0x3f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffff3c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffff31; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x5e5e5e5e5e5e5e1c; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x5e5e5e5e5e5e5e10; ++ __m256i_out = __lasx_xvandi_b (__m256i_op0, 0x5e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvandi_b (__m256i_op0, 0x86); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7f70000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7f70000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x7f70000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f70000000000000; ++ __m256i_out = __lasx_xvandi_b (__m256i_op0, 0x7f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvandi_b (__m256i_op0, 0xa3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvandi_b (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvandi_b (__m256i_op0, 0x98); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvandi_b (__m256i_op0, 0xd9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvandi_b (__m256i_op0, 0xcc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandn.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandn.c +new file mode 100644 +index 000000000..bc3590c21 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandn.c +@@ -0,0 +1,125 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, 
int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1828f0e09bad7249; ++ *((unsigned long *)&__m256i_op0[2]) = 0x07ffc1b723953cec; ++ *((unsigned long *)&__m256i_op0[1]) = 0x61f2e9b333aab104; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6bf742aa0d7856a0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000019410000e69a; ++ *((unsigned long *)&__m256i_op1[2]) = 0xf259905a09c23be0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000883a00000f20; ++ *((unsigned long *)&__m256i_op1[0]) = 0x6d3c2d3a89167aeb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000090100008492; ++ *((unsigned long *)&__m256i_result[2]) = 0xf000104808420300; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000800000e20; ++ *((unsigned long *)&__m256i_result[0]) = 0x04082d108006284b; ++ __m256i_out = __lasx_xvandn_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvandn_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ 
++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0002000200020002; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffdfffdfffdfffd; ++ __m256i_out = __lasx_xvandn_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1020102010201020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1020102010201020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1020102010201020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1020102010201020; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xefdfefdf00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xefdfefdfefdfefdf; ++ *((unsigned long *)&__m256i_result[1]) = 0xefdfefdf00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xefdfefdfefdfefdf; ++ __m256i_out = __lasx_xvandn_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000008; ++ __m256i_out = __lasx_xvandn_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000d6d6d; ++ __m256i_out = __lasx_xvandn_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0fff0fff0fff0fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0fff0fff0fff0fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000010101010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101000000010000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000010101010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101000000010000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvandn_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnor.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnor.c +new file mode 100644 +index 000000000..3a491ecab +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnor.c +@@ -0,0 +1,170 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x40d74f979f99419f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xbf28b0686066be60; ++ __m256i_out = __lasx_xvnor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000f6ff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000f6ff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffff6ff00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffff6ff00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000900ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000900ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvnor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8888888808888888; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0888888888888888; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8888888808888888; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0888888888888888; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x77777777f7777777; ++ *((unsigned long *)&__m256i_result[2]) = 0xf777777777777777; ++ *((unsigned long *)&__m256i_result[1]) = 0x77777777f7777777; ++ *((unsigned long *)&__m256i_result[0]) = 0xf777777777777777; ++ __m256i_out = __lasx_xvnor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvnor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x40ff40ff40ff40ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x407b40ff40ff40f1; ++ *((unsigned long *)&__m256i_op0[1]) = 0x40ff40ff40ff40ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x407b40ff40ff40f1; ++ *((unsigned long *)&__m256i_op1[3]) = 0x40ff40ff40ff40ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x407b40ff40ff40f1; ++ *((unsigned long *)&__m256i_op1[1]) = 0x40ff40ff40ff40ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x407b40ff40ff40f1; ++ *((unsigned long *)&__m256i_result[3]) = 0xbf00bf00bf00bf00; ++ *((unsigned long *)&__m256i_result[2]) = 0xbf84bf00bf00bf0e; ++ *((unsigned long *)&__m256i_result[1]) = 0xbf00bf00bf00bf00; ++ *((unsigned long *)&__m256i_result[0]) = 0xbf84bf00bf00bf0e; ++ __m256i_out = __lasx_xvnor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000033; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000033; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000420080000000; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0x5fff5fff607f0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000420080000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x5fff5fff607f0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffbdff7fffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xa000a0009f80ffcc; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffbdff7fffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xa000a0009f80ffcc; ++ __m256i_out = __lasx_xvnor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op0[2]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op0[1]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op0[0]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op1[3]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op1[2]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op1[1]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op1[0]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_result[3]) = 0x6f6f6f6f6f6f6f6f; ++ *((unsigned long *)&__m256i_result[2]) = 0x6f6f6f6f6f6f6f6f; ++ *((unsigned long *)&__m256i_result[1]) = 0x6f6f6f6f6f6f6f6f; ++ *((unsigned long *)&__m256i_result[0]) = 0x6f6f6f6f6f6f6f6f; ++ __m256i_out = __lasx_xvnor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000fffcfffcfffc; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000fffcfffcfffc; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000003fff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000003fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000003fff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000003fff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff000300030000; ++ *((unsigned long *)&__m256i_result[2]) = 
0xffffffffffffc000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff000300030000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffc000; ++ __m256i_out = __lasx_xvnor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvnor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x800fffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x800fffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x800fffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x800fffffffffffff; ++ __m256i_out = __lasx_xvnor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ 
return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnori.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnori.c +new file mode 100644 +index 000000000..995a34c18 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnori.c +@@ -0,0 +1,152 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long *)&__m256i_result[2]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long *)&__m256i_result[1]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long *)&__m256i_result[0]) = 0x45c5c5c545c5c5c5; ++ __m256i_out = __lasx_xvnori_b (__m256i_op0, 0x3a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000007773; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000003373; ++ *((unsigned long *)&__m256i_result[3]) = 0xbbbbbbbbbbbbbbbb; ++ *((unsigned long *)&__m256i_result[2]) = 0xbbbbbbbbbbbb8888; ++ *((unsigned long 
*)&__m256i_result[1]) = 0xbbbbbbbbbbbbbbbb; ++ *((unsigned long *)&__m256i_result[0]) = 0xbbbbbbbbbbbb8888; ++ __m256i_out = __lasx_xvnori_b (__m256i_op0, 0x44); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_result[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_result[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_result[0]) = 0xf7f7f7f7f7f7f7f7; ++ __m256i_out = __lasx_xvnori_b (__m256i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xdededededededede; ++ *((unsigned long *)&__m256i_result[2]) = 0xdededededededede; ++ *((unsigned long *)&__m256i_result[1]) = 0xdededededededede; ++ *((unsigned long *)&__m256i_result[0]) = 0xdededededededede; ++ __m256i_out = __lasx_xvnori_b (__m256i_op0, 0x21); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvnori_b (__m256i_op0, 0x33); 
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_result[2]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_result[1]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_result[0]) = 0x9090909090909090; ++ __m256i_out = __lasx_xvnori_b (__m256i_op0, 0x6f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[2]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[0]) = 0x0808080808080808; ++ __m256i_out = __lasx_xvnori_b (__m256i_op0, 0xf7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x5858585858585858; ++ *((unsigned long *)&__m256i_result[2]) = 0x5858585858585858; ++ *((unsigned long *)&__m256i_result[1]) = 0x5858585858585858; ++ *((unsigned long *)&__m256i_result[0]) = 0x5858585858585858; ++ __m256i_out = __lasx_xvnori_b (__m256i_op0, 0xa7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long *)&__m256i_result[2]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long *)&__m256i_result[1]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long *)&__m256i_result[0]) = 0x3d3d3d3d3d3d3d3d; ++ __m256i_out = __lasx_xvnori_b (__m256i_op0, 0xc2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000010; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000010; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x9d9d9d9d9d9d9d8d; ++ *((unsigned long *)&__m256i_result[2]) = 0x9d9d9d9d9d9d9d9d; ++ *((unsigned long *)&__m256i_result[1]) = 0x9d9d9d9d9d9d9d8d; ++ *((unsigned long *)&__m256i_result[0]) = 0x9d9d9d9d9d9d9d9d; ++ __m256i_out = __lasx_xvnori_b (__m256i_op0, 0x62); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x2a2a2a2a2a2a2a2a; ++ *((unsigned long *)&__m256i_result[2]) = 0x2a2a2a2a2a2a2a2a; ++ *((unsigned long *)&__m256i_result[1]) = 0x2a2a2a2a2a2a2a2a; ++ *((unsigned long *)&__m256i_result[0]) = 0x2a2a2a2a2a2a2a2a; ++ __m256i_out = __lasx_xvnori_b (__m256i_op0, 0xd5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000081220000812c; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000812000008120; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000081220000812c; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000812000008120; ++ *((unsigned 
long *)&__m256i_result[3]) = 0xe9e968c9e9e968c1; ++ *((unsigned long *)&__m256i_result[2]) = 0xe9e968c9e9e968c9; ++ *((unsigned long *)&__m256i_result[1]) = 0xe9e968c9e9e968c1; ++ *((unsigned long *)&__m256i_result[0]) = 0xe9e968c9e9e968c9; ++ __m256i_out = __lasx_xvnori_b (__m256i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvor.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvor.c +new file mode 100644 +index 000000000..27eef710d +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvor.c +@@ -0,0 +1,215 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fff01fd7fff7fff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00007fff7fff7fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fff01fd7fff7fff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00007fff7fff7fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x7fff01fd7fff7fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007fff7fff7fff; ++ __m256i_out = __lasx_xvor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000005e02; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000005e02; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000005e02; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000005e02; ++ __m256i_out = __lasx_xvor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000154dc84; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000089; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000154dc84; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000089; ++ __m256i_out = __lasx_xvor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fe37fe3001d001d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff7fff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fe37fe3001d001d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff7fff0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000001e0007ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000001e0007ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000001e0007ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000001e0007ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fe37fff001fffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fe37fff001fffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff7fffffff; ++ __m256i_out = __lasx_xvor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x003f60041f636003; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x003f60041f636003; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x003f60041f636003; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x003f60041f636003; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x003f60041f636003; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x003f60041f636003; ++ __m256i_out = __lasx_xvor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfff1fff1fff1fff1; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff1fff1fff1fff1; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfff1fff1fff1fff1; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfff1fff1fff1fff1; ++ *((unsigned long *)&__m256i_result[3]) = 0xfff1fff1fff1fff1; ++ *((unsigned long 
*)&__m256i_result[2]) = 0xfff1fff1fff1fff1; ++ *((unsigned long *)&__m256i_result[1]) = 0xfff1fff1fff1fff1; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff1fff1fff1fff1; ++ __m256i_out = __lasx_xvor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000100; ++ __m256i_out = __lasx_xvor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000100; ++ __m256i_out = __lasx_xvor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff00ff00ff; ++ __m256i_out = __lasx_xvor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000ff800000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000ff800000ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[2]) = 0x800080ff800080ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[0]) = 0x800080ff800080ff; ++ __m256i_out = __lasx_xvor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff80007fff0000; ++ __m256i_out = __lasx_xvor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvori.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvori.c +new file mode 100644 +index 000000000..ee91af95f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvori.c +@@ -0,0 +1,141 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvori_b (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x6c6c6c6c6c6c6c6c; ++ *((unsigned long *)&__m256i_result[2]) = 0x6c6c6c6c6c6c6c6c; ++ *((unsigned long *)&__m256i_result[1]) = 0x6c6c6c6c6c6c6c6c; ++ *((unsigned long *)&__m256i_result[0]) = 0x6c6c6c6c6c6c6c6c; ++ __m256i_out = __lasx_xvori_b (__m256i_op0, 0x6c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffff00fffffff0; ++ *((unsigned long *)&__m256i_result[3]) = 0x9f9f9f9f9f9f9f9f; ++ *((unsigned long *)&__m256i_result[2]) = 0x9f9f9f9fffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x9f9f9f9f9f9f9f9f; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffff9fffffffff; ++ __m256i_out = __lasx_xvori_b (__m256i_op0, 0x9f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvori_b (__m256i_op0, 0x6a); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffbdff3cffbdff44; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffbdff3cffbdff44; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffff7effffff46; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffff7effffff46; ++ __m256i_out = __lasx_xvori_b (__m256i_op0, 0x42); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xbfbfbfbfbfbfbfbf; ++ *((unsigned long *)&__m256i_result[2]) = 0xbfbfbfbfbfbfbfbf; ++ *((unsigned long *)&__m256i_result[1]) = 0xbfbfbfbfbfbfbfbf; ++ *((unsigned long *)&__m256i_result[0]) = 0xbfbfbfbfbfbfbfbf; ++ __m256i_out = __lasx_xvori_b (__m256i_op0, 0xbf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x2c2c2c2c2c2c2c2c; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x2c2c2c2c2c2c2c2c; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvori_b (__m256i_op0, 0x2c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x5252525252525252; ++ *((unsigned long *)&__m256i_result[2]) = 0x5252525252525252; ++ *((unsigned long *)&__m256i_result[1]) = 0x5252525252525252; ++ *((unsigned long *)&__m256i_result[0]) = 0x5252525252525252; ++ __m256i_out = __lasx_xvori_b (__m256i_op0, 0x52); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fe363637fe36363; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fe363637fe36363; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fe363637fe36363; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fe363637fe36363; ++ __m256i_out = __lasx_xvori_b (__m256i_op0, 0x63); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfefefefe3f800000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfefefefe3f800000; ++ *((unsigned long *)&__m256i_result[3]) = 0xe0e0e0e0e0e0e0e0; ++ *((unsigned long *)&__m256i_result[2]) = 0xfefefefeffe0e0e0; ++ *((unsigned long *)&__m256i_result[1]) = 0xe0e0e0e0e0e0e0e0; ++ *((unsigned long *)&__m256i_result[0]) = 0xfefefefeffe0e0e0; ++ __m256i_out = __lasx_xvori_b (__m256i_op0, 0xe0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x6b6b6b6b6b6b6b6b; ++ *((unsigned long *)&__m256i_result[2]) = 0x6b6b6b6b6b6b6b6b; ++ *((unsigned long *)&__m256i_result[1]) = 0x6b6b6b6b6b6b6b6b; ++ *((unsigned long *)&__m256i_result[0]) = 0x6b6b6b6b6b6b6b6b; ++ __m256i_out = __lasx_xvori_b (__m256i_op0, 0x6b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvorn.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvorn.c +new file mode 100644 +index 000000000..fa6cdff31 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvorn.c +@@ -0,0 +1,245 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xbf28b0686066be60; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x40d74f979f99419f; ++ __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffefffffefc; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x01480000052801a2; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffdcff64; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffefd; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0002555500000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0002555500000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfff9fffffffbffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffdaaaaffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xfff9fffffffbffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffdaaaaffffffff; ++ __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000022; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000022; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000236200005111; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000175e0000490d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000236200005111; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000175e0000490d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0002000000020000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00220021004a007e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0002000000020000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00220021004a007e; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffdfffffffdffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffddffdeffb5ff8d; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffdfffffffdffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffddffdeffb5ff8d; ++ __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff010000ff017e; ++ *((unsigned long *)&__m256i_op1[2]) = 0x01fe01ae00ff00ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00ff010000ff017e; ++ *((unsigned long *)&__m256i_op1[0]) = 0x01fe01ae00ff00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0xff00feffff00fe81; ++ *((unsigned long 
*)&__m256i_result[2]) = 0xfe01fe51ff00ff40; ++ *((unsigned long *)&__m256i_result[1]) = 0xff00feffff00fe81; ++ *((unsigned long *)&__m256i_result[0]) = 0xfe01fe51ff00ff40; ++ __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffe0df9f8e; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffe0df9f8e; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xe07de0801f20607a; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xe07de0801f20607a; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffe0df9f8f; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffe0df9f8f; ++ __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x800000ff800000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x800000ff800000ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff7fffffff7fff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff7fffffff7fff; ++ __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xdf80df80df80dfff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffdf80dfff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x498100814843ffe1; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4981008168410001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x498100814843ffe1; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4981008168410001; ++ 
*((unsigned long *)&__m256i_op1[3]) = 0x40f69fe73c26f4ee; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x40f69fe73c26f4ee; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xff896099cbdbfff1; ++ *((unsigned long *)&__m256i_result[2]) = 0xc987ffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xff896099cbdbfff1; ++ *((unsigned long *)&__m256i_result[0]) = 0xc987ffffffffffff; ++ __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffeffff97a1; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffdf5b000041b0; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffeffff97a1; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffdf5b000041b0; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000010000685e; ++ *((unsigned long *)&__m256i_result[2]) = 0x000020a4ffffbe4f; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000010000685e; ++ *((unsigned long *)&__m256i_result[0]) = 0x000020a4ffffbe4f; ++ __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000070002000a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000070002000a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0040000000000003; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0040000000000003; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000007; ++ *((unsigned long 
*)&__m256i_result[3]) = 0xffbffffffffffffe; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m256i_result[1]) = 0xffbffffffffffffe; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffffa; ++ __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxor.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxor.c +new file mode 100644 +index 000000000..18b36c873 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxor.c +@@ -0,0 +1,185 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvxor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7be2468acf15f39c; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7be2468acf15f39c; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7be2468acf15f39c; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7be2468acf15f39c; ++ *((unsigned long *)&__m256i_result[2]) = 0x7be2468acf15f39c; ++ *((unsigned long *)&__m256i_result[1]) = 0x7be2468acf15f39c; ++ *((unsigned long *)&__m256i_result[0]) = 0x7ff0000000000000; ++ __m256i_out = __lasx_xvxor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff3eff3eff3eff3e; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff3eff3eff3eff3e; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00c100c100c100c1; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00c100c100c100c1; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvxor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvxor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0100000001000100; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0100000001000100; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0100000001000100; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0100000001000100; ++ __m256i_out = __lasx_xvxor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000f91; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000f91; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000f90; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000f90; ++ __m256i_out = __lasx_xvxor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvxor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x6018000000000cd1; ++ *((unsigned long *)&__m256i_op0[2]) = 0x6040190d20227a78; ++ *((unsigned long *)&__m256i_op0[1]) = 0x132feeabd2d33b38; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6040190d00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x9fe7fffffffff32e; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x6040190ddfdd8587; ++ *((unsigned long *)&__m256i_result[1]) = 0xecd011542d2cc4c7; ++ *((unsigned long *)&__m256i_result[0]) = 0x6040190dffffffff; ++ __m256i_out = __lasx_xvxor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000101000001010; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000101000001010; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000101000001010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000101000001010; ++ __m256i_out = __lasx_xvxor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvxor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvxor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxori.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxori.c +new file mode 100644 +index 000000000..8fd6298f7 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxori.c +@@ -0,0 +1,163 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000005e02; 
++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000005e02; ++ *((unsigned long *)&__m256i_result[3]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256i_result[2]) = 0xc2c2c2c2c2c29cc0; ++ *((unsigned long *)&__m256i_result[1]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256i_result[0]) = 0xc2c2c2c2c2c29cc0; ++ __m256i_out = __lasx_xvxori_b (__m256i_op0, 0xc2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1616161616161616; ++ *((unsigned long *)&__m256i_op0[2]) = 0x161616167fffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ffe16167f161616; ++ *((unsigned long *)&__m256i_op0[0]) = 0x161616167fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xc7c7c7c7c7c7c7c7; ++ *((unsigned long *)&__m256i_result[2]) = 0xc7c7c7c7ae2e2e2e; ++ *((unsigned long *)&__m256i_result[1]) = 0xae2fc7c7aec7c7c7; ++ *((unsigned long *)&__m256i_result[0]) = 0xc7c7c7c7ae2e2e2e; ++ __m256i_out = __lasx_xvxori_b (__m256i_op0, 0xd1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x5353535353535353; ++ *((unsigned long *)&__m256i_result[2]) = 0x5353535353535353; ++ *((unsigned long *)&__m256i_result[1]) = 0x5353535353535353; ++ *((unsigned long *)&__m256i_result[0]) = 0x5353535353535353; ++ __m256i_out = __lasx_xvxori_b (__m256i_op0, 0x53); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x6d6d6d6d6d6d6d6d; ++ *((unsigned long *)&__m256i_result[2]) = 0x6d6d6d6d6d6d6d6d; ++ *((unsigned long *)&__m256i_result[1]) = 0x6d6d6d6d6d6d6d6d; ++ *((unsigned long *)&__m256i_result[0]) = 0x6d6d6d6d6d6d6d6d; ++ __m256i_out = __lasx_xvxori_b (__m256i_op0, 0x6d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x7171717171717171; ++ *((unsigned long *)&__m256i_result[2]) = 0x8e8e8e8e8e8e8e8e; ++ *((unsigned long *)&__m256i_result[1]) = 0x7171717171717171; ++ *((unsigned long *)&__m256i_result[0]) = 0x8e8e8e8e8e8e8e8e; ++ __m256i_out = __lasx_xvxori_b (__m256i_op0, 0x71); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_result[2]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_result[1]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_result[0]) = 0x7575757575757575; ++ __m256i_out = __lasx_xvxori_b (__m256i_op0, 0x75); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xa4a4a4a4a4a4a4a4; ++ *((unsigned long *)&__m256i_result[2]) = 0xa4a4a4a4a4a4a4a4; ++ *((unsigned long *)&__m256i_result[1]) = 
0xa4a4a4a4a4a4a4a4; ++ *((unsigned long *)&__m256i_result[0]) = 0xa4a4a4a4a4a4a4a4; ++ __m256i_out = __lasx_xvxori_b (__m256i_op0, 0xa4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xa1a1a1a1a1a1a1a1; ++ *((unsigned long *)&__m256i_result[2]) = 0xa1a1a1a15e5e5e5e; ++ *((unsigned long *)&__m256i_result[1]) = 0xa1a1a1a1a1a1a1a1; ++ *((unsigned long *)&__m256i_result[0]) = 0xa1a1a1a15e5e5e5e; ++ __m256i_out = __lasx_xvxori_b (__m256i_op0, 0xa1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8d8d72728d8d7272; ++ *((unsigned long *)&__m256i_result[2]) = 0x8d8d72728d8d8d8d; ++ *((unsigned long *)&__m256i_result[1]) = 0x8d8d72728d8d7272; ++ *((unsigned long *)&__m256i_result[0]) = 0x8d8d72728d8d8d8d; ++ __m256i_out = __lasx_xvxori_b (__m256i_op0, 0x8d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xb3b3b3b3b3b3b3b3; ++ *((unsigned long *)&__m256i_result[2]) = 0xb3b3b3b3b3b3b3b3; ++ *((unsigned long *)&__m256i_result[1]) = 0xb3b3b3b3b3b3b3b3; ++ *((unsigned long *)&__m256i_result[0]) = 0xb3b3b3b3b3b3b3b3; ++ __m256i_out = __lasx_xvxori_b (__m256i_op0, 0x4c); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffff800000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007f0000ff807f81; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffff800000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007f0000ff807f81; ++ *((unsigned long *)&__m256i_result[3]) = 0x5d5d5d5d5d22a2a2; ++ *((unsigned long *)&__m256i_result[2]) = 0xa2dda2a25d22dd23; ++ *((unsigned long *)&__m256i_result[1]) = 0x5d5d5d5d5d22a2a2; ++ *((unsigned long *)&__m256i_result[0]) = 0xa2dda2a25d22dd23; ++ __m256i_out = __lasx_xvxori_b (__m256i_op0, 0xa2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xd3d3d3d3d3d3d3d3; ++ *((unsigned long *)&__m256i_result[2]) = 0xd3d3d3d3d3d3d3d3; ++ *((unsigned long *)&__m256i_result[1]) = 0xd3d3d3d3d3d3d3d3; ++ *((unsigned long *)&__m256i_result[0]) = 0xd3d3d3d3d3d3d3d3; ++ __m256i_out = __lasx_xvxori_b (__m256i_op0, 0xd3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfa15fa15fa15fa14; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfa15fa15fa15fa14; ++ *((unsigned long *)&__m256i_result[3]) = 0x8282828282828282; ++ *((unsigned long *)&__m256i_result[2]) = 0x8768876887688769; ++ *((unsigned long *)&__m256i_result[1]) = 0x8282828282828282; ++ *((unsigned long *)&__m256i_result[0]) = 0x8768876887688769; ++ __m256i_out = __lasx_xvxori_b (__m256i_op0, 0x7d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +-- +2.33.0 + diff --git a/LoongArch-Add-tests-for-ASX-vector-xvbitclr-xvbitclr.patch 
b/LoongArch-Add-tests-for-ASX-vector-xvbitclr-xvbitclr.patch new file mode 100644 index 0000000000000000000000000000000000000000..8854a1369811b4daaf9190033036e786b0dcab84 --- /dev/null +++ b/LoongArch-Add-tests-for-ASX-vector-xvbitclr-xvbitclr.patch @@ -0,0 +1,5057 @@ +From a6390d1a6619b6bee4fc87b15ffd25936704eb21 Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Tue, 12 Sep 2023 15:57:18 +0800 +Subject: [PATCH 108/124] LoongArch: Add tests for ASX vector + xvbitclr/xvbitclri/xvbitrev/xvbitrevi/ + xvbitsel/xvbitseli/xvbitset/xvbitseti/xvclo/xvclz/xvpcnt instructions. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lasx/lasx-xvbitclr.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvbitclri.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvbitrev.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvbitrevi.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvbitsel.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvbitseli.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvbitset.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvbitseti.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvclo.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvclz.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvpcnt.c: New test. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lasx/lasx-xvbitclr.c | 635 +++++++++++++++++ + .../loongarch/vector/lasx/lasx-xvbitclri.c | 515 ++++++++++++++ + .../loongarch/vector/lasx/lasx-xvbitrev.c | 650 ++++++++++++++++++ + .../loongarch/vector/lasx/lasx-xvbitrevi.c | 317 +++++++++ + .../loongarch/vector/lasx/lasx-xvbitsel.c | 134 ++++ + .../loongarch/vector/lasx/lasx-xvbitseli.c | 185 +++++ + .../loongarch/vector/lasx/lasx-xvbitset.c | 620 +++++++++++++++++ + .../loongarch/vector/lasx/lasx-xvbitseti.c | 405 +++++++++++ + .../loongarch/vector/lasx/lasx-xvclo.c | 449 ++++++++++++ + .../loongarch/vector/lasx/lasx-xvclz.c | 504 ++++++++++++++ + .../loongarch/vector/lasx/lasx-xvpcnt.c | 526 ++++++++++++++ + 11 files changed, 4940 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclr.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclri.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrev.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrevi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitsel.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseli.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitset.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseti.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclo.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclz.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpcnt.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclr.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclr.c +new file mode 100644 +index 000000000..def7b588e +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclr.c +@@ -0,0 
+1,635 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000040000fff8; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffff1f; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffeff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffff1f; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffeff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000105fffffefb; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffff02000000fe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000105fffffefb; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffff02000000fe; ++ *((unsigned long *)&__m256i_result[3]) = 0xf7ffffffffffff1f; ++ *((unsigned long *)&__m256i_result[2]) = 0xbffffffffffffeff; ++ *((unsigned long *)&__m256i_result[1]) = 0xf7ffffffffffff1f; ++ *((unsigned long *)&__m256i_result[0]) = 0xbffffffffffffeff; ++ __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffefd; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000101; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffffefd; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000101; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fff7fff7fffdefd; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000101; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned 
long *)&__m256i_result[0]) = 0x7fff7fff7fff7fff; ++ __m256i_out = __lasx_xvbitclr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000f0000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000f0000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1fe01e0000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1fe01e0000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000f0000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000f0000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0002555500000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0002555500000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0002555400000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0002555400000000; ++ __m256i_out = __lasx_xvbitclr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000002a542a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000002a542a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x00000000002a542a; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000002a542a; ++ __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfff3fff3fff3fff3; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff3fff3fff3fff3; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfff3fff3fff3fff3; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfff3fff3fff3fff3; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); 
++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0002000200020002; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0002000200020002; ++ __m256i_out = __lasx_xvbitclr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ffff00ff00; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000fff00004542; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ffff00ff00; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000fff00004542; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff00ffff00ff00; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000fff00004542; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff00ffff00ff00; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000fff00004542; ++ __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff00fe00feff02ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00fe00feff02ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff00fe00feff02ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00fe00feff02ff; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x00000000003f0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000030007; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000003f0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000030007; ++ *((unsigned long *)&__m256i_result[3]) = 0xff00fe00feff02fe; ++ *((unsigned long *)&__m256i_result[2]) = 0xff00fe00feff027f; ++ *((unsigned long *)&__m256i_result[1]) = 0xff00fe00feff02fe; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00fe00feff027f; ++ __m256i_out = __lasx_xvbitclr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000010000000100; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000010000000100; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000010000000100; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000010000000100; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000023a20000a121; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000179e0000951d; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000023a20000a121; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000179e0000951d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000010000000100; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000010000000100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000010000000100; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000010000000100; ++ __m256i_out = __lasx_xvbitclr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000236200005111; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000175e0000490d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000236200005111; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000175e0000490d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffeeffaf; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000011; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffeeffaf; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000011; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000226200005111; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000165e0000480d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000226200005111; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000165e0000480d; ++ __m256i_out = __lasx_xvbitclr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000007fef; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000007fef; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000007fef; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000007fef; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000007fee; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000007fee; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000007fee; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000007fee; ++ __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_d (__m256i_op0, 
__m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffffffff7; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffff7; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffff00; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffff00; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000fefe7f00; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000fefe7f00; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1f001f00000007ef; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00001fff200007ef; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff00000000; ++ __m256i_out = __lasx_xvbitclr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000f90; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000f90; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffefffe00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffefffefffefffe; ++ *((unsigned long 
*)&__m256i_result[0]) = 0xfffefffe00000000; ++ __m256i_out = __lasx_xvbitclr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff8000ffa3; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007fe70000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff8000ffa3; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007fe70000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff8000ffa3; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000007fe70000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff8000ffa3; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000007fe70000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00007f7f80007fa3; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007f670000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00007f7f80007fa3; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007f670000; ++ __m256i_out = __lasx_xvbitclr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffeffff10000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffeffff10000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7ffffffffffffffe; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7ffffffffffffffe; ++ __m256i_out = __lasx_xvbitclr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x3e8000003e800000; ++ *((unsigned long *)&__m256i_result[2]) = 0x3e8000003e800000; ++ *((unsigned long *)&__m256i_result[1]) = 0x3e8000003e800000; ++ *((unsigned long *)&__m256i_result[0]) = 0x3e8000003e800000; ++ __m256i_out = __lasx_xvbitclr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00001ff8d8d8c000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00001ff8d8d90000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00001ff8d8d8c000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00001ff8d8d90000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00001ef8d8d8c000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00001ef8d8d80000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00001ef8d8d8c000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00001ef8d8d80000; ++ __m256i_out = __lasx_xvbitclr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000000d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000000d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000fffe0000000c; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000010000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000fffe0000000c; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000010000; ++ __m256i_out = __lasx_xvbitclr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000003; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000003; ++ __m256i_out = __lasx_xvbitclr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffe00000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffe00000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000ff800000ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000ff800000ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000ff800000ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000ff800000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfefee00000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfefee00000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000fff000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000fff000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000fff000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000fff000000000; ++ __m256i_out = __lasx_xvbitclr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000ffff88ff88; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000ffff88ff88; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000ffff88ff88; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000ffff88ff88; ++ __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_result[2]) = 0xff00ff007f007f00; ++ *((unsigned long *)&__m256i_result[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00ff007f007f00; ++ __m256i_out = __lasx_xvbitclr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 
0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x1c1c1c1c1c1c1c1c; 
++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffeffffff00; ++ *((unsigned long *)&__m256i_result[1]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffeffffff00; ++ __m256i_out = __lasx_xvbitclr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclri.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclri.c +new file mode 100644 +index 000000000..713eb19d5 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclri.c +@@ -0,0 +1,515 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffe06df8d7; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffbe8b470f; ++ *((unsigned long *)&__m256i_result[3]) = 0x7ffffffffffff7ff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffe06df0d7; ++ *((unsigned long *)&__m256i_result[1]) = 0x7ffffffffffff7ff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffbe8b470f; ++ __m256i_out = __lasx_xvbitclri_d (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_h (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_b (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0010ffc80010ff52; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff1ffca0011ffcb; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0010ffc80010ff52; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff1ffca0011ffcb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0010bfc80010bf52; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff1bfca0011bfcb; ++ *((unsigned long *)&__m256i_result[1]) = 0x0010bfc80010bf52; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff1bfca0011bfcb; ++ __m256i_out = __lasx_xvbitclri_w (__m256i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_w (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_w (__m256i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_d (__m256i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000005536aaaaac; ++ *((unsigned long *)&__m256i_op0[2]) = 0x55555555aaaaaaac; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000005536aaaaac; ++ *((unsigned long *)&__m256i_op0[0]) = 0x55555555aaaaaaac; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000005136aaaaa8; ++ *((unsigned long *)&__m256i_result[2]) = 
0x55515551aaaaaaa8; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000005136aaaaa8; ++ *((unsigned long *)&__m256i_result[0]) = 0x55515551aaaaaaa8; ++ __m256i_out = __lasx_xvbitclri_h (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fff7fff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fdf000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fdf000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fdf7fff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fdf7fff00000000; ++ __m256i_out = __lasx_xvbitclri_d (__m256i_op0, 0x35); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000fd0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000fd0000; ++ __m256i_out = __lasx_xvbitclri_w (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ffe7ffe7ffe7ffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007ffe7ffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ffe7ffe7ffe8000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000807e7ffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x7f7e7f7e7f7e7f7e; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007f7e7f7e; ++ *((unsigned long *)&__m256i_result[1]) = 0x7f7e7f7e7f7e0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000007e7f7e; ++ 
__m256i_out = __lasx_xvbitclri_b (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_h (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_d (__m256i_op0, 0x24); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_h (__m256i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_h (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xdf01010153a10101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5b7f01ff5b7f10ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xdf01010153a10101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5b7f01ff5b7f10ff; ++ *((unsigned long *)&__m256i_result[3]) = 0xcf01010143a10101; ++ *((unsigned long *)&__m256i_result[2]) = 0x4b6f01ef4b6f00ef; ++ *((unsigned long *)&__m256i_result[1]) = 0xcf01010143a10101; ++ *((unsigned long *)&__m256i_result[0]) = 0x4b6f01ef4b6f00ef; ++ __m256i_out = __lasx_xvbitclri_b (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xdfffffffdfffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xdfffffffdfffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000; ++ __m256i_out = __lasx_xvbitclri_w (__m256i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_b (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_w (__m256i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff02ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffff0100; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00fe00feff02ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ff7fff7f; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ff7f027f; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ff7f0100; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00fe00fe7f027f; ++ __m256i_out = __lasx_xvbitclri_h (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8011ffee804c004c; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00faff0500c3ff3c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x80f900f980780078; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0057ffa800ceff31; ++ *((unsigned long *)&__m256i_result[3]) = 0x8011ffae800c000c; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x00baff050083ff3c; ++ *((unsigned long *)&__m256i_result[1]) = 0x80b900b980380038; ++ *((unsigned long *)&__m256i_result[0]) = 0x0017ffa8008eff31; ++ __m256i_out = __lasx_xvbitclri_h (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_d (__m256i_op0, 0x3b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000027262524; ++ *((unsigned long *)&__m256i_op0[2]) = 0x232221201f1e1d1c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000027262524; ++ *((unsigned long *)&__m256i_op0[0]) = 0x232221201f1e1d1c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000027262524; ++ *((unsigned long *)&__m256i_result[2]) = 0x23222120171e151c; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000027262524; ++ *((unsigned long *)&__m256i_result[0]) = 0x23222120171e151c; ++ __m256i_out = __lasx_xvbitclri_h (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_d (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvbitclri_b (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff00fe00ff00fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff00fe00ff00fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000fefe0000fefe; ++ *((unsigned long *)&__m256i_result[2]) = 0x00fe00fe00fe00fe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000fefe0000fefe; ++ *((unsigned long *)&__m256i_result[0]) = 0x00fe00fe00fe00fe; ++ __m256i_out = __lasx_xvbitclri_b (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_h (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000009; ++ __m256i_out = __lasx_xvbitclri_d (__m256i_op0, 0x26); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_h (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_b (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x01ff0020ff1f001f; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffe1ffe0ffe1ffe0; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x01ff0020ff1f001f; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffe1ffe0ffe1ffe0; ++ *((unsigned long *)&__m256i_result[3]) = 0x01ff0020ff1f001f; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fe1ffe0ffe1ffe0; ++ *((unsigned long *)&__m256i_result[1]) = 0x01ff0020ff1f001f; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fe1ffe0ffe1ffe0; ++ __m256i_out = __lasx_xvbitclri_d (__m256i_op0, 0x3f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_h (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_d (__m256i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffbfffffffb; ++ 
*((unsigned long *)&__m256i_result[2]) = 0x00000000fffffffb; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffbfffffffb; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fffffffb; ++ __m256i_out = __lasx_xvbitclri_w (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_w (__m256i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000800200027; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000800200028; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000800200027; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000800200028; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000800200027; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000800200028; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000800200027; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000800200028; ++ __m256i_out = __lasx_xvbitclri_w (__m256i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_b (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffee00ba; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffee00ba; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xefefefefefee00aa; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xefefefefefee00aa; ++ __m256i_out = __lasx_xvbitclri_b (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000100010; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000100010; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000100010; ++ *((unsigned long *)&__m256i_result[2]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000100010; ++ *((unsigned long *)&__m256i_result[0]) = 0x0010001000100010; ++ __m256i_out = __lasx_xvbitclri_w (__m256i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ff88ff88; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ff88ff88; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000f788f788; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000f788f788; ++ __m256i_out = __lasx_xvbitclri_h (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_w (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_b (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_b (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_w (__m256i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffefffefffefffe; ++ __m256i_out = __lasx_xvbitclri_h (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrev.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrev.c +new file mode 100644 +index 000000000..2b0e7f8d1 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrev.c +@@ -0,0 +1,650 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int 
unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0501030102141923; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffd5020738b43ddb; ++ *((unsigned long *)&__m256i_op0[1]) = 0x010200023b8e4174; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff4ff4e11410b40; ++ *((unsigned long *)&__m256i_op1[3]) = 0x01fa022a01a401e5; ++ *((unsigned long *)&__m256i_op1[2]) = 0x030d03aa0079029b; ++ *((unsigned long *)&__m256i_op1[1]) = 0x024c01f901950261; ++ *((unsigned long *)&__m256i_op1[0]) = 0x008102c2008a029f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101070102041903; ++ *((unsigned long *)&__m256i_result[2]) = 0xdfd506073ab435db; ++ *((unsigned long *)&__m256i_result[1]) = 0x110202023bae4176; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff6ff4a15418b40; ++ __m256i_out = __lasx_xvbitrev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffe0edf8d7; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffbe8bc70f; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffe0edf8d7; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffbe8bc70f; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffe06df8d7; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffbe8b470f; ++ __m256i_out = __lasx_xvbitrev_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffe0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000001e18; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x000000000000ffe0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000001e18; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffe0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000001e18; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000ffe0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000001e18; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000010000ffe1; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000101001e18; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000010000ffe1; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000101001e18; ++ __m256i_out = __lasx_xvbitrev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvbitrev_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_result[2]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_result[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_result[0]) = 0xfefefefefefefefe; ++ __m256i_out = __lasx_xvbitrev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x1d1a1b181d1a1b18; ++ *((unsigned long *)&__m256i_result[2]) = 0x9c9b9a999c9b9a99; ++ *((unsigned long *)&__m256i_result[1]) = 0x1d1a1b181d1a1b18; ++ *((unsigned long *)&__m256i_result[0]) = 0x9c9b9a999c9b9a99; ++ __m256i_out = __lasx_xvbitrev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010100000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0101010100000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvbitrev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000033e87ef1; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000002e2100; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x80008000b3e8fef1; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x80008000802ea100; ++ __m256i_out = __lasx_xvbitrev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1c80780000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1c80780000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvbitrev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0200000200000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x2c27000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0200000200000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x2c27000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000400000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000400000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvbitrev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvbitrev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvbitrev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000800080008000; ++ __m256i_out = __lasx_xvbitrev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff00ff00ffff00; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff000000ff00ff00; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffff00ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff00000000ff00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000000000001; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000180000000; ++ __m256i_out = __lasx_xvbitrev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff8fff8fff8fff8; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfff8fff8fff8fff8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x8001800180018001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x8001800180018001; ++ __m256i_out = __lasx_xvbitrev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00010003; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0080000200000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00010003; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000200000003; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00010002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0080000200000003; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00010002; ++ __m256i_out = __lasx_xvbitrev_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x80000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x80000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvbitrev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvbitrev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvbitrev_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvbitrev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvbitrev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xf7f7f7f6f7f7f7f6; ++ *((unsigned long *)&__m256i_result[2]) = 0xf7f7f7f6f7f7f7f6; ++ *((unsigned long *)&__m256i_result[1]) = 0xf7f7f7f6f7f7f7f6; ++ *((unsigned long *)&__m256i_result[0]) = 0xf7f7f7f6f7f7f7f6; ++ __m256i_out = __lasx_xvbitrev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out 
= __lasx_xvbitrev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7eeefefefefefefe; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x7eeefefefefefefe; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvbitrev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000010000fffe; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000010000fffe; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000010000fffe; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000010000fffe; ++ __m256i_out = __lasx_xvbitrev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvbitrev_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000004; ++ __m256i_out = __lasx_xvbitrev_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op1[0]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvbitrev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000008000b; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000000b; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000008000b; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000000b; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000008000a; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000000a; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000008000a; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000000a; ++ __m256i_out = __lasx_xvbitrev_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000100010001fffe; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000800080008000; ++ *((unsigned 
long *)&__m256i_result[0]) = 0x000100010001fffe; ++ __m256i_out = __lasx_xvbitrev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvbitrev_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00fe00fe00fe00fe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00fe00fe00fe00fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00fe00fe00fe00fe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00fe00fe00fe00fe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00fe00fe00fe00fe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00fe00fe00fe00fe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00fe00fe00fe00fe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00fe00fe00fe00fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x40fe00fe00fe00fe; ++ *((unsigned long *)&__m256i_result[2]) = 0x40fe00fe00fe00fe; ++ *((unsigned long *)&__m256i_result[1]) = 0x40fe00fe00fe00fe; ++ *((unsigned long *)&__m256i_result[0]) = 0x40fe00fe00fe00fe; ++ __m256i_out = __lasx_xvbitrev_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fff80007fff0000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffc0007ffe0002; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000400000018002; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffc0007ffe0002; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000400000018002; ++ __m256i_out = __lasx_xvbitrev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_result[2]) = 0xfefefefe01010101; ++ *((unsigned long *)&__m256i_result[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_result[0]) = 0xfefefefe01010101; ++ __m256i_out = __lasx_xvbitrev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000006d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000400008; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000006d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000400008; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op1[2]) = 
0x000000000010006d; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000010006d; ++ *((unsigned long *)&__m256i_result[3]) = 0x010101010101016c; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101410128; ++ *((unsigned long *)&__m256i_result[1]) = 0x010101010101016c; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101410128; ++ __m256i_out = __lasx_xvbitrev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x800000ff000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x800000ff000000ff; ++ __m256i_out = __lasx_xvbitrev_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffb6811fffff80; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff97c120000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffb6811fffff80; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff97c120000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_op1[2]) = 0x4980008068400000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_op1[0]) = 0x4980008068400000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_result[2]) = 
0xfffe97c020010001; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffe97c020010001; ++ __m256i_out = __lasx_xvbitrev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000027; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000027; ++ *((unsigned long *)&__m256i_result[3]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_result[2]) = 0xfefefefefefefe7f; ++ *((unsigned long *)&__m256i_result[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_result[0]) = 0xfefefefefefefe7f; ++ __m256i_out = __lasx_xvbitrev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010003; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010081; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010003; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100018080; ++ __m256i_out = __lasx_xvbitrev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); 
++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvbitrev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000000c; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000000c; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010110; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010110; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvbitrev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvbitrev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrevi.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrevi.c +new file mode 100644 +index 000000000..2b8327d91 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrevi.c +@@ -0,0 +1,317 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ff00ff00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ff00ff00; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x01010101fe01fe01; ++ *((unsigned 
long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x01010101fe01fe01; ++ __m256i_out = __lasx_xvbitrevi_b (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x2000200020002000; ++ *((unsigned long *)&__m256i_result[2]) = 0x2000200020002000; ++ *((unsigned long *)&__m256i_result[1]) = 0x2000200020002000; ++ *((unsigned long *)&__m256i_result[0]) = 0x2000200020002000; ++ __m256i_out = __lasx_xvbitrevi_h (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvbitrevi_b (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000800000008; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7ff77fff7ff7; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000800000008; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff7ff77fff7ff7; ++ __m256i_out = __lasx_xvbitrevi_w 
(__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000020001; ++ *((unsigned long *)&__m256i_result[3]) = 0x1010101010101010; ++ *((unsigned long *)&__m256i_result[2]) = 0x1010101010101010; ++ *((unsigned long *)&__m256i_result[1]) = 0x1010101010101010; ++ *((unsigned long *)&__m256i_result[0]) = 0x1010101010121011; ++ __m256i_out = __lasx_xvbitrevi_b (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvbitrevi_d (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000004000000040; ++ __m256i_out = __lasx_xvbitrevi_w (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[2]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[0]) = 0x2020202020202020; ++ __m256i_out = __lasx_xvbitrevi_b (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000020000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000020000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000020000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000020000000000; ++ __m256i_out = __lasx_xvbitrevi_d (__m256i_op0, 0x29); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvbitrevi_b (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[2]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[0]) = 0x4040404040404040; ++ __m256i_out = __lasx_xvbitrevi_b (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001c4e8ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001c4e8ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0080000000800000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0081c4e8ff7fffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0080000000800000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0081c4e8ff7fffff; ++ __m256i_out = __lasx_xvbitrevi_w (__m256i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff81ff7d; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff81ff7d; ++ *((unsigned long *)&__m256i_result[3]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_result[2]) = 0x7f7f7f7f7f017ffd; ++ *((unsigned long *)&__m256i_result[1]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f7f7f7f7f017ffd; ++ __m256i_out = __lasx_xvbitrevi_b (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x4000000000000000; ++ 
*((unsigned long *)&__m256i_result[1]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x4000000000000000; ++ __m256i_out = __lasx_xvbitrevi_d (__m256i_op0, 0x3e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000002080100; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000002080100; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000008000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000a080100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000008000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000a080100; ++ __m256i_out = __lasx_xvbitrevi_d (__m256i_op0, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_result[2]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_result[0]) = 0x0100010001000100; ++ __m256i_out = __lasx_xvbitrevi_h (__m256i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffe0047d00e00480; ++ *((unsigned long *)&__m256i_op0[2]) = 0x001fc0200060047a; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffe0047d00e00480; ++ *((unsigned long *)&__m256i_op0[0]) = 0x001fc0200060047a; ++ *((unsigned long *)&__m256i_result[3]) = 0xfee1057c01e10581; ++ *((unsigned long *)&__m256i_result[2]) = 0x011ec1210161057b; ++ *((unsigned long *)&__m256i_result[1]) = 0xfee1057c01e10581; ++ *((unsigned long *)&__m256i_result[0]) = 0x011ec1210161057b; ++ __m256i_out = 
__lasx_xvbitrevi_b (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xe07de0801f20607a; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xe07de0801f20607a; ++ *((unsigned long *)&__m256i_result[3]) = 0xfdfdfdfdfdfdfdfd; ++ *((unsigned long *)&__m256i_result[2]) = 0xe27fe2821d226278; ++ *((unsigned long *)&__m256i_result[1]) = 0xfdfdfdfdfdfdfdfd; ++ *((unsigned long *)&__m256i_result[0]) = 0xe27fe2821d226278; ++ __m256i_out = __lasx_xvbitrevi_b (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000200000002; ++ __m256i_out = __lasx_xvbitrevi_w (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000800000008; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000800000008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000800000008; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000800000008; ++ __m256i_out = __lasx_xvbitrevi_w (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000800200027; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000800200028; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000800200027; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000800200028; ++ *((unsigned long *)&__m256i_result[3]) = 0x080808000828082f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0808080008280820; ++ *((unsigned long *)&__m256i_result[1]) = 0x080808000828082f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0808080008280820; ++ __m256i_out = __lasx_xvbitrevi_b (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvbitrevi_h (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000800000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000800000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000800000000000; ++ __m256i_out = __lasx_xvbitrevi_d (__m256i_op0, 0x2f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0200000002000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x02000000fdffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0200000002000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x02000000fdffffff; ++ __m256i_out = __lasx_xvbitrevi_w (__m256i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffed; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffed; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffed; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffed; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffeffed; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffeffed; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffeffed; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffeffed; ++ __m256i_out = __lasx_xvbitrevi_d (__m256i_op0, 0x10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc039000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc039000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc039000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc039000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xc03b000200020002; ++ *((unsigned long *)&__m256i_result[2]) = 0xc03b000200020002; ++ *((unsigned long *)&__m256i_result[1]) = 0xc03b000200020002; ++ *((unsigned long *)&__m256i_result[0]) = 0xc03b000200020002; ++ __m256i_out = __lasx_xvbitrevi_h (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000010000000100; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x7fff81007fff0100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000010000000100; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff81007fff0100; ++ __m256i_out = __lasx_xvbitrevi_w (__m256i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitsel.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitsel.c +new file mode 100644 +index 000000000..c9847a615 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitsel.c +@@ -0,0 +1,134 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0xe9e9e9e9e9e9e9e9; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op2[0]) = 0xe9e9e9e9e9e9e9e9; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitsel_v (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitsel_v (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x001f001f02c442af; ++ *((unsigned long *)&__m256i_op0[1]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x001f001f02c442af; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00fe01f000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00fe01f000000000; ++ *((unsigned 
long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xfffffffffefefeff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffff295329; ++ *((unsigned long *)&__m256i_op2[1]) = 0xfffffffffefefeff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffff295329; ++ *((unsigned long *)&__m256i_result[3]) = 0x00fe01f000010000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000c40086; ++ *((unsigned long *)&__m256i_result[1]) = 0x00fe01f000010000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000c40086; ++ __m256i_out = __lasx_xvbitsel_v (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xbe21000100000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000505300000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xbe21000100000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000505300000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x41dfffffffc00000; ++ *((unsigned long *)&__m256i_op2[2]) = 0xc1d75053f0000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x41dfffffffc00000; ++ *((unsigned long *)&__m256i_op2[0]) = 0xc1d75053f0000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00005053000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00005053000000ff; ++ __m256i_out = __lasx_xvbitsel_v (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000040000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000040000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x00000e0000000e00; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x00000e0000000e00; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000040000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000040000; ++ __m256i_out = __lasx_xvbitsel_v (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvbitsel_v (__m256i_op0, __m256i_op1, 
__m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseli.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseli.c +new file mode 100644 +index 000000000..1edb4fca2 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseli.c +@@ -0,0 +1,185 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitseli_b (__m256i_op0, __m256i_op1, 0x3a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000004fb; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000004fb; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitseli_b (__m256i_op0, __m256i_op1, 0xef); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitseli_b (__m256i_op0, __m256i_op1, 0xcd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffd10000006459; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000441000000004; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000040400000104; ++ *((unsigned long *)&__m256i_op1[3]) = 
0xdb801b6d0962003f; ++ *((unsigned long *)&__m256i_op1[2]) = 0xdb8a3109fe0f0024; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000007fff01ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xdb8e209d0cce025a; ++ *((unsigned long *)&__m256i_result[3]) = 0x88888a6d0962002e; ++ *((unsigned long *)&__m256i_result[2]) = 0xdb8a3109fe0f0020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000007fff01fffb; ++ *((unsigned long *)&__m256i_result[0]) = 0xdb8e20990cce025a; ++ __m256i_out = __lasx_xvbitseli_b (__m256i_op0, __m256i_op1, 0x88); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000002b902b3e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000002b902b3e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000002a102a3a; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000002a102a3a; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitseli_b (__m256i_op0, __m256i_op1, 0x3a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitseli_b (__m256i_op0, __m256i_op1, 0xd9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000f0f0f0f0; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf0f0f0f0f0f0f0f0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000f0f0f0f0; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf0f0f0f0f0f0f0f0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000f0f0f0f0; ++ *((unsigned long *)&__m256i_op1[2]) = 0xf0f0f0f0f0f0f0f0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000f0f0f0f0; ++ *((unsigned long *)&__m256i_op1[0]) = 0xf0f0f0f0f0f0f0f0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000090909090; ++ *((unsigned long *)&__m256i_result[2]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000090909090; ++ *((unsigned long *)&__m256i_result[0]) = 0x9090909090909090; ++ __m256i_out = __lasx_xvbitseli_b (__m256i_op0, __m256i_op1, 0x95); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x5555555555555555; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5555555555555555; ++ *((unsigned long *)&__m256i_op0[1]) = 0x5555555555555555; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5555555555555555; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x4545454545454545; ++ *((unsigned long *)&__m256i_result[2]) = 0x4545454545454545; ++ *((unsigned long *)&__m256i_result[1]) = 0x4545454545454545; ++ *((unsigned long *)&__m256i_result[0]) = 0x4545454545454545; ++ __m256i_out = __lasx_xvbitseli_b 
(__m256i_op0, __m256i_op1, 0x4d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_op1[3]) = 0x21bb481000ff0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x01bf481000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x21bb481000ff0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x01bf481000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xb1b3b1b1b1b7b1b1; ++ *((unsigned long *)&__m256i_result[2]) = 0xb1b7b1b1b1b1b1b1; ++ *((unsigned long *)&__m256i_result[1]) = 0xb1b3b1b1b1b7b1b1; ++ *((unsigned long *)&__m256i_result[0]) = 0xb1b7b1b1b1b1b1b1; ++ __m256i_out = __lasx_xvbitseli_b (__m256i_op0, __m256i_op1, 0xb7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc03fc03fc03fc03f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc03fc03fc03fc03f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000002d; ++ *((unsigned long *)&__m256i_result[2]) = 0xc02dc02dc02dc02d; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000002d; ++ *((unsigned long *)&__m256i_result[0]) = 0xc02dc02dc02dc02d; ++ __m256i_out = __lasx_xvbitseli_b (__m256i_op0, __m256i_op1, 0xed); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) 
= 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x60600000ffff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x6060000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x60600000ffff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x6060000000000000; ++ __m256i_out = __lasx_xvbitseli_b (__m256i_op0, __m256i_op1, 0x60); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitset.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitset.c +new file mode 100644 +index 000000000..c195cd91c +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitset.c +@@ -0,0 +1,620 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff000000010000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000095120000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc9da000063f50000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc7387fff6bbfffff; ++ *((unsigned long *)&__m256i_op1[3]) 
= 0xfffdffffffc81aca; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff3a0b9512; ++ *((unsigned long *)&__m256i_op1[1]) = 0x280bc9db313a63f5; ++ *((unsigned long *)&__m256i_op1[0]) = 0xe032c738adcb6bbb; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff800001010400; ++ *((unsigned long *)&__m256i_result[2]) = 0x000180009d120004; ++ *((unsigned long *)&__m256i_result[1]) = 0xc9da080067f50020; ++ *((unsigned long *)&__m256i_result[0]) = 0xc73c7fff6bbfffff; ++ __m256i_out = __lasx_xvbitset_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffff8046867f79; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffff328dfff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x6651bfff80000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00010001; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00010001; ++ __m256i_out = __lasx_xvbitset_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000040000fff8; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00001f41ffffbf00; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x010180068080fff9; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvbitset_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvbitset_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvbitset_d (__m256i_op0, 
__m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000; ++ __m256i_out = __lasx_xvbitset_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff00010000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff00010000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x3ff1808001020101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x3ff1808001020101; ++ __m256i_out = __lasx_xvbitset_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvbitset_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvbitset_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000004fb; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0800000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvbitset_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000102; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010103; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvbitset_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000010; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000010; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffefffffefc; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000040000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x4000000010000010; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000040000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x8000000040000010; ++ __m256i_out = __lasx_xvbitset_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xbea2e127c046721f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1729c073816edebe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xde91f010000006f9; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5ef1f90efefaf30d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000060000108; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000001060005; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000007fef0001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0xbfa3e127c147721f; ++ *((unsigned long *)&__m256i_result[2]) = 0x1729c173836edfbe; ++ *((unsigned long *)&__m256i_result[1]) = 0xdf91f111808007fb; ++ *((unsigned long *)&__m256i_result[0]) = 0x5ff1f90ffffbf30f; ++ __m256i_out = __lasx_xvbitset_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xe161616161614e60; ++ *((unsigned long *)&__m256i_op0[2]) = 0xe161616161614e60; ++ *((unsigned long *)&__m256i_op0[1]) = 0xe161616161614e60; ++ *((unsigned long *)&__m256i_op0[0]) = 0xe161616161614e60; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xe161616161614f61; ++ *((unsigned long *)&__m256i_result[2]) = 0xe161616161614f61; ++ *((unsigned long *)&__m256i_result[1]) = 0xe161616161614f61; ++ *((unsigned long *)&__m256i_result[0]) = 0xe161616161614f61; ++ __m256i_out = __lasx_xvbitset_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x8080808080808080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x01010101010000ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x8080808280808082; ++ *((unsigned long *)&__m256i_result[2]) = 0x8080808280808082; ++ *((unsigned long *)&__m256i_result[1]) = 0x8080808280808080; ++ *((unsigned long *)&__m256i_result[0]) = 0x8080808280808082; ++ __m256i_out = __lasx_xvbitset_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000082f8989a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000d58f43c8; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010183f9999b; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x01010101d58f43c9; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvbitset_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0707feb60707b7d0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x45baa7ef6a95a985; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0707feb60707b7d0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x45baa7ef6a95a985; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7ffe7ffd7ffe7fff; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7ffe7ffd7ffe8001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0707feb70707b7d1; ++ *((unsigned long *)&__m256i_result[2]) = 0x65baa7efea95a985; ++ *((unsigned long *)&__m256i_result[1]) = 0x0707feb70707b7d1; ++ *((unsigned long *)&__m256i_result[0]) = 0x65baa7ef6a95a987; ++ __m256i_out = __lasx_xvbitset_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x41cc5bb8a95fd1eb; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x41cc5bb8a95fd1eb; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7b7b7b7b80000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xcacacb1011040500; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7b7b7b7b80000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xcacacb1011040500; ++ *((unsigned long *)&__m256i_result[3]) = 0x49cc5bb8a95fd1eb; ++ *((unsigned long *)&__m256i_result[2]) = 0x7ff4080102102001; ++ *((unsigned long *)&__m256i_result[1]) = 0x49cc5bb8a95fd1eb; ++ *((unsigned long *)&__m256i_result[0]) = 0x7ff4080102102001; ++ __m256i_out = __lasx_xvbitset_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvbitset_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvbitset_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010401; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010401; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010401; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010401; ++ __m256i_out = __lasx_xvbitset_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0xdf00000052a00000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5b7f00ff5b7f00ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xdf00000052a00000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5b7f00ff5b7f00ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op1[2]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_op1[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op1[0]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_result[3]) = 0xdf01010153a10101; ++ *((unsigned long *)&__m256i_result[2]) = 0x5b7f01ff5b7f10ff; ++ *((unsigned long *)&__m256i_result[1]) = 0xdf01010153a10101; ++ *((unsigned long *)&__m256i_result[0]) = 0x5b7f01ff5b7f10ff; ++ __m256i_out = __lasx_xvbitset_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xdbcbdbcbecececec; ++ *((unsigned long *)&__m256i_op1[2]) = 0xdbcbdbcb0000dbcb; ++ *((unsigned long *)&__m256i_op1[1]) = 0xdbcbdbcbecececec; ++ *((unsigned long *)&__m256i_op1[0]) = 0xdbcbdbcb0000dbcb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000080000001000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000080000000800; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000080000001000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000080000000800; ++ __m256i_out = __lasx_xvbitset_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000027262524; ++ *((unsigned long *)&__m256i_op0[2]) = 0x23222120171e151c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000027262524; ++ *((unsigned long *)&__m256i_op0[0]) = 0x23222120171e151c; ++ *((unsigned long *)&__m256i_op1[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x201fdfe0201fdfe0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x201fdfe0201fdfe0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010127272525; ++ *((unsigned long *)&__m256i_result[2]) = 0x23a2a121179e951d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010127272525; ++ *((unsigned long *)&__m256i_result[0]) = 0x23a2a121179e951d; ++ __m256i_out = __lasx_xvbitset_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvbitset_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000007fee; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000007fee; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000007fee; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000007fee; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x800080008000ffee; 
++ *((unsigned long *)&__m256i_result[2]) = 0x800080008000ffee; ++ *((unsigned long *)&__m256i_result[1]) = 0x800080008000ffee; ++ *((unsigned long *)&__m256i_result[0]) = 0x800080008000ffee; ++ __m256i_out = __lasx_xvbitset_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvbitset_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000100010001ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000100010001ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000100010001ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000100010001ffff; ++ __m256i_out = __lasx_xvbitset_h (__m256i_op0, __m256i_op1); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00010000fffe0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00010000fffe0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00010000fffe0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00010000fffe0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvbitset_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000c9; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000c9; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000000000f0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x01010101010101c9; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x01010101010101c9; ++ __m256i_out = __lasx_xvbitset_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvbitset_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000200000008; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000200000008; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvbitset_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000affff800b; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000affff800b; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000affff800b; ++ *((unsigned long *)&__m256i_op1[0]) = 
0x0000000affff800b; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000800; ++ __m256i_out = __lasx_xvbitset_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0200000202000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0200000202000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000400010004; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000400010004; ++ __m256i_out = __lasx_xvbitset_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000e0000000d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000e0000000d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000f0001000f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000f0001000d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000f0001000f; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0001000f0001000d; ++ __m256i_out = __lasx_xvbitset_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007f010000000100; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007f010000000100; ++ *((unsigned long *)&__m256i_op1[3]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x007f010100000101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x007f010100000101; ++ __m256i_out = __lasx_xvbitset_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvbitset_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x498000804843ffe0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4980008068400000; ++ 
*((unsigned long *)&__m256i_op0[1]) = 0x498000804843ffe0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4980008068400000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x498100814843ffe1; ++ *((unsigned long *)&__m256i_result[2]) = 0x4981008168410001; ++ *((unsigned long *)&__m256i_result[1]) = 0x498100814843ffe1; ++ *((unsigned long *)&__m256i_result[0]) = 0x4981008168410001; ++ __m256i_out = __lasx_xvbitset_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000090b0906; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100002000; ++ __m256i_out = __lasx_xvbitset_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x03af03af03af03af; ++ *((unsigned long *)&__m256i_op0[2]) = 0x03acfc5303260e80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x03af03af03af03af; ++ *((unsigned long *)&__m256i_op0[0]) = 0x03acfc5303260e80; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000002780; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000002780; ++ *((unsigned long *)&__m256i_result[3]) = 0x03af03af03af03af; ++ *((unsigned long *)&__m256i_result[2]) = 0x03acfc5303260e81; ++ *((unsigned long *)&__m256i_result[1]) = 0x03af03af03af03af; ++ *((unsigned long *)&__m256i_result[0]) = 0x03acfc5303260e81; ++ __m256i_out = __lasx_xvbitset_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseti.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseti.c +new file mode 100644 +index 000000000..47f37e4b3 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseti.c +@@ -0,0 +1,405 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ 
__m256i_out = __lasx_xvbitseti_d (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvbitseti_w (__m256i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffcf800fffcf800; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000080000000800; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffcf800fffcf800; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000080000000800; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000080000000800; ++ __m256i_out = __lasx_xvbitseti_w (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00007f7f00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00007f7f00007fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000040000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00007f7f00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000040000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007f7f00007fff; ++ __m256i_out = __lasx_xvbitseti_d (__m256i_op0, 0x2a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0202020202020202; ++ *((unsigned long *)&__m256i_result[2]) = 0x0202020202020202; ++ *((unsigned long *)&__m256i_result[1]) = 0x0202020202020202; ++ *((unsigned long *)&__m256i_result[0]) = 0x0202020202020202; ++ __m256i_out = __lasx_xvbitseti_b (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000800000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000800000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000800000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000800000000; ++ __m256i_out = __lasx_xvbitseti_d (__m256i_op0, 0x23); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x1010101010101010; ++ *((unsigned long *)&__m256i_result[2]) = 0x1010101010101010; ++ *((unsigned long *)&__m256i_result[1]) = 0x1010101010101010; ++ *((unsigned long *)&__m256i_result[0]) = 0x1010101010101010; ++ __m256i_out = __lasx_xvbitseti_b (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000004000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000004000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000004000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000004000000; ++ __m256i_out = __lasx_xvbitseti_d (__m256i_op0, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000013; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000001000000fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000013; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000001000000fb; ++ *((unsigned long *)&__m256i_result[3]) = 0x8080808180808093; ++ *((unsigned long *)&__m256i_result[2]) = 0x80808081808080fe; ++ *((unsigned long *)&__m256i_result[1]) = 0x8080808180808093; ++ *((unsigned long *)&__m256i_result[0]) = 0x80808081808080fb; ++ __m256i_out = __lasx_xvbitseti_b (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000020; ++ __m256i_out = __lasx_xvbitseti_d (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0808080808080808; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[0]) = 0x0808080808080808; ++ __m256i_out = __lasx_xvbitseti_b (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0010000000100000; ++ __m256i_out = __lasx_xvbitseti_w (__m256i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[2]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[0]) = 0x0010001000100010; ++ __m256i_out = __lasx_xvbitseti_h (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000044444443; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7bbbbbbbf7777778; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000044444443; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7bbbbbbbf7777778; ++ *((unsigned long *)&__m256i_result[3]) = 0x1000100054445443; ++ *((unsigned long *)&__m256i_result[2]) = 0x7bbbbbbbf7777778; ++ *((unsigned long *)&__m256i_result[1]) = 0x1000100054445443; ++ *((unsigned long *)&__m256i_result[0]) = 
0x7bbbbbbbf7777778; ++ __m256i_out = __lasx_xvbitseti_h (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[2]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[0]) = 0x2020202020202020; ++ __m256i_out = __lasx_xvbitseti_b (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000200; ++ __m256i_out = __lasx_xvbitseti_d (__m256i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffa0078fffa0074; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffa0078fffa0074; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffa2078fffa2074; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffa2078fffa2074; ++ __m256i_out = __lasx_xvbitseti_w (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffeffebfb7afb62; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffeffebfb7afb62; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffeffebfb7afb62; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffeffebfb7afb62; ++ __m256i_out = __lasx_xvbitseti_d (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xe7e7e7e7e7e7e7e7; ++ *((unsigned long *)&__m256i_op0[2]) = 0xe7e7e7e7e7e7e7e7; ++ *((unsigned long *)&__m256i_op0[1]) = 0xe7e7e7e7e7e7e7e7; ++ *((unsigned long *)&__m256i_op0[0]) = 0xe7e7e7e7e7e7e7e7; ++ *((unsigned long *)&__m256i_result[3]) = 0xe7e7e7e7e7e7e7e7; ++ *((unsigned long *)&__m256i_result[2]) = 0xe7e7e7e7e7e7e7e7; ++ *((unsigned long *)&__m256i_result[1]) = 0xe7e7e7e7e7e7e7e7; ++ *((unsigned long *)&__m256i_result[0]) = 0xe7e7e7e7e7e7e7e7; ++ __m256i_out = __lasx_xvbitseti_h (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000004411; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000004411; ++ *((unsigned long *)&__m256i_result[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[2]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_result[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[0]) = 0x2020202020206431; ++ __m256i_out = __lasx_xvbitseti_b (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0003030300000300; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0003030300000300; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0003030300000100; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0003030300000100; ++ *((unsigned long *)&__m256i_result[3]) = 0x0043030300400300; ++ *((unsigned long *)&__m256i_result[2]) = 0x0043030300400300; ++ *((unsigned long *)&__m256i_result[1]) = 0x0043030300400100; ++ *((unsigned long *)&__m256i_result[0]) = 0x0043030300400100; ++ __m256i_out = __lasx_xvbitseti_w (__m256i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x223d76f09f3881ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3870ca8d013e76a0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x223d76f09f37e357; ++ *((unsigned long *)&__m256i_op0[0]) = 0x43ec0a1b2aba7ed0; ++ *((unsigned long *)&__m256i_result[3]) = 0x223d76f09f3881ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x3870ca9d013e76b0; ++ *((unsigned long *)&__m256i_result[1]) = 0x223d76f09f37e357; ++ *((unsigned long *)&__m256i_result[0]) = 0x43ec0a1b2aba7ed0; ++ __m256i_out = __lasx_xvbitseti_w (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf7f8f7f8f800f800; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00003f780000ff80; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf7f8f7f80000fff9; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00003f780000ff80; ++ *((unsigned long *)&__m256i_result[3]) = 0xf7f8f7f8f800f800; ++ *((unsigned long *)&__m256i_result[2]) = 0x00003f784000ff80; ++ *((unsigned long *)&__m256i_result[1]) = 0xf7f8f7f84000fff9; ++ *((unsigned long *)&__m256i_result[0]) = 0x00003f784000ff80; ++ __m256i_out = __lasx_xvbitseti_d (__m256i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x4040404040404040; ++ 
*((unsigned long *)&__m256i_result[2]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[0]) = 0x4040404040404040; ++ __m256i_out = __lasx_xvbitseti_b (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff0020ff1f001f; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffe1ffe0ffe1ffe0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff0020ff1f001f; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffe1ffe0ffe1ffe0; ++ *((unsigned long *)&__m256i_result[3]) = 0x01ff0020ff1f001f; ++ *((unsigned long *)&__m256i_result[2]) = 0xffe1ffe0ffe1ffe0; ++ *((unsigned long *)&__m256i_result[1]) = 0x01ff0020ff1f001f; ++ *((unsigned long *)&__m256i_result[0]) = 0xffe1ffe0ffe1ffe0; ++ __m256i_out = __lasx_xvbitseti_w (__m256i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffefef800; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffefef800; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000008000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffefef800; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000008000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffefef800; ++ __m256i_out = __lasx_xvbitseti_d (__m256i_op0, 0x27); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0002000000020000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0002000000020000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0002000000020000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0002000000020000; ++ __m256i_out = __lasx_xvbitseti_w (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000030b8; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000030b8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0002000200020002; ++ *((unsigned long *)&__m256i_result[2]) = 0x00020002000230ba; ++ *((unsigned long *)&__m256i_result[1]) = 0x0002000200020002; ++ *((unsigned long *)&__m256i_result[0]) = 0x00020002000230ba; ++ __m256i_out = __lasx_xvbitseti_h (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_result[2]) = 0x8100810081008100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_result[0]) = 0x8100810081008100; ++ __m256i_out = __lasx_xvbitseti_h (__m256i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000007878; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000007878; ++ *((unsigned long *)&__m256i_result[3]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[2]) = 0x0010001000107878; ++ *((unsigned long *)&__m256i_result[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[0]) = 0x0010001000107878; ++ __m256i_out = __lasx_xvbitseti_h (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[2]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[0]) = 0x0808080808080808; ++ __m256i_out = __lasx_xvbitseti_b (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffb2f600006f48; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffb2f600006f48; ++ *((unsigned long *)&__m256i_result[3]) = 0x4000400140004001; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffff2f640006f48; ++ *((unsigned long *)&__m256i_result[1]) = 0x4000400140004001; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffff2f640006f48; ++ __m256i_out = __lasx_xvbitseti_h (__m256i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000008; ++ __m256i_out = __lasx_xvbitseti_d (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x4040404040404040; ++ __m256i_out = __lasx_xvbitseti_b (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfd02fd02fd02fd02; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfd02fd02fd02fd02; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfd02fd02fd02fd02; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfd02fd02fd02fd02; ++ *((unsigned long *)&__m256i_result[3]) = 0xfd12fd12fd12fd12; ++ *((unsigned long *)&__m256i_result[2]) = 0xfd12fd12fd12fd12; ++ *((unsigned long *)&__m256i_result[1]) = 0xfd12fd12fd12fd12; ++ *((unsigned long *)&__m256i_result[0]) = 0xfd12fd12fd12fd12; ++ __m256i_out = __lasx_xvbitseti_b (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclo.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclo.c +new file mode 100644 +index 000000000..dbc52f92b +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclo.c +@@ -0,0 +1,449 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, 
unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffd1b24e00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffcea54ffff29a8; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff8cad88ff8306b4; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffc1278fffce4c8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0802010000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0806030008080001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0801010108010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0806000008060302; ++ __m256i_out = __lasx_xvclo_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfafafafafafafafa; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000fefefe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0007000700070007; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000007; ++ __m256i_out = __lasx_xvclo_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[2]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[0]) = 0x0010001000100010; ++ __m256i_out = __lasx_xvclo_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xb70036db12c4007e; ++ *((unsigned long *)&__m256i_op0[2]) = 0xb7146213fc1e0049; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000fefe02fffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xb71c413b199d04b5; ++ *((unsigned 
long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvclo_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0fff0fff00000020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0fff0fff00000020; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000000; ++ 
*((unsigned long *)&__m256i_result[0]) = 0x0000000100000000; ++ __m256i_out = __lasx_xvclo_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned 
long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m256i_op0[2]) = 0x01fc03fc01fc03fc; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m256i_op0[0]) = 0x01fc03fc01fc03fc; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000200000001e; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000200000001e; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[2]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000808; ++ __m256i_out = __lasx_xvclo_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xd04752cdd5543b56; ++ *((unsigned long *)&__m256i_op0[2]) = 0x6906e68064f3d78b; ++ *((unsigned long *)&__m256i_op0[1]) = 0xd04752cdd5543b56; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x6906e68064f3d78b; ++ *((unsigned long *)&__m256i_result[3]) = 0x0002000000020000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000300000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0002000000020000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000300000002; ++ __m256i_out = __lasx_xvclo_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc0000000c0000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc000000080400000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc0000000c0000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc000000080400000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0002000000020000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0002000000010000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0002000000020000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0002000000010000; ++ __m256i_out = __lasx_xvclo_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000012; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000012; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000012; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000012; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_d 
(__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0010000100000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0010000100000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0010000100000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0010000100000000; ++ __m256i_out = __lasx_xvclo_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000004000000020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000004000000020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000201220001011c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000201220001011c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvclo_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000100010; ++ *((unsigned long *)&__m256i_result[2]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000100010; ++ *((unsigned long *)&__m256i_result[0]) = 0x0010001000100010; ++ __m256i_out = __lasx_xvclo_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned 
long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000100010; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000100010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000100010; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000100010; ++ __m256i_out = __lasx_xvclo_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclz.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclz.c +new file mode 100644 +index 
000000000..89191c467 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclz.c +@@ -0,0 +1,504 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x04481940fbb7e6bf; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf2781966e6991966; ++ *((unsigned long *)&__m256i_op0[1]) = 0x51258839aeda77c6; ++ *((unsigned long *)&__m256i_op0[0]) = 0xcf25f0e00f1ff0e0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0501030100000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001030100000301; ++ *((unsigned long *)&__m256i_result[1]) = 0x0102000200000100; ++ *((unsigned long *)&__m256i_result[0]) = 0x0002000004030000; ++ __m256i_out = __lasx_xvclz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000f0000000f; ++ __m256i_out = __lasx_xvclz_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000003868686a20; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0045b8ae81bce1d8; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000003868686a20; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0045b8ae81bce1d8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000001a00000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000900000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000001a00000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000900000000; ++ __m256i_out = __lasx_xvclz_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[2]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[0]) = 0x0010001000100010; ++ __m256i_out = __lasx_xvclz_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclz_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[2]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[0]) = 0x0808080808080808; ++ __m256i_out = __lasx_xvclz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040; ++ __m256i_out = __lasx_xvclz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0808080808080807; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0808080808080807; ++ __m256i_out = __lasx_xvclz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000007fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000007fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000000010000; ++ *((unsigned 
long *)&__m256i_result[2]) = 0x0010001000100001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000000010000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0010001000100001; ++ __m256i_out = __lasx_xvclz_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040; ++ __m256i_out = __lasx_xvclz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00ffffff00ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00ffffff00ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0008000000080000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0008000000080000; ++ __m256i_out = __lasx_xvclz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[2]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[0]) = 0x0808080808080808; ++ 
__m256i_out = __lasx_xvclz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[2]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[0]) = 0x0010001000100010; ++ __m256i_out = __lasx_xvclz_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000007f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000007f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000002000000018; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000002000000019; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000200000001e; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000002000000019; ++ __m256i_out = __lasx_xvclz_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0b085bfc00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0b004bc000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0b085bfc00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0b004bc000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0404010008080808; ++ *((unsigned long *)&__m256i_result[2]) = 0x0408010008080808; ++ *((unsigned long *)&__m256i_result[1]) = 0x0404010008080808; ++ *((unsigned long *)&__m256i_result[0]) = 0x0408010008080808; ++ __m256i_out = __lasx_xvclz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffc040ffffc09d; ++ *((unsigned 
long *)&__m256i_op0[2]) = 0x00003fc00000428a; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00003fc00000428a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000012; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000012; ++ __m256i_out = __lasx_xvclz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0404010008080808; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0408010008080808; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0404010008080808; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0408010008080808; ++ *((unsigned long *)&__m256i_result[3]) = 0x0505070804040404; ++ *((unsigned long *)&__m256i_result[2]) = 0x0504070804040404; ++ *((unsigned long *)&__m256i_result[1]) = 0x0505070804040404; ++ *((unsigned long *)&__m256i_result[0]) = 0x0504070804040404; ++ __m256i_out = __lasx_xvclz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000002000000020; ++ __m256i_out = __lasx_xvclz_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000002000000020; ++ __m256i_out = __lasx_xvclz_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000002000000020; ++ __m256i_out = __lasx_xvclz_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040; ++ __m256i_out = __lasx_xvclz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000001000000010; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001000000010; ++ __m256i_out = __lasx_xvclz_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0006ffff0004ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0002ffff0000ff00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0006ffff0004ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0002ffff0000ff00; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000000d; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000000e; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000000d; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000000e; ++ __m256i_out = __lasx_xvclz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000032; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000003c000000032; ++ *((unsigned long *)&__m256i_result[3]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[2]) = 0x001000100010000a; ++ *((unsigned long *)&__m256i_result[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[0]) = 0x001000060010000a; ++ __m256i_out = __lasx_xvclz_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0003800400038004; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000a800b000a800b; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0003800400038004; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000a800b000a800b; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000000e; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000000c; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000000e; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000000c; ++ __m256i_out = __lasx_xvclz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000002000000020; ++ __m256i_out = __lasx_xvclz_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000080; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000080; ++ *((unsigned long *)&__m256i_result[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000008080800; ++ *((unsigned long *)&__m256i_result[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000008080800; ++ __m256i_out = __lasx_xvclz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[2]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[0]) = 0x0808080808080808; ++ __m256i_out = __lasx_xvclz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0a0a000000000a0a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0a0a0a0a00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0a0a000000000a0a; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0a0a0a0a00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0004001000100004; ++ *((unsigned long *)&__m256i_result[2]) = 0x0004000400100010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0004001000100004; ++ *((unsigned long *)&__m256i_result[0]) = 0x0004000400100010; ++ __m256i_out = __lasx_xvclz_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000002000000020; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000002000000020; ++ __m256i_out = __lasx_xvclz_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ff88ff88; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ff88ff88; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000020; ++ 
__m256i_out = __lasx_xvclz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000007f8000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000007f8000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000029; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000029; ++ __m256i_out = __lasx_xvclz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040; ++ __m256i_out = __lasx_xvclz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000002000000020; ++ __m256i_out = __lasx_xvclz_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000007; ++ __m256i_out = __lasx_xvclz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[2]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[0]) = 0x0808080808080808; ++ __m256i_out = __lasx_xvclz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040; ++ __m256i_out = __lasx_xvclz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000001010000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000001010000; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000027; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000027; ++ __m256i_out = __lasx_xvclz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040; ++ __m256i_out = __lasx_xvclz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[2]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[0]) = 0x0808080808080808; ++ __m256i_out = __lasx_xvclz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpcnt.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpcnt.c +new file mode 100644 +index 000000000..d2e742e81 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpcnt.c +@@ -0,0 +1,526 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" 
++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x639c3fffb5dffffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xb8c7800094400001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0008000e000c000f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0009000100040001; ++ __m256i_out = __lasx_xvpcnt_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x005500550055ffab; ++ *((unsigned long *)&__m256i_op0[2]) = 0x005500550055ffab; ++ *((unsigned long *)&__m256i_op0[1]) = 0x005500550055ffab; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x005500550055ffab; ++ *((unsigned long *)&__m256i_result[3]) = 0x0004000400040805; ++ *((unsigned long *)&__m256i_result[2]) = 0x0004000400040805; ++ *((unsigned long *)&__m256i_result[1]) = 0x0004000400040805; ++ *((unsigned long *)&__m256i_result[0]) = 0x0004000400040805; ++ __m256i_out = __lasx_xvpcnt_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffcf800fffcf800; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000080000000800; ++ *((unsigned long *)&__m256i_result[3]) = 0x0008000800000003; ++ *((unsigned long *)&__m256i_result[2]) = 0x0806050008060500; ++ *((unsigned long *)&__m256i_result[1]) = 0x0008000800000003; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000010000000100; ++ __m256i_out = __lasx_xvpcnt_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000002e2100; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000040002; ++ __m256i_out = __lasx_xvpcnt_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_w (__m256i_op0); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvpcnt_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x34000000fff00000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff6e00000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3380000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x363c0000fff3c000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000030000000c; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001100000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000500000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000800000010; ++ __m256i_out = __lasx_xvpcnt_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00c100c100c100c1; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00c100c100c100c1; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0003000300030003; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0003000300030003; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040; ++ __m256i_out = __lasx_xvpcnt_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000080808080808; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000080800000808; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000080808080808; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000080800000808; ++ __m256i_out = __lasx_xvpcnt_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned 
long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[2]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[0]) = 0x0808080808080808; ++ __m256i_out = __lasx_xvpcnt_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040; ++ __m256i_out = __lasx_xvpcnt_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002; ++ __m256i_out = __lasx_xvpcnt_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffe36780; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffe36780; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000008050501; ++ *((unsigned long *)&__m256i_result[2]) = 0x0100000100000001; ++ 
*((unsigned long *)&__m256i_result[1]) = 0x0000000008050501; ++ *((unsigned long *)&__m256i_result[0]) = 0x0100000100000001; ++ __m256i_out = __lasx_xvpcnt_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_d (__m256i_op0); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000002000000020; ++ __m256i_out = __lasx_xvpcnt_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000020; ++ __m256i_out = __lasx_xvpcnt_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvpcnt_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000001555; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000015554001c003; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000001555; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000015554001c003; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000304; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000030401010202; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000304; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000030401010202; ++ __m256i_out = __lasx_xvpcnt_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007f433c78; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007f433c78; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000a0008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000a0008; ++ __m256i_out = __lasx_xvpcnt_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0010001000030000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0010001000030000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0010001000030000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0010001000030000; ++ __m256i_out = __lasx_xvpcnt_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000010000685e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000020a4ffffbe4f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000010000685e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000020a4ffffbe4f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000008; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000040000001b; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000008; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x000000040000001b; ++ __m256i_out = __lasx_xvpcnt_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000b000b000b000b; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000b000b000b000b; ++ __m256i_out = __lasx_xvpcnt_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000001f00000020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000001f00000020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000001200000012; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001200000012; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000001200000012; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001200000012; ++ __m256i_out = __lasx_xvpcnt_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvpcnt_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +-- +2.33.0 + diff --git a/LoongArch-Add-tests-for-ASX-vector-xvext2xv-xvexth-x.patch b/LoongArch-Add-tests-for-ASX-vector-xvext2xv-xvexth-x.patch new file mode 100644 index 0000000000000000000000000000000000000000..b633c301813295c0ccf9e2c44ee167cd489ac720 --- /dev/null +++ b/LoongArch-Add-tests-for-ASX-vector-xvext2xv-xvexth-x.patch @@ -0,0 +1,4600 @@ +From 5cf957f25df755431bc77845fecb5bec0624c097 Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Tue, 12 Sep 2023 16:51:19 +0800 +Subject: [PATCH 118/124] LoongArch: Add tests for ASX vector + xvext2xv/xvexth/xvextins/xvilvh/xvilvl/xvinsgr2vr/ xvinsve0/xvprem/xvpremi + instructions. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lasx/lasx-xvext2xv-1.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvext2xv-2.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvexth-1.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvexth-2.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvextrins.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvilvh.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvilvl.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvinsgr2vr.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvinsve0.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvprem.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvpremi.c: New test. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lasx/lasx-xvext2xv-1.c | 515 ++++++++++++++ + .../loongarch/vector/lasx/lasx-xvext2xv-2.c | 669 ++++++++++++++++++ + .../loongarch/vector/lasx/lasx-xvexth-1.c | 350 +++++++++ + .../loongarch/vector/lasx/lasx-xvexth-2.c | 592 ++++++++++++++++ + .../loongarch/vector/lasx/lasx-xvextrins.c | 515 ++++++++++++++ + .../loongarch/vector/lasx/lasx-xvilvh.c | 530 ++++++++++++++ + .../loongarch/vector/lasx/lasx-xvilvl.c | 620 ++++++++++++++++ + .../loongarch/vector/lasx/lasx-xvinsgr2vr.c | 272 +++++++ + .../loongarch/vector/lasx/lasx-xvinsve0.c | 380 ++++++++++ + .../loongarch/vector/lasx/lasx-xvprem.c | 20 + + .../loongarch/vector/lasx/lasx-xvpremi.c | 20 + + 11 files changed, 4483 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextrins.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvh.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvl.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsgr2vr.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsve0.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvprem.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpremi.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-1.c +new file mode 100644 +index 000000000..94f31019c +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-1.c +@@ -0,0 +1,515 
@@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_h_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_h_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_h_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_h_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_h_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_h_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x2b2b2b2b1bd5d5d6; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2a2a2a2af2d5d5d6; ++ *((unsigned long *)&__m256i_op0[1]) = 0x2b2b2b2b1bd5d5d6; ++ *((unsigned long *)&__m256i_op0[0]) = 0x2a2a2a2af2d5d5d6; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000002a0000002a; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000002a0000002a; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffff2ffffffd5; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffd5ffffffd6; ++ __m256i_out = __lasx_vext2xv_w_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_vext2xv_w_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_d_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_d_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_d_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000007f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_vext2xv_d_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_vext2xv_d_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff0fff0ff01ff01; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff0fff0fff0fff0; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfff0fff0ff01ff01; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff0fff0fff0fff0; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffff0; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffff0; ++ __m256i_out = __lasx_vext2xv_d_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_vext2xv_d_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000020000000200; ++ *((unsigned 
long *)&__m256i_op0[1]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_d_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_vext2xv_d_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000000fe; ++ __m256i_out = __lasx_vext2xv_w_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_w_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000017f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000017f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000017f; ++ __m256i_out = __lasx_vext2xv_w_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000ffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ff00fff0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffff00fffffff0; ++ __m256i_out = __lasx_vext2xv_w_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_vext2xv_w_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_vext2xv_w_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffe20; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001dfffffe1f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_w_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_w_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned 
long *)&__m256i_op0[3]) = 0x0200000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0200000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x2000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000020000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000200000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_w_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_w_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x498000804843ffe0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4980008068400000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x498000804843ffe0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4980008068400000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000498000000080; ++ *((unsigned long *)&__m256i_result[2]) = 0x00004843ffffffe0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000498000000080; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000684000000000; ++ __m256i_out = __lasx_vext2xv_w_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_w_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000f6ff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000f6ff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_d_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_d_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_d_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_d_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000001700170017; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000001700170017; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000017; ++ __m256i_out = __lasx_vext2xv_d_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00020002ff820002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00020002ff820002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffff82; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002; ++ __m256i_out = 
__lasx_vext2xv_d_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_d_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x03fbfffc03fc07fc; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x03fbfffc03fc07fc; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_vext2xv_d_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_vext2xv_d_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xd100645944100004; ++ *((unsigned 
long *)&__m256i_op0[2]) = 0xd1908469108400d1; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000404040104; ++ *((unsigned long *)&__m256i_op0[0]) = 0xd1108199714910f9; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000004040104; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffd1108199; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000714910f9; ++ __m256i_out = __lasx_vext2xv_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_vext2xv_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-2.c +new file mode 100644 +index 000000000..d93201bc4 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-2.c +@@ -0,0 +1,669 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x639c3fffb5dffffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xb8c7800094400001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0063009c003f00ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00b500df00ff00fe; ++ *((unsigned long *)&__m256i_result[1]) = 0x00b800c700800000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0094004000000001; ++ __m256i_out = __lasx_vext2xv_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[2]) = 0x00aa00ab00ff00ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00aa00ab00ff00ff; ++ __m256i_out = __lasx_vext2xv_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x01ff01ff01c0003e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x01ff01ff01c0003e; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000100ff000100ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000100c00000003e; ++ __m256i_out = __lasx_vext2xv_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000f0001000f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001000f0001000d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000f0001000f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000f0001000d; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000010000000f; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000010000000f; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000010000000f; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000010000000d; ++ __m256i_out = __lasx_vext2xv_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000080; ++ __m256i_out = __lasx_vext2xv_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; 
++ *((unsigned long *)&__m256i_op0[0]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000ff000000ff; ++ __m256i_out = __lasx_vext2xv_wu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x6100000800060005; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5ee1c073b800c916; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5ff00007fff9fff3; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000005f000000f0; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000ff000000f9; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000ff000000f3; ++ __m256i_out = __lasx_vext2xv_wu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_wu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_wu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_wu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_wu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000781; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000064; ++ __m256i_out = __lasx_vext2xv_wu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000029; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000029; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000029; ++ __m256i_out = __lasx_vext2xv_wu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000000; ++ __m256i_out = __lasx_vext2xv_wu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002; ++ __m256i_out = __lasx_vext2xv_wu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000fffffefd; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffffefd; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000000fd; ++ __m256i_out = __lasx_vext2xv_du_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_du_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_du_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ff7fff7f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ff7f027f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ff7f0100; 
++ *((unsigned long *)&__m256i_op0[0]) = 0xff00fe00fe7f027f; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000007f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000007f; ++ __m256i_out = __lasx_vext2xv_du_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000000ff; ++ __m256i_out = __lasx_vext2xv_du_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000003fbfc04; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000001fdfe02; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000003fbfc04; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000001fdfe02; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000fd; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002; ++ __m256i_out = __lasx_vext2xv_du_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_du_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000b7; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffefff80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000b7; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffefff80; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000ef; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000080; ++ __m256i_out = __lasx_vext2xv_du_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_du_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000002e0000002e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000002e0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000002e0000002e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000002e0000fffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000002e; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000002e; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000002e; ++ *((unsigned long *)&__m256i_result[0]) = 
0x000000000000fffe; ++ __m256i_out = __lasx_vext2xv_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffcfee0fe00ffe0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffcfee0fe00ffe0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000fffc0000fee0; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000fe000000ffe0; ++ __m256i_out = __lasx_vext2xv_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000001b0000001b; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000001b00fd0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000001b0000001b; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000001b00fd0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000001b; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000001b; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000001b; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000fd00000000; ++ __m256i_out = __lasx_vext2xv_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x00000000017f7f7f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7f00000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000017f7f7f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7f00000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000017f00007f7f; ++ *((unsigned long *)&__m256i_result[1]) = 0x00007f0000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff0000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff0000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ff00000000ff; ++ __m256i_out = __lasx_vext2xv_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x498000804843ffe0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4980008068400000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x498000804843ffe0; 
++ *((unsigned long *)&__m256i_op0[0]) = 0x4980008068400000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000498000000080; ++ *((unsigned long *)&__m256i_result[2]) = 0x000048430000ffe0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000498000000080; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000684000000000; ++ __m256i_out = __lasx_vext2xv_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff7eddffff7ed3; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff7edfffff7edf; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff7eddffff7ed3; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff7edfffff7edf; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff00007edd; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00007ed3; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff00007edf; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00007edf; ++ __m256i_out = __lasx_vext2xv_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_vext2xv_du_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_vext2xv_du_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000801380f380fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000801380f300fb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000008013; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000080f3; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000000fb; ++ __m256i_out = __lasx_vext2xv_du_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000483800; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000483800; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_du_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x000000000000ffff; ++ __m256i_out = __lasx_vext2xv_du_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_du_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000008000; ++ __m256i_out = __lasx_vext2xv_du_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_du_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_du_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_du_wu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_vext2xv_du_wu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_du_wu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000007f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000007f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000007f; ++ __m256i_out = __lasx_vext2xv_du_wu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_du_wu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_du_wu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_du_wu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_vext2xv_du_wu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-1.c +new file mode 100644 +index 000000000..9fb4e3ff0 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-1.c +@@ -0,0 +1,350 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, 
__m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op0[2]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op0[1]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op0[0]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_result[3]) = 0x005500550055ffab; ++ *((unsigned long *)&__m256i_result[2]) = 0x005500550055ffab; ++ *((unsigned long *)&__m256i_result[1]) = 0x005500550055ffab; ++ *((unsigned long *)&__m256i_result[0]) = 0x005500550055ffab; ++ __m256i_out = __lasx_xvexth_h_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_h_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_h_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000007f007f007f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000007f007f007f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_h_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff010ff0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff010ff0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_h_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_h_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_h_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffec; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffebd8; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffec; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffebd8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffec; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffec; ++ __m256i_out = __lasx_xvexth_w_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvexth_w_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ 
++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_w_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_w_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_w_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[1]) = 
0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffff1cffffff1c; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffff1cffffff1c; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffff1cffffff1c; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffff1cffffff1c; ++ __m256i_out = __lasx_xvexth_w_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000001010101; ++ __m256i_out = __lasx_xvexth_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007fffffff; ++ __m256i_out = __lasx_xvexth_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000010; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000010; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000010; 
++ __m256i_out = __lasx_xvexth_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvexth_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000007f00340040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000007f000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_q_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffec; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffec; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_q_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00080000002c0000; ++ 
*((unsigned long *)&__m256i_op0[2]) = 0x0008000000080000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00080000002c0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0008000000080000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00080000002c0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00080000002c0000; ++ __m256i_out = __lasx_xvexth_q_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvexth_q_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_q_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_q_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007f0000007f0060; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007f0000007f0060; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_q_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_q_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_q_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_q_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-2.c +new file mode 100644 +index 000000000..fe6ff15d8 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-2.c +@@ -0,0 +1,592 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000022; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000045f3fb; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000045f3fb; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000004500f300fb; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000004500f300fb; ++ __m256i_out = __lasx_xvexth_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x41dfffffffc00000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc1d75053f0000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x41dfffffffc00000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc1d75053f0000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x004100df00ff00ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00c000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x004100df00ff00ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff00c000000000; ++ __m256i_out = __lasx_xvexth_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_result[0]) = 
0x007f00ff007f00ff; ++ __m256i_out = __lasx_xvexth_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_result[3]) = 0x0002000200010002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0002000200010002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0002000200010002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0002000200010002; ++ __m256i_out = __lasx_xvexth_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff00ff00ff; ++ __m256i_out = __lasx_xvexth_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0080000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0080000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffff8046867f79; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffff328dfff; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x6651bfff80000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ff80; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000468600007f79; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000f3280000dfff; ++ __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000154dc84; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000089; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000fffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000fffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000fffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000fffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000fffe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000fffe; ++ __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvexth_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffa30000165a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000104000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffa30000165a; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000104000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffa3; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000165a; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffa3; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000165a; ++ __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001010600000106; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001010600000106; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007cfcfd80000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007cfcfd80000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000002000000000; ++ 
*((unsigned long *)&__m256i_op0[2]) = 0x00000020ff790020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000002000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000020ff790020; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000001ff03fe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffec75c2d209f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000001ff03fe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffec75c2d209f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000001ff000003fe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000001ff000003fe; ++ __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000010100000101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000010100000101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000010100000101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000010100000101; ++ __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000000ff; ++ __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned 
long *)&__m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_du_wu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5980000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5980000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_du_wu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_du_wu (__m256i_op0); 
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_du_wu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_du_wu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_du_wu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x00000000fff8579f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fff8579f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_du_wu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8b1414140e0e0e0e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00d6c1c830160048; ++ *((unsigned long *)&__m256i_op0[1]) = 0x36722a7e66972cd6; ++ *((unsigned long *)&__m256i_op0[0]) = 0xe3aebaf4df958004; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8b1414140e0e0e0e; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x36722a7e66972cd6; ++ __m256i_out = __lasx_xvexth_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffff5f5c; ++ __m256i_out = __lasx_xvexth_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000102; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000102; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvexth_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvexth_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000040; ++ 
*((unsigned long *)&__m256i_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040; ++ __m256i_out = __lasx_xvexth_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000010000005e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000a400ff004f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000010000005e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000a400ff004f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000010000005e; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000010000005e; ++ __m256i_out = __lasx_xvexth_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000ff0000; ++ __m256i_out = __lasx_xvexth_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextrins.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextrins.c +new file mode 100644 +index 000000000..8e61f1c6d +--- /dev/null ++++ 
b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextrins.c +@@ -0,0 +1,515 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000020202; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000002020202; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000020200; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextrins_w (__m256i_op0, __m256i_op1, 0x25); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfe02fe02fee5fe22; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff49fe4200000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xbf28b0686066be60; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xff49fe4200000000; ++ __m256i_out = __lasx_xvextrins_d (__m256i_op0, __m256i_op1, 0xbf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_result[3]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_result[1]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000000fe; ++ __m256i_out = __lasx_xvextrins_b (__m256i_op0, __m256i_op1, 0xfe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextrins_d (__m256i_op0, __m256i_op1, 0x9f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextrins_d (__m256i_op0, __m256i_op1, 0xc4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvextrins_d (__m256i_op0, __m256i_op1, 0x99); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000fffffefc0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000fffffffe0; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffefffffefc; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000fffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000fffffffff; ++ __m256i_out = __lasx_xvextrins_h (__m256i_op0, __m256i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextrins_w (__m256i_op0, __m256i_op1, 0x8f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xe161616161616161; ++ *((unsigned long *)&__m256i_op1[2]) = 0xe161616161614e60; ++ *((unsigned long *)&__m256i_op1[1]) = 0xe161616161616161; ++ *((unsigned long *)&__m256i_op1[0]) = 0xe161616161614e60; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000061; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000061; ++ __m256i_out = __lasx_xvextrins_b (__m256i_op0, __m256i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextrins_h (__m256i_op0, __m256i_op1, 0x83); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000044444443; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7bbbbbbbf7777778; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000044444443; ++ *((unsigned long *)&__m256i_op1[0]) = 
0x7bbbbbbbf7777778; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007bbbbbbb; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007bbbbbbb; ++ __m256i_out = __lasx_xvextrins_w (__m256i_op0, __m256i_op1, 0x8d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextrins_w (__m256i_op0, __m256i_op1, 0x66); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) 
= 0x0000000000000000; ++ __m256i_out = __lasx_xvextrins_h (__m256i_op0, __m256i_op1, 0xda); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00007f7f00007f7f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00007f7f00007f7f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffff900000800; ++ *((unsigned long *)&__m256i_result[3]) = 0x00007f7f00007f00; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00007f7f00007fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextrins_b (__m256i_op0, __m256i_op1, 0x87); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00000000; ++ __m256i_out = __lasx_xvextrins_h (__m256i_op0, __m256i_op1, 0xa5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x2b2a292827262524; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x232221201f1e1d1c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x2b2a292827262524; ++ *((unsigned long *)&__m256i_op0[0]) = 0x232221201f1e1d1c; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000023; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000023; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000027262524; ++ *((unsigned long *)&__m256i_result[2]) = 0x232221201f1e1d1c; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000027262524; ++ *((unsigned long *)&__m256i_result[0]) = 0x232221201f1e1d1c; ++ __m256i_out = __lasx_xvextrins_w (__m256i_op0, __m256i_op1, 0xbd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000080008001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000080008001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000080000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000080000000; ++ __m256i_out = __lasx_xvextrins_b (__m256i_op0, __m256i_op1, 0x33); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextrins_d (__m256i_op0, __m256i_op1, 0xb8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffc6ffc6003a003a; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffc6ffc6003a003a; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffc6ffc6003a003a; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffc6ffc6003a003a; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffff0000; ++ __m256i_out = __lasx_xvextrins_h (__m256i_op0, __m256i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvextrins_w (__m256i_op0, __m256i_op1, 0x54); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextrins_b (__m256i_op0, __m256i_op1, 0xe7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000430207f944; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000430207f944; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextrins_w (__m256i_op0, __m256i_op1, 0x7e); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_result[2]) = 0xff00010001000100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00010001000100; ++ __m256i_out = __lasx_xvextrins_b (__m256i_op0, __m256i_op1, 0x7b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvextrins_h (__m256i_op0, __m256i_op1, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00003f3f00003f3f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00003f3f00003f3f; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff0000000f; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff0000000d; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff0000000f; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff0000000d; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff0000000d; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff0000000d; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextrins_d (__m256i_op0, __m256i_op1, 0x56); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextrins_w (__m256i_op0, __m256i_op1, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ff0100ff0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ff01; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ff0100ff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ff01; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff01; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ff01; ++ __m256i_out = __lasx_xvextrins_h (__m256i_op0, __m256i_op1, 0x6f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000001010100; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000405; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000001010100; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000405; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000600000006; ++ __m256i_out = __lasx_xvextrins_h (__m256i_op0, __m256i_op1, 0xf6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000007f8000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000007f8000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvextrins_h (__m256i_op0, __m256i_op1, 0x7b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000fff8ffc0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ff00fff8ffc0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000fff8ffc0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ff00fff8ffc0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000fff8ffc0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ff00fff8ffc0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000fff8ffc0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ff00fff8ffc0; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000fff8fff8; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ff00fff8ffc0; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000fff8fff8; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ff00fff8ffc0; ++ __m256i_out = __lasx_xvextrins_b (__m256i_op0, __m256i_op1, 0x82); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000002000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000002000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000002000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000002000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000002000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000002000000; ++ __m256i_out = __lasx_xvextrins_h (__m256i_op0, __m256i_op1, 0x43); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001ffff0001ffff; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0001ffff0001ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001ffff0001ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001ffff0001ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001ffff0001ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001ffff0001ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextrins_d (__m256i_op0, __m256i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffeb664007ffd61; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffe97a1df5b41b0; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffeb664007ffd61; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffe97a1df5b41b0; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff007ffd61; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff007ffd61; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffe97c020010001; ++ __m256i_out = __lasx_xvextrins_w (__m256i_op0, __m256i_op1, 0x62); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvh.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvh.c +new file mode 100644 +index 000000000..5a047a508 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvh.c +@@ -0,0 +1,530 @@ ++/* { dg-do run } */ ++/* { 
dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xbff0800000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xbff0800000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvh_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffff90ffffff81; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffff90ffffff81; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 
0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvh_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvh_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000307fffe72e800; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvilvh_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2020202020200008; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0008010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x2020000020200000; ++ *((unsigned long *)&__m256i_result[2]) = 0x2020000020200000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0008000001010000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101000001010000; ++ __m256i_out = __lasx_xvilvh_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000001fffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000001fffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x555555553f800000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x5555555580000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x555555553f800000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x5555555580000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x555555553f800000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x555555553f800000; ++ __m256i_out = __lasx_xvilvh_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000003f; ++ 
*((unsigned long *)&__m256i_op0[1]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000003f00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000003f00000000; ++ __m256i_out = __lasx_xvilvh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x247fe49409620040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2475cef801f0ffdd; ++ *((unsigned long *)&__m256i_op0[1]) = 0x6580668200fe0002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x419cd5b11c3c5654; ++ *((unsigned long *)&__m256i_op1[3]) = 0x247fe49409620040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x2475cef801f0ffdd; ++ *((unsigned long *)&__m256i_op1[1]) = 0x6580668200fe0002; ++ *((unsigned long *)&__m256i_op1[0]) = 0x419cd5b11c3c5654; ++ *((unsigned long *)&__m256i_result[3]) = 0x247fe49409620040; ++ *((unsigned long *)&__m256i_result[2]) = 0x247fe49409620040; ++ *((unsigned long *)&__m256i_result[1]) = 0x6580668200fe0002; ++ *((unsigned long *)&__m256i_result[0]) = 0x6580668200fe0002; ++ __m256i_out = __lasx_xvilvh_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf5f5f5f5f5f5f5f5; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf5f5f5f5f5f5f5f5; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000004000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0x0000000004000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_result[2]) = 0xff04ff00ff00ff00; ++ *((unsigned long *)&__m256i_result[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_result[0]) = 0xff04ff00ff00ff00; ++ __m256i_out = __lasx_xvilvh_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000003f00390035; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8015003f0006001f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000003f00390035; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8015003f0006001f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x80000000001529c1; ++ *((unsigned long *)&__m256i_op1[2]) = 0x80007073cadc3779; ++ *((unsigned long *)&__m256i_op1[1]) = 0x80000000001529c1; ++ *((unsigned long *)&__m256i_op1[0]) = 0x80007073cadc3779; ++ *((unsigned long *)&__m256i_result[3]) = 0x00008000003f0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00390015003529c1; ++ *((unsigned long *)&__m256i_result[1]) = 0x00008000003f0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00390015003529c1; ++ __m256i_out = __lasx_xvilvh_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0020002000200020; ++ __m256i_out = __lasx_xvilvh_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvilvh_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvilvh_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000002c; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000002c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000002c; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000002c; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000002c0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000002c0000; ++ __m256i_out = __lasx_xvilvh_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7eeefefefefefefe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7eeefefefefefefe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7e00ee00fe00fe00; ++ *((unsigned long *)&__m256i_result[2]) = 0xfe00fe00fe00fe00; ++ *((unsigned long *)&__m256i_result[1]) = 0x7e00ee00fe00fe00; ++ *((unsigned long *)&__m256i_result[0]) = 0xfe00fe00fe00fe00; ++ __m256i_out = __lasx_xvilvh_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvilvh_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvilvh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvh_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ 
*((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xaad5555500000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xaad5555500000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1f001f00000007ef; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00001fff200007ef; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x1f001f00000007ef; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00001fff200007ef; ++ __m256i_out = __lasx_xvilvh_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff7bfffff1; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff80007fe9; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff7bfffff1; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff80007fe9; ++ *((unsigned long *)&__m256i_result[3]) = 0x40ff40ff40ff40ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x407b40ff40ff40f1; ++ *((unsigned long *)&__m256i_result[1]) = 0x40ff40ff40ff40ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x407b40ff40ff40f1; ++ __m256i_out = __lasx_xvilvh_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff02000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff02000000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvh_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvh_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001fffe0001fffa; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001fffe00018069; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001fffe0001fffa; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001fffe00018069; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff01fffffffeff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff01fffffffaff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff01fffffffeff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff01fffffffaff; ++ __m256i_out = __lasx_xvilvh_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvh_b (__m256i_op0, 
__m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00001ff8d8d8c000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00001ff8d8d90000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00001ff8d8d8c000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00001ff8d8d90000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0200000202000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0200000202000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x00001ff800000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xd8d8c00000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00001ff800000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xd8d8c00000000000; ++ __m256i_out = __lasx_xvilvh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x800080ff800080ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x800080ff800080ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x800080ff800080ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x800080ff800080ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x4000c08000000080; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000080c000c080; ++ *((unsigned long *)&__m256i_op1[1]) = 0x4000c08000000080; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000080c000c080; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000400080ffc080; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000080ff0080; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000400080ffc080; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000080ff0080; ++ __m256i_out = __lasx_xvilvh_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000100; ++ __m256i_out = __lasx_xvilvh_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000001ff03ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000203ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000001ff03ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000203ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000001ff03ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000001ff03ff; ++ __m256i_out = __lasx_xvilvh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000019ffdf403; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000011ffd97c3; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000019ffdf403; ++ *((unsigned long *)&__m256i_op1[0]) = 
0x000000011ffd97c3; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000019ffdf403; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000019ffdf403; ++ __m256i_out = __lasx_xvilvh_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x001fffffffe00000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x001fffffffe00000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x001fffffffe00000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x001fffffffe00000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x001f001fffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffe0ffe000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x001f001fffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffe0ffe000000000; ++ __m256i_out = __lasx_xvilvh_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvilvh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvl.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvl.c +new file mode 100644 +index 000000000..4393045c3 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvl.c +@@ -0,0 +1,620 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvl_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); 
++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvilvl_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000001a00000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000900000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000001a00000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000900000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff0000ffff0000; ++ __m256i_out = __lasx_xvilvl_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffefffffefc; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff0000fffe0000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff0000fefc0000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff0000fffe0000; ++ __m256i_out = __lasx_xvilvl_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffefd; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffffffefd; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffefdfffffefd; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvilvl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000007f7f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000007f7f7f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000007f7f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007f007f78; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffbfffc; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x007f00007f7f0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f00fffb7f78fffc; ++ __m256i_out = __lasx_xvilvl_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8080808080808081; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8080808080808081; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000808000008080; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000808000008081; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvl_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001fffe0001fffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001fffe0001fffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001fffe0001fffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001fffe0001fffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff01fffffffeff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff01fffffffeff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff01fffffffeff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff01fffffffeff; ++ __m256i_out = __lasx_xvilvl_b (__m256i_op0, 
__m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvl_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x07efefefefefefee; ++ *((unsigned long *)&__m256i_op1[2]) = 0x07efefefefefefee; ++ *((unsigned long *)&__m256i_op1[1]) = 0x07efefefefefefee; ++ *((unsigned long *)&__m256i_op1[0]) = 0x07efefefefefefee; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x07efefefefefefee; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x07efefefefefefee; ++ __m256i_out = __lasx_xvilvl_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned 
long *)&__m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffffffffffff; ++ __m256i_out = __lasx_xvilvl_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvl_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_op1[0]) = 
0x0000000000000005; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000005; ++ __m256i_out = __lasx_xvilvl_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvl_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvilvl_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfff3fff3fff3fff3; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff3fff3fff3fff3; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfff3fff3fff3fff3; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfff3fff3fff3fff3; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff00f300ff00f3; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00f300ff00f3; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff00f300ff00f3; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff00f300ff00f3; ++ __m256i_out = __lasx_xvilvl_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvilvl_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff000c0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff00040000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff000c0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff00040000; ++ __m256i_out = __lasx_xvilvl_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff00fe00ff00fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff00fe00ff00fe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff00fe00ff00fe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00fe00ff00fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00fe00fe; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff00fe00fe; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00fe00fe; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff00fe00fe; ++ __m256i_out = __lasx_xvilvl_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000ff0102; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007c000000810081; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000ff0102; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007c000000810081; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fe37fe3001d001d; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff7fff7fff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fe37fe3001d001d; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fff7fff7fff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x007c7fff00007fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00817fff00810000; ++ *((unsigned long *)&__m256i_result[1]) = 0x007c7fff00007fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00817fff00810000; ++ __m256i_out = __lasx_xvilvl_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000001d001d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001d0000001d; ++ __m256i_out = __lasx_xvilvl_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xe0e0e0e0e0e0e0e0; ++ *((unsigned long *)&__m256i_op0[2]) = 0xe0e0e0e0e0e0e0e0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000e0e0e0e0; ++ *((unsigned long *)&__m256i_op0[0]) = 0xe0e0e0e0e0e0e0e0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7000700070007000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7000700070007000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000070007000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7000700070007000; ++ *((unsigned long *)&__m256i_result[3]) = 0xe070e000e070e000; ++ *((unsigned long *)&__m256i_result[2]) = 0xe070e000e070e000; ++ *((unsigned long *)&__m256i_result[1]) = 0xe070e000e070e000; ++ *((unsigned long *)&__m256i_result[0]) = 0xe070e000e070e000; ++ __m256i_out = __lasx_xvilvl_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x003f003f003f0040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x003f003f003f0040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x003f003f003f0040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x003f003f003f0040; ++ *((unsigned long *)&__m256i_result[3]) = 0x00003f3f00003f3f; ++ *((unsigned long *)&__m256i_result[2]) = 0x00003f3f00004040; ++ *((unsigned long *)&__m256i_result[1]) = 0x00003f3f00003f3f; ++ *((unsigned long *)&__m256i_result[0]) = 0x00003f3f00004040; ++ __m256i_out = __lasx_xvilvl_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000100; ++ __m256i_out = __lasx_xvilvl_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffe98; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000064; ++ __m256i_out = __lasx_xvilvl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvl_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvl_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000e000e000e000e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000e000e000e000e; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000e000e; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000e000e; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000e000e; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000e000e; ++ __m256i_out = __lasx_xvilvl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000e000e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000e000e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000e000e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000e000e; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000e000e; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000e000e; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvl_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ 
*((unsigned long *)&__m256i_op0[3]) = 0x0003800400038004; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000a800b000a800b; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0003800400038004; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000a800b000a800b; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000a0080000b00; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000a0080000b00; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000a0080000b00; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000a0080000b00; ++ __m256i_out = __lasx_xvilvl_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfe01fe01fd02fd02; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000003fc03fc0; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfe01fe01fd02fd02; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000003fc03fc0; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x3f00c0003f00c000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x3f00c0003f00c000; ++ __m256i_out = __lasx_xvilvl_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_op1[3]) = 0x498000804843ffe0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x4980008068400000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x498000804843ffe0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x4980008068400000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_result[2]) = 0x4980008068400000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_result[0]) = 0x4980008068400000; ++ __m256i_out = __lasx_xvilvl_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xf000f000f000f000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xf000f010f000f010; ++ *((unsigned long *)&__m256i_op1[1]) = 0xf000f000f000f000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xf000f010f000f010; ++ *((unsigned long *)&__m256i_result[3]) = 
0x00f0000000f00010; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff0ff00fff0ff10; ++ *((unsigned long *)&__m256i_result[1]) = 0x00f0000000f00010; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff0ff00fff0ff10; ++ __m256i_out = __lasx_xvilvl_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvl_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvilvl_w (__m256i_op0, 
__m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvilvl_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffed; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffed; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffed; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffed; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffed; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffed; ++ __m256i_out = __lasx_xvilvl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsgr2vr.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsgr2vr.c +new file mode 100644 +index 
000000000..ce28c4857 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsgr2vr.c +@@ -0,0 +1,272 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8b1414140e0e0e0e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x146014141414146e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x36722a7e66972cd6; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf19998668e5f4b84; ++ long_op1 = 0x0000007942652524; ++ *((unsigned long *)&__m256i_result[3]) = 0x8b1414140e0e0e0e; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000007942652524; ++ *((unsigned long *)&__m256i_result[1]) = 0x36722a7e66972cd6; ++ *((unsigned long *)&__m256i_result[0]) = 0xf19998668e5f4b84; ++ __m256i_out = __lasx_xvinsgr2vr_d (__m256i_op0, long_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0202020201010000; ++ int_op1 = 0x00000045eef14fe8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000eef14fe8; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0202020201010000; ++ __m256i_out = __lasx_xvinsgr2vr_w (__m256i_op0, int_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000200000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x012e2110012e2110; ++ int_op1 = 0x00000000000000ac; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000200000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000000ac; ++ *((unsigned long *)&__m256i_result[0]) = 0x012e2110012e2110; ++ __m256i_out = __lasx_xvinsgr2vr_w (__m256i_op0, int_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsgr2vr_w (__m256i_op0, int_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ 
__m256i_out = __lasx_xvinsgr2vr_w (__m256i_op0, int_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ long_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsgr2vr_d (__m256i_op0, long_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff800000ff800000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_result[2]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_result[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_result[0]) = 0xff80000000000000; ++ __m256i_out = __lasx_xvinsgr2vr_w (__m256i_op0, int_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff0000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff0000ff; ++ int_op1 = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffff0000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffff0000ff; ++ __m256i_out = 
__lasx_xvinsgr2vr_w (__m256i_op0, int_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xe800c000fffeeece; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff383efffedf0c; ++ *((unsigned long *)&__m256i_op0[1]) = 0xe800c000fffeeece; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff383efffedf0c; ++ int_op1 = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0xe800c000fffeeece; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff383e000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0xe800c000fffeeece; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff383efffedf0c; ++ __m256i_out = __lasx_xvinsgr2vr_w (__m256i_op0, int_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ long_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsgr2vr_d (__m256i_op0, long_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0020000000200000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0020000000200000; ++ long_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0020000000200000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsgr2vr_d 
(__m256i_op0, long_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsgr2vr_w (__m256i_op0, int_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4040404040404040; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[2]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000040404040; ++ *((unsigned long *)&__m256i_result[0]) = 0x4040404040404040; ++ __m256i_out = __lasx_xvinsgr2vr_w (__m256i_op0, int_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000048; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000048; ++ long_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000048; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsgr2vr_d (__m256i_op0, long_op1, 
0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ long_op1 = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsgr2vr_d (__m256i_op0, long_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ long_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsgr2vr_d (__m256i_op0, long_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffff7fffffff7; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffff7fffffff7; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffff7fffffff7; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffff7fffffff7; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffff7fffffff7; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffff7fffffff7; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffff700000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffff7fffffff7; ++ __m256i_out = __lasx_xvinsgr2vr_w (__m256i_op0, int_op1, 0x2); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ long_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsgr2vr_d (__m256i_op0, long_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000d6d6d; ++ int_op1 = 0x00000000090b0906; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000090b0906; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000d6d6d; ++ __m256i_out = __lasx_xvinsgr2vr_w (__m256i_op0, int_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x000000000000001e; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000001e00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsgr2vr_w (__m256i_op0, int_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfefefefefefefefe; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_result[2]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000050005; ++ *((unsigned long *)&__m256i_result[0]) = 0xfefefefefefefefe; ++ __m256i_out = __lasx_xvinsgr2vr_w (__m256i_op0, int_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsve0.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsve0.c +new file mode 100644 +index 000000000..644d2ce4b +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsve0.c +@@ -0,0 +1,380 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsve0_d (__m256i_op0, __m256i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000050005; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsve0_w (__m256i_op0, __m256i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000004fb; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[2]) = 0x00000000000004fb; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsve0_w (__m256i_op0, __m256i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsve0_d (__m256i_op0, __m256i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffefe00000000; ++ __m256i_out = __lasx_xvinsve0_w (__m256i_op0, __m256i_op1, 0x0); 
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsve0_d (__m256i_op0, __m256i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000170017; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000170017; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000170017; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000170017; ++ __m256i_out = __lasx_xvinsve0_w (__m256i_op0, __m256i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsve0_d (__m256i_op0, __m256i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffefffffffe; ++ __m256i_out = __lasx_xvinsve0_w (__m256i_op0, __m256i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x4040404040404040; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000040404040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsve0_w (__m256i_op0, __m256i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvinsve0_w (__m256i_op0, __m256i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsve0_w (__m256i_op0, __m256i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff000200000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff000200000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x001f00e0ff800000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x001f00e0ff800000; ++ *((unsigned long *)&__m256i_result[3]) = 0xff80000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff000200000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff000200000000; ++ __m256i_out = __lasx_xvinsve0_w (__m256i_op0, __m256i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsve0_w (__m256i_op0, __m256i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x9090909090909090; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op0[1]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op0[0]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op1[3]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op1[2]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op1[1]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op1[0]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_result[3]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_result[2]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_result[1]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_result[0]) = 0x9090909090909090; ++ __m256i_out = __lasx_xvinsve0_d (__m256i_op0, __m256i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvinsve0_d (__m256i_op0, __m256i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000040b200002fd4; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0x00007fff0000739c; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000040b200002fd4; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00007fff0000739c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000739c; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000000ff; ++ __m256i_out = __lasx_xvinsve0_w (__m256i_op0, __m256i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff800080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff800080000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff800080000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff800000000000; ++ __m256i_out = __lasx_xvinsve0_d (__m256i_op0, __m256i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsve0_w (__m256i_op0, __m256i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x6018000000000cd1; ++ *((unsigned long *)&__m256i_op0[2]) = 0x6040190d00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x6018000000000cd1; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6040190d00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x04e8296f18181818; ++ *((unsigned long *)&__m256i_op1[2]) = 0x132feea900000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x04e8296f18181818; ++ *((unsigned long *)&__m256i_op1[0]) = 0x132feea900000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x6018000000000cd1; ++ *((unsigned long *)&__m256i_result[2]) = 0x6040190d00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x132feea900000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x6040190d00000000; ++ __m256i_out = __lasx_xvinsve0_d (__m256i_op0, __m256i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvinsve0_d (__m256i_op0, __m256i_op1, 0x3); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ff88ffc0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ff78ffc0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ff88ffc0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ff78ffc0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ff78ffc0; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000002000000020; ++ __m256i_out = __lasx_xvinsve0_d (__m256i_op0, __m256i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x03fbfffc03fc07fc; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x03fbfffc03fc07fc; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000ffff0000ffff; ++ __m256i_out = __lasx_xvinsve0_d (__m256i_op0, __m256i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffff0020; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff8001ffff0001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffff0020; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0xffff8001ffff0001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffff0020; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff8001ffff0001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff8001ffff0001; ++ __m256i_out = __lasx_xvinsve0_w (__m256i_op0, __m256i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvprem.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvprem.c +new file mode 100644 +index 000000000..9346f9bfb +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvprem.c +@@ -0,0 +1,20 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpremi.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpremi.c +new file mode 100644 +index 000000000..9346f9bfb +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpremi.c +@@ -0,0 +1,20 @@ ++/* { dg-do run } */ 
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ return 0; ++} +-- +2.33.0 + diff --git a/LoongArch-Add-tests-for-ASX-vector-xvextl-xvsra-xvsr.patch b/LoongArch-Add-tests-for-ASX-vector-xvextl-xvsra-xvsr.patch new file mode 100644 index 0000000000000000000000000000000000000000..17c25a0a16573954b090ac75ba7332663e92e970 --- /dev/null +++ b/LoongArch-Add-tests-for-ASX-vector-xvextl-xvsra-xvsr.patch @@ -0,0 +1,4737 @@ +From bf5805833fc26d26a1fbbdc7dfe10109c0c676f9 Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Tue, 12 Sep 2023 15:49:41 +0800 +Subject: [PATCH 107/124] LoongArch: Add tests for ASX vector + xvextl/xvsra/xvsran/xvsrarn instructions. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lasx/lasx-xvextl-1.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvextl-2.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvsra.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvsrai.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvsran.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvsrani.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvsrar.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvsrari.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvsrarn.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvsrarni.c: New test. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lasx/lasx-xvextl-1.c | 86 +++ + .../loongarch/vector/lasx/lasx-xvextl-2.c | 163 ++++ + .../loongarch/vector/lasx/lasx-xvsra.c | 545 +++++++++++++ + .../loongarch/vector/lasx/lasx-xvsrai.c | 504 ++++++++++++ + .../loongarch/vector/lasx/lasx-xvsran.c | 455 +++++++++++ + .../loongarch/vector/lasx/lasx-xvsrani.c | 545 +++++++++++++ + .../loongarch/vector/lasx/lasx-xvsrar.c | 725 ++++++++++++++++++ + .../loongarch/vector/lasx/lasx-xvsrari.c | 471 ++++++++++++ + .../loongarch/vector/lasx/lasx-xvsrarn.c | 500 ++++++++++++ + .../loongarch/vector/lasx/lasx-xvsrarni.c | 636 +++++++++++++++ + 10 files changed, 4630 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsra.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrai.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsran.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrani.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrar.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrari.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarn.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarni.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-1.c +new file mode 100644 +index 000000000..c0d3e8e75 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-1.c +@@ -0,0 +1,86 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i 
__m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextl_q_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextl_q_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextl_q_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x43ef878780000009; ++ __m256i_out = __lasx_xvextl_q_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000201220001011c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000201220001011c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000201220001011c; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000201220001011c; ++ __m256i_out = __lasx_xvextl_q_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextl_q_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-2.c +new file mode 100644 +index 000000000..8c7ab4ed3 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-2.c +@@ -0,0 +1,163 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvextl_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvextl_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextl_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextl_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextl_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000083f95466; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010100005400; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000083f95466; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010100005400; ++ __m256i_out = __lasx_xvextl_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextl_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = 
__lasx_xvextl_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextl_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextl_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextl_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001; ++ 
*((unsigned long *)&__m256i_op0[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvextl_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvextl_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsra.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsra.c +new file mode 100644 +index 000000000..2bf9ae9c3 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsra.c +@@ -0,0 +1,545 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int 
long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xc5890a0a07070707; ++ *((unsigned long *)&__m256i_op1[2]) = 0x006be0e4180b8024; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1b399540334c966c; ++ *((unsigned long *)&__m256i_op1[0]) = 0x71d7dd7aefcac001; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvsra_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsra_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 
0xffffffffbf7f7fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffe651bfff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffbf7f7fff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffe651bfff; ++ __m256i_out = __lasx_xvsra_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsra_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfc00000000000000; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffe0000000; ++ __m256i_out = __lasx_xvsra_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsra_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsra_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000800000004; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000bf6e0000c916; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000030000fff3; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000800000004; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000bf6e0000c916; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000030000fff3; ++ __m256i_out = __lasx_xvsra_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff8c80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fff0e400; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffff8c80; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000fffffe40; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff8c80; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fff0e400; ++ __m256i_out = __lasx_xvsra_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsra_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x9cffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x9cffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsra_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1cfd000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1cfd000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x1cfd000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x1cfd000000000000; ++ __m256i_out = __lasx_xvsra_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsra_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000017e007ffe02; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff017e01fe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x017e00ff017e00ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff017e01fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsra_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000004500f300fb; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000004500f300fb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsra_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsra_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x6161616161616161; ++ *((unsigned long *)&__m256i_op0[2]) = 0x6161616161616161; ++ *((unsigned long *)&__m256i_op0[1]) = 0x6161616161616161; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6161616161616161; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000fffffffa; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000fffffffa; ++ *((unsigned long *)&__m256i_result[3]) = 0x6161616161616161; ++ *((unsigned long *)&__m256i_result[2]) = 0x6161616100000018; ++ *((unsigned long *)&__m256i_result[1]) = 0x6161616161616161; ++ *((unsigned long *)&__m256i_result[0]) = 0x6161616100000018; ++ __m256i_out = __lasx_xvsra_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000004411; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000004411; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsra_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsra_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x009f00f8007e00f0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007f007f0081007f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x009f00f8007e00f0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007f007f0081007f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x009f00f8007e00f0; ++ *((unsigned long *)&__m256i_result[2]) = 0x007f007f0081007f; ++ *((unsigned long *)&__m256i_result[1]) = 0x009f00f8007e00f0; ++ *((unsigned long *)&__m256i_result[0]) = 0x007f007f0081007f; ++ __m256i_out = __lasx_xvsra_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff010000ff017e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x01fe01ae00ff00ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff010000ff017e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x01fe01ae00ff00ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x00ff010000ff017e; ++ *((unsigned long *)&__m256i_result[2]) = 0x01fe01ae00ff00ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff010000ff017e; ++ *((unsigned long *)&__m256i_result[0]) = 0x01fe01ae00ff00ff; ++ __m256i_out = __lasx_xvsra_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x01ff0020ff1f001f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fe1ffe0ffe1ffe0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x01ff0020ff1f001f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fe1ffe0ffe1ffe0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x01ff0020ff1f001f; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fe1ffe0ffe1ffe0; ++ *((unsigned long *)&__m256i_result[1]) = 0x01ff0020ff1f001f; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fe1ffe0ffe1ffe0; ++ __m256i_out = __lasx_xvsra_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0007000700070007; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0007000700070007; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0007000700070007; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0007000700070007; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0007000700070007; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0007000700070007; ++ __m256i_out = __lasx_xvsra_w 
(__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsra_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x6018000000000cd1; ++ *((unsigned long *)&__m256i_op0[2]) = 0x6040190d20227a78; ++ *((unsigned long *)&__m256i_op0[1]) = 0x132feeabd2d33b38; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6040190d00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0004000f00100003; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000400030010000f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0004000f00100003; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000400030010000f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000c0300000019a; ++ *((unsigned long *)&__m256i_result[2]) = 0x0c08032100004044; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000265ffa5a6767; ++ *((unsigned long *)&__m256i_result[0]) = 0x0c08032100000000; ++ __m256i_out = __lasx_xvsra_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvsra_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsra_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsra_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ff88ff88; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ff88ff88; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsra_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007f433c78; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007f433c78; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007f433c78; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x000000007f433c78; ++ __m256i_out = __lasx_xvsra_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsra_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00feff0100feff01; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00feff0100feff01; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvsra_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff801000000010; ++ *((unsigned long *)&__m256i_op0[2]) = 
0xffff800300000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff801000000010; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff800300000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff801000000010; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff800300000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff801000000010; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff800300000000; ++ __m256i_out = __lasx_xvsra_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff000000017fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fff000000017fff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsra_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000f00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000f00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsra_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrai.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrai.c +new file mode 100644 +index 000000000..a51be899b +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrai.c +@@ -0,0 +1,504 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00d6c1c830160048; ++ *((unsigned long *)&__m256i_op0[1]) = 0x36722a7e66972cd6; ++ *((unsigned long *)&__m256i_op0[0]) = 0xe3aebaf4df958004; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x006be0e4180b0024; ++ *((unsigned long *)&__m256i_result[1]) = 0x1b39153f334b166b; ++ *((unsigned long *)&__m256i_result[0]) = 0xf1d7dd7aefcac002; ++ 
__m256i_out = __lasx_xvsrai_h (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_w (__m256i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_d (__m256i_op0, 0x36); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_d (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_b (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op0[2]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op0[1]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op0[0]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_result[3]) = 0x1555156a1555156a; ++ *((unsigned long *)&__m256i_result[2]) = 0x1555156a1555156a; ++ *((unsigned long *)&__m256i_result[1]) = 0x1555156a1555156a; ++ *((unsigned long *)&__m256i_result[0]) = 0x1555156a1555156a; ++ __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_w (__m256i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000bea20000e127; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000c0460000721f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000de910000f010; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000006f9; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000bea20; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000c0460; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000de910; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_d (__m256i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvsrai_b (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000003f00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000003f00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffff800fffff800; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffff800fffff800; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffff800fffff800; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffff800fffff800; ++ __m256i_out = __lasx_xvsrai_w (__m256i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000007f017f01; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000007f017f01; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000007f017f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000007f017f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_w (__m256i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out 
= __lasx_xvsrai_b (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_d (__m256i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_w (__m256i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_b (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m256i_op0[2]) = 0xffd8ffc7ffdaff8a; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffd8ffc7ffdaff8a; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrai_d (__m256i_op0, 0x3f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0fff01800fff0181; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0fff01800fff0181; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0007ff800007ff80; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0007ff800007ff80; ++ __m256i_out = __lasx_xvsrai_w (__m256i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0xfff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff000000000000; ++ __m256i_out = __lasx_xvsrai_b (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrai_d (__m256i_op0, 0x23); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000f91; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000f91; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_w (__m256i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc03ae000ffff6000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc600000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc03ae000ffff6000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc600000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xff00ff00ffff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xff00ff00ffff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00000000000000; ++ __m256i_out = __lasx_xvsrai_b (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff00ff00ffff0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff00ff00ffff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffc03fffffffc0; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffc00000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffc03fffffffc0; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffc00000000000; ++ __m256i_out = __lasx_xvsrai_w (__m256i_op0, 
0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000007ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000007ffffffff; ++ __m256i_out = __lasx_xvsrai_d (__m256i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_b (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x000000007f7f7f80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007f7f7f80; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000fef0ff0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000fef0ff0; ++ __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc008fa01c0090000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3f804000c008f404; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc008fa01c0090000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3f804000c008f404; ++ *((unsigned long *)&__m256i_result[3]) = 0xffe0047d00e00480; ++ *((unsigned long *)&__m256i_result[2]) = 0x001fc0200060047a; ++ *((unsigned long *)&__m256i_result[1]) = 0xffe0047d00e00480; ++ *((unsigned long *)&__m256i_result[0]) = 0x001fc0200060047a; ++ __m256i_out = __lasx_xvsrai_d (__m256i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffe0047d00e00480; ++ *((unsigned long *)&__m256i_op0[2]) = 0x001fc0200060047a; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffe0047d00e00480; ++ *((unsigned long *)&__m256i_op0[0]) = 0x001fc0200060047a; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000fffe00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000fffe00000000; ++ __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; 
++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrai_w (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_w (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long *)&__m256i_result[3]) = 0x1e9e1e9e1e9e1e9e; ++ *((unsigned long *)&__m256i_result[2]) = 0x1e9e1e9e1e9e1e9e; ++ *((unsigned long *)&__m256i_result[1]) = 0x1e9e1e9e1e9e1e9e; ++ *((unsigned long *)&__m256i_result[0]) = 0x1e9e1e9e1e9e1e9e; ++ __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrai_b (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_w (__m256i_op0, 0xd); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff800000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff800000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffc0000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffc0000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffff0000; ++ __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001fffe0005fff9; ++ *((unsigned long *)&__m256i_op0[2]) = 0x04f004f204f204f0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001fffe0005fff9; ++ *((unsigned long *)&__m256i_op0[0]) = 0x04f004f204f204f0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000900000009; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000900000009; ++ __m256i_out = __lasx_xvsrai_w (__m256i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x761ed60b5d7f0000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0xdc9938afafe904f1; ++ *((unsigned long *)&__m256i_op0[1]) = 0x761ed60b5d7f0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xdc9938afafe904f1; ++ *((unsigned long *)&__m256i_result[3]) = 0x03b0feb002eb0000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfee401c5fd7f0027; ++ *((unsigned long *)&__m256i_result[1]) = 0x03b0feb002eb0000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfee401c5fd7f0027; ++ __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsran.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsran.c +new file mode 100644 +index 000000000..e08934b12 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsran.c +@@ -0,0 +1,455 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvsran_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7f80780000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7f80780000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out 
= __lasx_xvsran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsran_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000013ffffffec; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000013ffffebd8; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000013ffffffec; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000013ffffebd8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsran_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsran_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfebdff3eff3dff52; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfebdff3eff3dff52; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfebdff3eff3dff52; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfebdff3eff3dff52; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ffe7ffe7ffe7ffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007ffe7ffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ffe7ffe7ffe8000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000807e7ffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8091811081118110; ++ *((unsigned long *)&__m256i_op1[2]) = 0x80a6802680208015; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8091811081110013; ++ *((unsigned long *)&__m256i_op1[0]) = 0x80a6802680200018; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffefffe0000feff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[0]) = 0xfffeff0000007e7f; ++ __m256i_out = __lasx_xvsran_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsran_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000800000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsran_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvsran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsran_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0010000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0008000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0010000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0008000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsran_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000504fffff3271; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff47b4ffff5879; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000504fffff3271; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff47b4ffff5879; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff000000000000; ++ __m256i_out = __lasx_xvsran_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); 
++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsran_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000007c8; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000007c8; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000001fe01fe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000ff0100; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000001fe01fe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000ff0100; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000c8; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000000c8; ++ __m256i_out = __lasx_xvsran_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000014402080144; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000014402080144; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000000e; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000000c; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000000e; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000000c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000440800; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000440800; ++ __m256i_out = __lasx_xvsran_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x3d3d3d3d3d3d3d3d; ++ __m256i_out = __lasx_xvsran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffc01fc01; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000003fc03bbc; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffc01fc01; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000003fc03bbc; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000001010100; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000405; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000001010100; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000405; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfc01fc0101fe01dd; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfc01fc0101fe01dd; ++ __m256i_out = __lasx_xvsran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsran_w_d (__m256i_op0, 
__m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsran_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000055; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000054; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff0000ffff0000; ++ __m256i_out = __lasx_xvsran_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrani.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrani.c +new file mode 100644 +index 000000000..44c20a954 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrani.c +@@ -0,0 +1,545 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000003ffffffff; ++ __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xe1e800002f03988d; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xe1e800002f03988d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xff0f400001781cc4; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xff0f400001781cc4; ++ __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc5c4c5c5c5c5c5c5; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc5c545c545c545c5; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc5c4c5c5c5c5c5c5; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc5c545c545c545c5; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000ff000000f8; ++ *((unsigned long *)&__m256i_op1[2]) = 0xbc8ff0ffffffcff8; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000ff000000f8; ++ *((unsigned long *)&__m256i_op1[0]) = 0xbc8ff0ffffffcff8; ++ *((unsigned long *)&__m256i_result[3]) = 0xfcfcfcfcfc040404; ++ 
*((unsigned long *)&__m256i_result[2]) = 0x00000000fbfffffc; ++ *((unsigned long *)&__m256i_result[1]) = 0xfcfcfcfcfc040404; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fbfffffc; ++ __m256i_out = __lasx_xvsrani_b_h (__m256i_op0, __m256i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x14131211100f0e0d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0c0b0a0908070605; ++ *((unsigned long *)&__m256i_op0[1]) = 0x14131211100f0e0d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0c0b0a0908070605; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0a09080706050403; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0a09080706050403; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrani_b_h (__m256i_op0, __m256i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000fffffefd; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000fffffefd; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvsrani_b_h (__m256i_op0, __m256i_op1, 
0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000080; ++ __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x40); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000002a542a; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000002a542a; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000242; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000242; ++ __m256i_out = __lasx_xvsrani_b_h (__m256i_op0, __m256i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0707feb608c9328b; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc237bd65fc892985; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0707feb608c9328b; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0xc237bd65fc892985; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00150015003a402f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x333568ce26dcd055; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00150015003a402f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x333568ce26dcd055; ++ *((unsigned long *)&__m256i_result[3]) = 0x0e0f1192846ff912; ++ *((unsigned long *)&__m256i_result[2]) = 0x002a0074666a4db9; ++ *((unsigned long *)&__m256i_result[1]) = 0x0e0f1192846ff912; ++ *((unsigned long *)&__m256i_result[0]) = 0x002a0074666a4db9; ++ __m256i_out = __lasx_xvsrani_h_w (__m256i_op0, __m256i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffdfffffffdff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffdfffffffdff; ++ __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x37); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8080808000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8080808000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x3f7f7f7eff800000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0x3f7f7f7eff800000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000001010000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007efeff00; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000001010000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007efeff00; ++ __m256i_out = __lasx_xvsrani_b_h (__m256i_op0, __m256i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff3eff3eff3eff3e; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff3eff3eff3eff3e; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffff3e; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffff3e; ++ __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x70); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0002000200020018; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0002000200020008; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00c0000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[0]) = 0x0040000000000000; ++ __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrani_h_w (__m256i_op0, __m256i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrani_b_h (__m256i_op0, __m256i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff; ++ 
*((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1f001f00000007ef; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00001fff200007ef; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000f0f0003; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000f1003; ++ __m256i_out = __lasx_xvsrani_b_h (__m256i_op0, __m256i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000fc38fc38; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000fc38fc38; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000fefefe000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000fefefe000000; ++ __m256i_out = __lasx_xvsrani_b_h (__m256i_op0, __m256i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned 
long *)&__m256i_op1[2]) = 0x01010101010101c9; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x01010101010101c9; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x1010101010101010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x1010101010101010; ++ __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x2c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x01010101010101c9; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x01010101010101c9; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000781; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_result[3]) = 0x0008080808080808; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0008080808080808; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000003c; ++ __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x45); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00f3009500db00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00f3009500db00ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000003cc0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000003cc0; ++ __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x6a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000400100013; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000400100014; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000400100013; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000400000004; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0a0a000000000a0a; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0a0a0a0a00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0a0a000000000a0a; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0a0a0a0a00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000020200000202; ++ *((unsigned long *)&__m256i_result[2]) = 0x4100004141410000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000020200000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x4100004141410000; ++ __m256i_out = __lasx_xvsrani_b_h (__m256i_op0, __m256i_op1, 0x3); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000fff8579f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fff8579f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000fff8579f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000fff8579f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000003; ++ __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000956a00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000956a00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_result[3]) = 0x007fffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xb500000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x007fffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xb500000000000000; ++ __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x29); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000001010100; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000405; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000001010100; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000405; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffe00000ffe00000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffe00000ffe00000; ++ __m256i_out = __lasx_xvsrani_h_w (__m256i_op0, __m256i_op1, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrani_w_d (__m256i_op0, __m256i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrani_w_d (__m256i_op0, __m256i_op1, 0x34); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrani_b_h (__m256i_op0, __m256i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x66); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffc0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffc0; ++ __m256i_out = __lasx_xvsrani_b_h (__m256i_op0, __m256i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffff80; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffff80; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrani_b_h (__m256i_op0, __m256i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrani_h_w (__m256i_op0, __m256i_op1, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffce; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffce; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x6b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000040e7; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000040e7; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000200000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000200000000000; ++ __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x21); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000900000009; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000900000009; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x3ff9fffa3ff9fffa; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x3ff9fffa3ff9fffa; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000007ff3; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000007ff3; ++ __m256i_out = __lasx_xvsrani_w_d (__m256i_op0, __m256i_op1, 0x2f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrar.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrar.c +new file mode 100644 +index 000000000..fb47385c0 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrar.c +@@ -0,0 +1,725 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x38a966b31be83ee9; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5f6108dc25b80001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf41a56e8a20878d7; ++ *((unsigned long *)&__m256i_op0[0]) = 0x683b8b67e20c0001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000501e99b; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000109973de7; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000001020f22; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000001890b7a39; ++ *((unsigned long *)&__m256i_result[3]) = 0x38a966b301f41ffd; ++ *((unsigned long *)&__m256i_result[2]) = 0x5f6108ee13ff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0xf41a56e8d10201f6; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x683b8b34f1020001; ++ __m256i_out = __lasx_xvsrar_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000707; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000010200000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000070300000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x01480000052801a2; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffdcff64; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvsrar_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x2020000020200000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2020000020200000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0008000001010000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101000001010000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x2020000020200000; ++ *((unsigned long *)&__m256i_result[2]) = 0x2020000020200000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0008000001010000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101000001010000; ++ __m256i_out = __lasx_xvsrar_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrar_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0020000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0020000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff01ff3400000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ff83ff01; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0xbabababababababa; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xbabababababababa; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffcc8000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff82037dfd0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvsrar_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0707feb60707b7d0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x45baa7ef6a95a985; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0707feb60707b7d0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x45baa7ef6a95a985; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0707feb60707b7d0; ++ *((unsigned long *)&__m256i_result[2]) = 0x45baa7ef6a95a985; ++ *((unsigned long *)&__m256i_result[1]) = 0x0707feb60707b7d0; ++ *((unsigned long *)&__m256i_result[0]) = 0x45baa7ef6a95a985; ++ __m256i_out = __lasx_xvsrar_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0707feb60707b7d0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x45baa7ef6a95a985; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0707feb60707b7d0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x45baa7ef6a95a985; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000800; ++ __m256i_out = __lasx_xvsrar_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000d0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000d0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000d0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000d0000; ++ __m256i_out = __lasx_xvsrar_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvsrar_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000001dc; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000001dc; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000001a00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000001a00; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000001a00; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000001a00; ++ __m256i_out = 
__lasx_xvsrar_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff02ff; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000ffffffff0100; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00fefffeff02ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffff0100; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff00000100; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00feff00000000; ++ __m256i_out = __lasx_xvsrar_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x2b2a292827262524; ++ *((unsigned long *)&__m256i_op1[2]) = 0x232221201f1e1d1c; ++ *((unsigned long *)&__m256i_op1[1]) = 0x2b2a292827262524; ++ *((unsigned long *)&__m256i_op1[0]) = 0x232221201f1e1d1c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7171717171717171; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8e8e8e8e8f0e8e8e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7171717171717171; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8e8e8e8e8f0e8e8e; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000007ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000007ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000007ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000007ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x7171717171010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x8e8e8e8e8f00ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7171717171010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x8e8e8e8e8f00ffff; ++ __m256i_out = __lasx_xvsrar_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000465; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000465; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000465; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000465; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000010001; ++ __m256i_out = __lasx_xvsrar_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffe05f8102; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffe05f8102; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffe05f8102; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffe05f8102; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000420080000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5fff5fff607f0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000420080000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5fff5fff607f0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000420080000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x5fff5fff607f0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000420080000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x5fff5fff607f0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000420080000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000001607f0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000420080000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000001607f0000; ++ __m256i_out = __lasx_xvsrar_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000080000009; ++ *((unsigned long *)&__m256i_op0[2]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000080000009; ++ *((unsigned long *)&__m256i_op0[0]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000080000009; ++ *((unsigned long *)&__m256i_result[2]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000080000009; ++ *((unsigned long *)&__m256i_result[0]) = 0x43ef878780000009; ++ __m256i_out = __lasx_xvsrar_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffa3; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000165a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffa3; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000165a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001000100000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00005053000000ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000100000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00005053000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffa3; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000ffff0000ffa3; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff800000000000; ++ __m256i_out = __lasx_xvsrar_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000e0000000e00; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000e0000000e00; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000e000e000e000e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000e000e000e000e; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000e0000000e00; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000e0000000e00; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x6018000000000cd1; ++ *((unsigned long *)&__m256i_op0[2]) = 0x6040190d00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x6018000000000cd1; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6040190d00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000800200027; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000800200028; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000800200027; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000800200028; ++ *((unsigned long *)&__m256i_result[3]) = 0x006018000000001a; ++ *((unsigned long *)&__m256i_result[2]) = 0x0060401900000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x006018000000001a; ++ *((unsigned long *)&__m256i_result[0]) = 0x0060401900000000; ++ __m256i_out = __lasx_xvsrar_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrar_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrar_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000040404040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000040404040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfefefefe3f800000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfefefefe3f800000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000040404040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000040404040; ++ __m256i_out = __lasx_xvsrar_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000ffffff1dff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff1dffffff1dff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000ffffff1dff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff1dffffff1dff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff1dffffff1dff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff1dffffff1dff; ++ __m256i_out = __lasx_xvsrar_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrar_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrari.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrari.c +new file mode 100644 +index 000000000..63ba92ead +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrari.c +@@ -0,0 +1,471 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x81f7f2599f0509c2; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x51136d3c78388916; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffc0fcffffcf83; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000288a00003c1c; ++ __m256i_out = __lasx_xvsrari_w (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8b1414140e0e0e0e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00d6c1c830160048; ++ *((unsigned long *)&__m256i_op0[1]) = 0x36722a7e66972cd6; ++ *((unsigned long *)&__m256i_op0[0]) = 0xe3aebaf4df958004; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffe000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000ffff00010000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100020001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000fffffffffffe; ++ __m256i_out = __lasx_xvsrari_h (__m256i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00001f41ffffbf00; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000040000fff8; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_h (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_d (__m256i_op0, 0x2a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffff6; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffff6; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffff6; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffff6; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ 
__m256i_out = __lasx_xvsrari_h (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_b (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00007dfd; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00007dfd; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_w (__m256i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_b (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_h (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_d (__m256i_op0, 0x22); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000907; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000907; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_w (__m256i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000fffffffa; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op0[0]) = 0x00000000fffffffa; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_d (__m256i_op0, 0x2a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_w (__m256i_op0, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_w (__m256i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_d (__m256i_op0, 0x35); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_h (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_w (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x20fc000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x20fc000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ 
__m256i_out = __lasx_xvsrari_b (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_b (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_w (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x007f0000007f0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x007f0000007f0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000003f8000004; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000003f8000004; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_d (__m256i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_h (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x10fbe1e2e0000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x10fbe1e2e0000002; ++ __m256i_out = __lasx_xvsrari_w (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_b (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_w (__m256i_op0, 0x10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_b (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007f7f7f80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007f7f7f80; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000040004; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000040004; ++ __m256i_out = __lasx_xvsrari_h (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_h (__m256i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_h (__m256i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_b (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ 
__m256i_out = __lasx_xvsrari_d (__m256i_op0, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_b (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_w (__m256i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_h (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffff8000; ++ __m256i_out = __lasx_xvsrari_d (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_d (__m256i_op0, 0x26); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_w (__m256i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_b (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000400000004000; ++ __m256i_out = __lasx_xvsrari_w (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_b (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000010000000100; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff81007fff0100; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000010000000100; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff81007fff0100; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000008000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0003fffc0803fff8; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000008000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0003fffc0803fff8; ++ __m256i_out = __lasx_xvsrari_d (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarn.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarn.c +new file mode 100644 +index 000000000..c145f7ff3 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarn.c +@@ -0,0 +1,500 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000010000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000010000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000010000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000010000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvsrarn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x6100000800060005; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5ee1c073b800c916; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5ff00007fff9fff3; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0209fefb08140000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0003fffc00060000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00080000000cc916; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000006fff3; ++ __m256i_out = __lasx_xvsrarn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x6100000800060005; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5ee1c073b800c916; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5ff00007fff9fff3; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ffff00ff000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00080005c073c916; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000100000007fff3; ++ __m256i_out = __lasx_xvsrarn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; 
++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvsrarn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00050008000e0010; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0007000800100010; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0x00050008000e0010; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0007000800100010; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000040404040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000002affaa; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff002affaa; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000002affaa; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffd50055; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x002affaa00000000; ++ __m256i_out = __lasx_xvsrarn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000001f0000001f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000001f0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x00007f7f00007f00; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00007f7f00007fff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0007fff8000ffff0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000007fff8; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0007fff8000ffff0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000007fff8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x007f000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x007f00ff00000000; ++ __m256i_out = __lasx_xvsrarn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_op1[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000abff0000abff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000abff0000abff; ++ __m256i_out = __lasx_xvsrarn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0020000000200000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0020000000200000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001000000010; ++ __m256i_out = __lasx_xvsrarn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x80000000ff800000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x80000000ff800000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffff800000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffff800000; ++ __m256i_out = __lasx_xvsrarn_w_d (__m256i_op0, 
__m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7000700070007000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7000700070007000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000070007000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7000700070007000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x4040403fd03fd040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x4040403fd03fd040; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffd03fd040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x4040403fd03fd040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001010000010100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000010000010100; ++ __m256i_out = __lasx_xvsrarn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xc800c800c800c800; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8800c800c800c801; ++ *((unsigned long *)&__m256i_op1[1]) = 0xc800c800c800c800; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8800c800c800c801; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000c8; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x00000000000000c8; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000086000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00040ff288000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000086000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00040ff288000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x5555555555555555; ++ *((unsigned long *)&__m256i_op1[2]) = 0x5555555555555555; ++ *((unsigned long *)&__m256i_op1[1]) = 0x5555555555555555; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x5555555555555555; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000fc300000fc40; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000fc300000fc40; ++ __m256i_out = __lasx_xvsrarn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc008fa01c0090000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3f804000c008f404; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc008fa01c0090000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3f804000c008f404; ++ *((unsigned long *)&__m256i_op1[3]) = 0x82ff902d83000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7f80000082fe0bd9; ++ *((unsigned long *)&__m256i_op1[1]) = 0x82ff902d83000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7f80000082fe0bd9; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xc0090000c0200060; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xc0090000c0200060; ++ __m256i_out = __lasx_xvsrarn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf3f3f3f3f3f3f4f3; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf3f3f3f3f3f3f4f3; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000f3f3f4f3; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x00000000f3f3f4f3; ++ __m256i_out = __lasx_xvsrarn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrarn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000fff8579f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fff8579f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000fff8579f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fff8579f; ++ __m256i_out = __lasx_xvsrarn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfefefefefefefefe; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0xfefefefe01010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfefefefe01010101; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfefefefe01010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfefefefe01010101; ++ __m256i_out = __lasx_xvsrarn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x498000804843ffe0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x4980008068400000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x498000804843ffe0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x4980008068400000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff010100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff010100000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 
0xffff010100000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff010100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000810001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000810001; ++ __m256i_out = __lasx_xvsrarn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010110; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010110; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0004000400040004; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0004000500040005; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0004000400040004; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0004000500040005; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8282828282828282; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8768876887688769; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8282828282828282; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8768876887688769; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000104000200; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000104000200; ++ __m256i_out = __lasx_xvsrarn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarni.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarni.c +new file mode 100644 +index 000000000..b5c0fca74 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarni.c +@@ -0,0 +1,636 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, 
long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000007f007f5; ++ *((unsigned long *)&__m256i_op1[3]) = 0x002e4db200000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000315ac0000d658; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00735278007cf94c; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0003ed8800031b38; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_w_d (__m256i_op0, __m256i_op1, 0x3d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_w_d (__m256i_op0, __m256i_op1, 0x3d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_h_w (__m256i_op0, __m256i_op1, 0x12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffefefffffcfa; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffff8fffffff8; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffff8fc000000; ++ __m256i_out = __lasx_xvsrarni_w_d (__m256i_op0, __m256i_op1, 0x25); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000800000008; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff7ff77fff7ff7; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000800000008; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff7ff77fff7ff7; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 
0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000001000010; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000002000000022; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000002000000022; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000004; ++ __m256i_out = __lasx_xvsrarni_d_q (__m256i_op0, __m256i_op1, 0x3e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x6300000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x6300000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_d_q (__m256i_op0, __m256i_op1, 0x22); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_h_w (__m256i_op0, __m256i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x5980000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x5980000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000016600000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000016600000000; ++ __m256i_out = __lasx_xvsrarni_d_q (__m256i_op0, __m256i_op1, 0x16); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffefe00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_d_q (__m256i_op0, __m256i_op1, 0x7f); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_w_d (__m256i_op0, __m256i_op1, 0x12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000002a5429; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000002a5429; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000055; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000055; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_h_w (__m256i_op0, __m256i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_d_q (__m256i_op0, __m256i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000045; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000d0005; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000045; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000d0005; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_d_q (__m256i_op0, __m256i_op1, 0x50); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_w_d (__m256i_op0, __m256i_op1, 0x2f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_h_w (__m256i_op0, __m256i_op1, 0x16); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_b_h (__m256i_op0, __m256i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_w_d (__m256i_op0, __m256i_op1, 0x20); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00550f0000550f00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000015c015c0; ++ *((unsigned long *)&__m256i_result[2]) = 0xc0c0c0cdc0c0c0cd; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xc0c0c0cdc0c0c0cd; ++ __m256i_out = __lasx_xvsrarni_b_h (__m256i_op0, __m256i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x001f001f001f001f; ++ 
*((unsigned long *)&__m256i_op0[2]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0003030300000300; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0003030300000300; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0003030300000100; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0003030300000100; ++ *((unsigned long *)&__m256i_result[3]) = 0x00f800f800f800f8; ++ *((unsigned long *)&__m256i_result[2]) = 0x0018181800181818; ++ *((unsigned long *)&__m256i_result[1]) = 0x00f800f800f800f8; ++ *((unsigned long *)&__m256i_result[0]) = 0x0018181800181818; ++ __m256i_out = __lasx_xvsrarni_w_d (__m256i_op0, __m256i_op1, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x43d03bfff827ea21; ++ *((unsigned long *)&__m256i_op1[2]) = 0x43dac1f2a3804ff0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x43d03bfff827e9f9; ++ *((unsigned long *)&__m256i_op1[0]) = 0x43e019c657c7d050; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xe8001411edf9c0f8; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xe80014fdf0e3e428; ++ __m256i_out = __lasx_xvsrarni_b_h (__m256i_op0, __m256i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff0fff0ff01ff14; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff0fff0fff10003; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfff0fff0ff01ff14; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff0fff0fff10003; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfefee0e3fefefe00; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfefee0e3fefefe00; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_b_h (__m256i_op0, __m256i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000001fffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000001fffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000001fffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000001fffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000400000004; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000400000004; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_h_w (__m256i_op0, __m256i_op1, 0x13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff040000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff040000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_d_q (__m256i_op0, __m256i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x007f0000007f0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x007f0000007f0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_d_q (__m256i_op0, __m256i_op1, 0x27); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_d_q (__m256i_op0, __m256i_op1, 
0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf7f8f7f8f800f800; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00003f780000ff80; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf7f8f7f80000fff9; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00003f780000ff80; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x1f001f00000007ef; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00001fff200007ef; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_d_q (__m256i_op0, __m256i_op1, 0x23); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_h_w (__m256i_op0, __m256i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7171717171717171; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8e8e8e8e8e8e8e8e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7171717171717171; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x8e8e8e8e8e8e8e8e; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x01c601c6fe3afe3a; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x01c601c6fe3afe3a; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_h_w (__m256i_op0, __m256i_op1, 0x16); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00003f3f00003f3f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00003f3f00004040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00003f3f00003f3f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00003f3f00004040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00003f3f00004040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00003f3f00004040; ++ __m256i_out = __lasx_xvsrarni_d_q (__m256i_op0, __m256i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvsrarni_h_w (__m256i_op0, __m256i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_h_w (__m256i_op0, __m256i_op1, 0x17); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007f010700c70106; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007f010700c70106; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000010211921; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000010211921; ++ 
*((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_b_h (__m256i_op0, __m256i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x82ff902d83000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7f80000082fe0bd9; ++ *((unsigned long *)&__m256i_op1[1]) = 0x82ff902d83000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7f80000082fe0bd9; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000001; ++ __m256i_out = __lasx_xvsrarni_w_d (__m256i_op0, __m256i_op1, 0x3f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_w_d (__m256i_op0, __m256i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fff000000000000; 
++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000080ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000080ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x08000000000000f8; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x08000000000000f8; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_b_h (__m256i_op0, __m256i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x1010101010101010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x1010101010101010; ++ __m256i_out = __lasx_xvsrarni_d_q (__m256i_op0, __m256i_op1, 0x3a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffff8; ++ __m256i_out = __lasx_xvsrarni_d_q (__m256i_op0, __m256i_op1, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x001fffffffe00000; ++ *((unsigned long *)&__m256i_result[2]) = 0x2020000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x001fffffffe00000; ++ *((unsigned long *)&__m256i_result[0]) = 0x2020000000000000; ++ __m256i_out = __lasx_xvsrarni_d_q (__m256i_op0, __m256i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x4); ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffff8000; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_h_w (__m256i_op0, __m256i_op1, 0x10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_b_h (__m256i_op0, __m256i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x03af03af03af03af; ++ *((unsigned long *)&__m256i_op0[2]) = 0x03acfc5303260e81; ++ *((unsigned long *)&__m256i_op0[1]) = 0x03af03af03af03af; ++ *((unsigned long *)&__m256i_op0[0]) = 0x03acfc5303260e81; ++ *((unsigned long *)&__m256i_op1[3]) = 0x03af03af03af03af; ++ *((unsigned long *)&__m256i_op1[2]) = 0x03acfc5303260e81; ++ *((unsigned long *)&__m256i_op1[1]) = 0x03af03af03af03af; ++ *((unsigned long *)&__m256i_op1[0]) = 0x03acfc5303260e81; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; 
++ __m256i_out = __lasx_xvsrarni_h_w (__m256i_op0, __m256i_op1, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +-- +2.33.0 + diff --git a/LoongArch-Add-tests-for-ASX-vector-xvfcmp-caf-ceq-cl.patch b/LoongArch-Add-tests-for-ASX-vector-xvfcmp-caf-ceq-cl.patch new file mode 100644 index 0000000000000000000000000000000000000000..c56e27e86a67f97f89566c6f2cb52a0eb4a2c739 --- /dev/null +++ b/LoongArch-Add-tests-for-ASX-vector-xvfcmp-caf-ceq-cl.patch @@ -0,0 +1,4510 @@ +From ab8716fe8109c738ac02b641160350d2b351466b Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Tue, 12 Sep 2023 16:45:33 +0800 +Subject: [PATCH 116/124] LoongArch: Add tests for ASX vector + xvfcmp{caf/ceq/cle/clt/cne/cor/cun} instructions. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lasx/lasx-xvfcmp_caf_s.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvfcmp_ceq_s.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cle_s.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvfcmp_clt_s.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cne_s.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cor_s.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cun_s.c: New test. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lasx/lasx-xvfcmp_caf_s.c | 446 ++++++++ + .../loongarch/vector/lasx/lasx-xvfcmp_ceq_s.c | 977 ++++++++++++++++++ + .../loongarch/vector/lasx/lasx-xvfcmp_cle_s.c | 759 ++++++++++++++ + .../loongarch/vector/lasx/lasx-xvfcmp_clt_s.c | 675 ++++++++++++ + .../loongarch/vector/lasx/lasx-xvfcmp_cne_s.c | 872 ++++++++++++++++ + .../loongarch/vector/lasx/lasx-xvfcmp_cor_s.c | 340 ++++++ + .../loongarch/vector/lasx/lasx-xvfcmp_cun_s.c | 361 +++++++ + 7 files changed, 4430 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_caf_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_ceq_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cle_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_clt_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cne_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cor_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cun_s.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_caf_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_caf_s.c +new file mode 100644 +index 000000000..fa3372358 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_caf_s.c +@@ -0,0 +1,446 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, 
lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xff56ff55; ++ *((int *)&__m256_op0[4]) = 0xff01ff01; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xff56ff55; ++ *((int *)&__m256_op0[0]) = 0xff01ff01; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x0000abff; ++ *((int *)&__m256_op1[4]) = 0x0000abff; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x0000abff; ++ *((int *)&__m256_op1[0]) = 0x0000abff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000001; ++ *((int *)&__m256_op0[4]) = 0x0000000a; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000001; ++ *((int *)&__m256_op0[0]) = 0x0000000a; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000040; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int 
*)&__m256_op1[0]) = 0x00000040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x5d20a0a1; ++ *((int *)&__m256_op1[6]) = 0x5d20a0a1; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x5d20a0a1; ++ *((int *)&__m256_op1[2]) = 0x5d20a0a1; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x0003ffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 
0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffff8000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffff8000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0xffff8000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffff8000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xe07de080; ++ *((int *)&__m256_op0[4]) = 0x1f20607a; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xe07de080; ++ *((int *)&__m256_op0[0]) = 0x1f20607a; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int 
*)&__m256_op1[5]) = 0xe07de080; ++ *((int *)&__m256_op1[4]) = 0x1f20607a; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xe07de080; ++ *((int *)&__m256_op1[0]) = 0x1f20607a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xe07de080; ++ *((int *)&__m256_op1[4]) = 0x1f20607a; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xe07de080; ++ *((int *)&__m256_op1[0]) = 0x1f20607a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 
0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000010; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000010; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int 
*)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0xa5a5a5a5a5a5a5a5; ++ *((unsigned long *)&__m256d_op1[2]) = 0xa5a5a5a5a5a99e03; ++ *((unsigned long *)&__m256d_op1[1]) = 0xa5a5a5a5a5a5a5a5; ++ *((unsigned long *)&__m256d_op1[0]) = 0xa5a5a5a5a5a99e03; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000045; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00000000000d0005; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000045; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000000d0005; ++ *((unsigned long *)&__m256d_op1[3]) = 
0x0000000000000045; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00000000000d0005; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000045; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00000000000d0005; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x24342434ffff2435; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x24342434ffff2435; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_d (__m256d_op0, 
__m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000013; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000013; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000013; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000013; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xfffeb664007ffd61; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfffe97a1df5b41b0; ++ *((unsigned long *)&__m256d_op0[1]) = 0xfffeb664007ffd61; ++ 
*((unsigned long *)&__m256d_op0[0]) = 0xfffe97a1df5b41b0; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_ceq_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_ceq_s.c +new file mode 100644 +index 000000000..6d6649f6f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_ceq_s.c +@@ -0,0 +1,977 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" 
++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0x00010101; ++ *((int *)&__m256_op0[6]) = 0x01010101; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00010100; ++ *((int *)&__m256_op0[1]) = 0x00010000; ++ *((int *)&__m256_op0[0]) = 0x01000100; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xbf7f7fff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xe651bfff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; 
++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_ceq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x000000ff; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x000000ff; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x0000ffff; ++ *((int *)&__m256_op1[6]) = 0xc0008001; ++ *((int *)&__m256_op1[5]) = 0x0000ffff; ++ *((int *)&__m256_op1[4]) = 0xc0008001; ++ *((int *)&__m256_op1[3]) = 0x0000ffff; ++ *((int *)&__m256_op1[2]) = 0xc0008001; ++ *((int *)&__m256_op1[1]) = 0x0000ffff; ++ *((int *)&__m256_op1[0]) = 0xc0008001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int 
*)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_ceq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffc6ffc6; ++ *((int *)&__m256_op0[6]) = 0x003a003a; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffc6ffc6; ++ *((int *)&__m256_op0[2]) = 0x003a003a; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x71717171; ++ *((int *)&__m256_op1[6]) = 0x71010101; ++ *((int *)&__m256_op1[5]) = 0x8e8e8e8e; ++ *((int *)&__m256_op1[4]) = 0x8f00ffff; ++ *((int *)&__m256_op1[3]) = 0x71717171; ++ *((int *)&__m256_op1[2]) = 0x71010101; ++ *((int *)&__m256_op1[1]) = 0x8e8e8e8e; ++ *((int *)&__m256_op1[0]) = 0x8f00ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 
0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x000e000e; ++ *((int *)&__m256_op1[4]) = 0x000e000e; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x000e000e; ++ *((int *)&__m256_op1[0]) = 0x000e000e; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000043; ++ *((int *)&__m256_op0[4]) = 0x0207f944; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000043; ++ *((int *)&__m256_op0[0]) = 0x0207f944; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int 
*)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000001; ++ *((int *)&__m256_op1[6]) = 0x9ffdf403; ++ *((int *)&__m256_op1[5]) = 0x00000001; ++ *((int *)&__m256_op1[4]) = 0x1ffd97c3; ++ *((int *)&__m256_op1[3]) = 0x00000001; ++ *((int *)&__m256_op1[2]) = 0x9ffdf403; ++ *((int *)&__m256_op1[1]) = 0x00000001; ++ *((int *)&__m256_op1[0]) = 0x1ffd97c3; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x7fff7fff; ++ *((int *)&__m256_op0[4]) = 0x7fff7fff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x7fff7fff; ++ *((int *)&__m256_op0[0]) = 0x7fff7fff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000808; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xbea2e127; ++ *((int *)&__m256_op1[6]) = 0xc046721f; ++ *((int *)&__m256_op1[5]) = 0x1729c073; ++ *((int *)&__m256_op1[4]) = 0x816edebe; ++ *((int *)&__m256_op1[3]) = 0xde91f010; ++ *((int *)&__m256_op1[2]) = 0x000006f9; ++ *((int *)&__m256_op1[1]) = 0x5ef1f90e; ++ *((int *)&__m256_op1[0]) = 0xfefaf30d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000200; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000200; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000200; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000200; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000009; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000009; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000009; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000009; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0xffb80000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0xffb80000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x0000ffff; ++ *((int *)&__m256_op0[6]) = 0x0000ffff; ++ *((int *)&__m256_op0[5]) = 0x0000ffff; ++ *((int *)&__m256_op0[4]) = 0x0000ffff; ++ *((int *)&__m256_op0[3]) = 0x0000ffff; ++ *((int *)&__m256_op0[2]) = 0x0000ffff; ++ *((int *)&__m256_op0[1]) = 0x0000ffff; ++ *((int *)&__m256_op0[0]) = 0x0000ffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xfff0fff0; ++ *((int *)&__m256_op0[6]) = 0xff01ff01; ++ *((int *)&__m256_op0[5]) = 0xfff0fff0; ++ *((int *)&__m256_op0[4]) = 0xfff0fff0; ++ *((int *)&__m256_op0[3]) = 0xfff0fff0; ++ *((int *)&__m256_op0[2]) = 0xff01ff01; ++ *((int *)&__m256_op0[1]) = 0xfff0fff0; ++ *((int *)&__m256_op0[0]) = 0xfff0fff0; ++ *((int *)&__m256_op1[7]) = 0xffefffef; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0xffefffef; ++ *((int *)&__m256_op1[4]) = 0xffefffef; ++ *((int *)&__m256_op1[3]) = 0xffefffef; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0xffefffef; ++ *((int *)&__m256_op1[0]) = 0xffefffef; ++ 
*((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x0000ffb1; ++ *((int *)&__m256_op1[6]) = 0x0001ff8f; ++ *((int *)&__m256_op1[5]) = 0x0001004c; ++ *((int *)&__m256_op1[4]) = 0x0001ff87; ++ *((int *)&__m256_op1[3]) = 0x0000ffb1; ++ *((int *)&__m256_op1[2]) = 0x0001ff8f; ++ *((int *)&__m256_op1[1]) = 0x0001004c; ++ *((int *)&__m256_op1[0]) = 0x0001ff87; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00010001; ++ *((int *)&__m256_op1[6]) = 0x00010001; ++ *((int *)&__m256_op1[5]) = 0x00010001; ++ *((int *)&__m256_op1[4]) = 0x00010001; ++ *((int *)&__m256_op1[3]) = 0x00010001; ++ *((int 
*)&__m256_op1[2]) = 0x00010001; ++ *((int *)&__m256_op1[1]) = 0x00010001; ++ *((int *)&__m256_op1[0]) = 0x00010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffff0000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffff0000; ++ *((int *)&__m256_op0[4]) = 0xffff0000; ++ *((int *)&__m256_op0[3]) = 0xffff0000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffff0000; ++ *((int *)&__m256_op0[0]) = 0xffff0000; ++ *((int *)&__m256_op1[7]) = 0x007f8080; ++ *((int *)&__m256_op1[6]) = 0x007f007f; ++ *((int *)&__m256_op1[5]) = 0x007f8080; ++ *((int *)&__m256_op1[4]) = 0x007f007f; ++ *((int *)&__m256_op1[3]) = 0x007f8080; ++ *((int *)&__m256_op1[2]) = 0x007f007f; ++ *((int *)&__m256_op1[1]) = 0x007f8080; ++ *((int *)&__m256_op1[0]) = 0x007f007f; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000033; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000033; ++ *((int *)&__m256_op1[7]) = 0x00004200; ++ *((int *)&__m256_op1[6]) = 0x80000000; ++ *((int *)&__m256_op1[5]) = 
0x5fff5fff; ++ *((int *)&__m256_op1[4]) = 0x607f0000; ++ *((int *)&__m256_op1[3]) = 0x00004200; ++ *((int *)&__m256_op1[2]) = 0x80000000; ++ *((int *)&__m256_op1[1]) = 0x5fff5fff; ++ *((int *)&__m256_op1[0]) = 0x607f0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x7fff8000; ++ *((int *)&__m256_op1[6]) = 0x7fff0000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00008000; ++ *((int *)&__m256_op1[3]) = 0x7fff8000; ++ *((int *)&__m256_op1[2]) = 0x7fff0000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00008000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int 
*)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x0000ffff; ++ *((int *)&__m256_op0[4]) = 0x0000ffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x0000ffff; ++ *((int *)&__m256_op0[0]) = 0x0000ffff; ++ *((int *)&__m256_op1[7]) = 0x00100010; ++ *((int *)&__m256_op1[6]) = 0x00030000; ++ *((int *)&__m256_op1[5]) = 0x00100010; ++ *((int *)&__m256_op1[4]) = 0x00030000; ++ *((int *)&__m256_op1[3]) = 0x00100010; ++ *((int *)&__m256_op1[2]) = 0x00030000; ++ *((int *)&__m256_op1[1]) = 0x00100010; ++ *((int *)&__m256_op1[0]) = 0x00030000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 
0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xf90c0c0c00000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0ca40c0c0c0c0cc0; ++ *((unsigned 
long *)&__m256d_op0[1]) = 0x0c0c0c0c0cb60cc0; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfbe0b80c960c96d0; ++ *((unsigned long *)&__m256d_op1[3]) = 0x1b9763952fc4c101; ++ *((unsigned long *)&__m256d_op1[2]) = 0xe37affb42fc05f69; ++ *((unsigned long *)&__m256d_op1[1]) = 0x18b988e64facb558; ++ *((unsigned long *)&__m256d_op1[0]) = 0xe5fb66c81da8e5bb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x001e001ea1bfa1bf; ++ *((unsigned long *)&__m256d_op0[2]) = 0x001e001e83e5422e; ++ *((unsigned long *)&__m256d_op0[1]) = 0x001e001ea1bfa1bf; ++ *((unsigned long *)&__m256d_op0[0]) = 0x011f011f0244420e; ++ *((unsigned long *)&__m256d_op1[3]) = 0xfffe00f7ffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xfffffffffff629d7; ++ *((unsigned long 
*)&__m256d_op1[1]) = 0xfffe00f7ffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xfffffffffff629d7; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000200; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000200; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x000000000007ffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x000000000007ffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x000000000007ffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x000000000007ffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256d_op0[3]) = 0xfc003802fc000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xfc003802fc000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0x7ffffffffffffffe; ++ *((unsigned long *)&__m256d_op1[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0x7ffffffffffffffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x6018000000000cd1; ++ *((unsigned long *)&__m256d_op1[2]) = 0x6040190d00000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x6018000000000cd1; ++ *((unsigned long *)&__m256d_op1[0]) = 0x6040190d00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x3eab77367fff4848; ++ *((unsigned long *)&__m256d_op0[2]) = 0x408480007fff0000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x3eab77367fff4848; ++ *((unsigned long *)&__m256d_op0[0]) = 0x408480007fff0000; ++ *((unsigned long *)&__m256d_op1[3]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x41dffbffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffff00ff800000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x41dffbffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffff00ff800000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffe6ffe6e6800001; ++ *((unsigned long *)&__m256d_op1[2]) = 0x19660019ff806680; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffe6ffe6e6800001; ++ *((unsigned long *)&__m256d_op1[0]) = 0x19660019ff806680; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, 
__m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000010100000101; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000010100000101; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000010100000101; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000010100000101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000001000000010; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000001000000010; ++ *((unsigned long *)&__m256d_op1[3]) = 0x45d5555545d55555; ++ *((unsigned long *)&__m256d_op1[2]) = 0x74555555e8aaaaaa; ++ *((unsigned long *)&__m256d_op1[1]) = 0x45d5555545d55555; ++ *((unsigned long *)&__m256d_op1[0]) = 0x74555555e8aaaaaa; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cueq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cueq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xfffffffffffffff6; ++ *((unsigned long *)&__m256d_op1[2]) = 0xfffffffffffffff6; ++ *((unsigned long *)&__m256d_op1[1]) = 0xfffffffffffffff6; ++ *((unsigned long *)&__m256d_op1[0]) = 0xfffffffffffffff6; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cueq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long 
*)&__m256d_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cueq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cueq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0003030300000300; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0003030300000300; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0003030300000100; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0003030300000100; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cueq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x3eab77367fff4848; ++ *((unsigned long *)&__m256d_op1[2]) = 0x408480007fff0000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x3eab77367fff4848; ++ *((unsigned long *)&__m256d_op1[0]) = 0x408480007fff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cueq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cueq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256d_op0[2]) = 0xffffffff0007a861; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffff0007a861; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cueq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cueq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000007f00000000; ++ *((unsigned long 
*)&__m256d_op1[2]) = 0x0000007f00000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000007f00000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000007f00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cueq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xfffffefffffffeff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfffffefffffffeff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xfffffefffffffeff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfffffefffffffeff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cueq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cle_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cle_s.c +new file mode 100644 +index 000000000..a64dd7598 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cle_s.c +@@ -0,0 +1,759 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, 
__m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x0018796d; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cle_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00fffb04; ++ *((int *)&__m256_op0[6]) = 0x02fddf20; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00fffb04; ++ *((int *)&__m256_op0[2]) = 0x02fddf20; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x41dfffc0; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ 
*((int *)&__m256_op1[3]) = 0x41dfffdf; ++ *((int *)&__m256_op1[2]) = 0xffc00000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cle_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffee; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffee; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffee; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffee; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cle_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) 
= 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cle_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cle_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int 
*)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cle_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x01fe007a; ++ *((int *)&__m256_op0[6]) = 0x01c40110; ++ *((int *)&__m256_op0[5]) = 0x019d00a2; ++ *((int *)&__m256_op0[4]) = 0x0039fff9; ++ *((int *)&__m256_op0[3]) = 0x01fe007a; ++ *((int *)&__m256_op0[2]) = 0x01c40110; ++ *((int *)&__m256_op0[1]) = 0x019d00a2; ++ *((int *)&__m256_op0[0]) = 0x003a0000; ++ *((int *)&__m256_op1[7]) = 0x0000fffe; ++ *((int *)&__m256_op1[6]) = 0x00800022; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0x0000fffe; ++ *((int *)&__m256_op1[2]) = 0x00800022; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cle_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000001; ++ *((int *)&__m256_op0[5]) = 0x7fff7ffe; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 
0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000001; ++ *((int *)&__m256_op0[1]) = 0x7fff7ffe; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000002; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000002; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000002; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000002; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cle_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x04000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x04000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x04000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x04000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cle_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int 
*)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000040; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000040; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_cle_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cle_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00010001; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00010001; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00010001; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cle_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ 
__m256i_out = __lasx_xvfcmp_cle_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xe161616161614e60; ++ *((unsigned long *)&__m256d_op0[2]) = 0xe161616161614e60; ++ *((unsigned long *)&__m256d_op0[1]) = 0xe161616161614e60; ++ *((unsigned long *)&__m256d_op0[0]) = 0xe161616161614e60; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000007773; ++ *((unsigned long 
*)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000003373; ++ *((unsigned long *)&__m256d_op1[3]) = 0x1616161616161616; ++ *((unsigned long *)&__m256d_op1[2]) = 0x161616167fffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x7ffe16167f161616; ++ *((unsigned long *)&__m256d_op1[0]) = 0x161616167fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffcc8000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x000000007dfdff4b; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0xbabababababababa; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0xbabababababababa; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0101010183f9999b; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256d_op1[1]) 
= 0x01010101d58f43c9; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000ffff00000001; ++ *((unsigned long *)&__m256d_op0[2]) = 0x1010100f10100fd4; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000ffff00000001; ++ *((unsigned long *)&__m256d_op0[0]) = 0x1010100f10100fd4; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0100000001000100; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0100000001000100; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000002070145; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000002070145; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffff0007a861; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffff0007a861; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000003; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x1b976395; ++ *((int *)&__m256_op0[6]) = 0x2fc4c101; ++ *((int *)&__m256_op0[5]) = 0xe37affb4; ++ *((int *)&__m256_op0[4]) = 0x2fc05f69; ++ *((int *)&__m256_op0[3]) = 0x18b988e6; ++ *((int *)&__m256_op0[2]) = 0x4facb558; ++ *((int *)&__m256_op0[1]) = 0xe5fb66c8; ++ *((int 
*)&__m256_op0[0]) = 0x1da8e5bb; ++ *((int *)&__m256_op1[7]) = 0x01a72334; ++ *((int *)&__m256_op1[6]) = 0xffff00ff; ++ *((int *)&__m256_op1[5]) = 0xff4f6838; ++ *((int *)&__m256_op1[4]) = 0xff937648; ++ *((int *)&__m256_op1[3]) = 0x00a2afb7; ++ *((int *)&__m256_op1[2]) = 0xfff00ecb; ++ *((int *)&__m256_op1[1]) = 0xffce110f; ++ *((int *)&__m256_op1[0]) = 0x004658c7; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_cule_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0x00001000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0x00001000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_cule_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 
0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cule_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x0000ff00; ++ *((int *)&__m256_op0[6]) = 0x0000ffff; ++ *((int *)&__m256_op0[5]) = 0x000000ff; ++ *((int *)&__m256_op0[4]) = 0x000000ff; ++ *((int *)&__m256_op0[3]) = 0x0000ff00; ++ *((int *)&__m256_op0[2]) = 0x0000ffff; ++ *((int *)&__m256_op0[1]) = 0x000000ff; ++ *((int *)&__m256_op0[0]) = 0x000000ff; ++ *((int *)&__m256_op1[7]) = 0x0000ffee; ++ *((int *)&__m256_op1[6]) = 0x0000ff4c; ++ *((int *)&__m256_op1[5]) = 0x0000ff05; ++ *((int *)&__m256_op1[4]) = 0x0000ff3c; ++ *((int *)&__m256_op1[3]) = 0x0000fff9; ++ *((int *)&__m256_op1[2]) = 0x0000ff78; ++ *((int *)&__m256_op1[1]) = 0x0000ffa8; ++ *((int *)&__m256_op1[0]) = 0x0000ff31; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cule_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int 
*)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffff0000; ++ *((int *)&__m256_op1[4]) = 0xffff0000; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffff0000; ++ *((int *)&__m256_op1[0]) = 0xffff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cule_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x0000ff01; ++ *((int *)&__m256_op0[6]) = 0x00ff0000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x0000ff01; ++ *((int *)&__m256_op0[3]) = 0x0000ff01; ++ *((int *)&__m256_op0[2]) = 0x00ff0000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x0000ff01; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_cule_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00010000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00010000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x02000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x02000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cule_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x01010000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x01010000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ 
__m256i_out = __lasx_xvfcmp_cule_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffff0101; ++ *((int *)&__m256_op1[4]) = 0x00000001; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffff0101; ++ *((int *)&__m256_op1[0]) = 0x00000001; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cule_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xfffffffb; ++ *((int *)&__m256_op0[6]) = 0xfffffffb; ++ *((int *)&__m256_op0[5]) = 0xfffffffb; ++ *((int *)&__m256_op0[4]) = 0xfffffffb; ++ *((int *)&__m256_op0[3]) = 0xfffffffb; ++ *((int *)&__m256_op0[2]) = 0xfffffffb; ++ *((int *)&__m256_op0[1]) = 0xfffffffb; ++ *((int *)&__m256_op0[0]) = 0xfffffffb; ++ *((int *)&__m256_op1[7]) = 0x0000ffff; ++ *((int *)&__m256_op1[6]) = 0x0001000e; ++ *((int *)&__m256_op1[5]) = 0x0000ffff; ++ *((int *)&__m256_op1[4]) = 0x0000ffff; ++ *((int *)&__m256_op1[3]) = 0x0000ffff; ++ *((int *)&__m256_op1[2]) = 0x0000ffff; ++ *((int *)&__m256_op1[1]) = 0x0000ffff; ++ *((int *)&__m256_op1[0]) = 0x0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cule_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x8080808280808082; ++ *((unsigned long *)&__m256d_op0[2]) = 0x8080808280808082; ++ *((unsigned long *)&__m256d_op0[1]) = 0x8080808280808080; ++ *((unsigned long *)&__m256d_op0[0]) = 0x8080808280808082; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cule_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xfffeffff10000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xfffeffff10000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cule_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0c6a240000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0f00204000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0c6a240000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0f00204000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cule_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_clt_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_clt_s.c +new file mode 100644 +index 000000000..733cc00ee +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_clt_s.c +@@ -0,0 +1,675 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ 
*((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0xfe02fe02; ++ *((int *)&__m256_op0[2]) = 0xfee5fe22; ++ *((int *)&__m256_op0[1]) = 0xff49fe42; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x0000ffff; ++ *((int *)&__m256_op1[6]) = 0x0000ff80; ++ *((int *)&__m256_op1[5]) = 0x00004686; ++ *((int *)&__m256_op1[4]) = 0x00007f79; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0x0000ffff; ++ *((int *)&__m256_op1[1]) = 0x0000f328; ++ *((int *)&__m256_op1[0]) = 0x0000dfff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_clt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x01000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x01000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_clt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) 
= 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffff80cb; ++ *((int *)&__m256_op1[6]) = 0xfffffdf8; ++ *((int *)&__m256_op1[5]) = 0x00000815; ++ *((int *)&__m256_op1[4]) = 0x00000104; ++ *((int *)&__m256_op1[3]) = 0xffffffa4; ++ *((int *)&__m256_op1[2]) = 0xfffffffd; ++ *((int *)&__m256_op1[1]) = 0x00000007; ++ *((int *)&__m256_op1[0]) = 0x00000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_clt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffff0000; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00003f3f; ++ *((int *)&__m256_op1[4]) = 0xc6c68787; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00003f3f; ++ *((int *)&__m256_op1[0]) = 0x87870000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_clt_s (__m256_op0, 
__m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000002; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_clt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_clt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0x0101ffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0x0101ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_clt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x01000100; ++ *((int *)&__m256_op0[6]) = 0x01000100; ++ *((int *)&__m256_op0[5]) = 0x01000100; ++ *((int *)&__m256_op0[4]) = 0x01000100; ++ *((int *)&__m256_op0[3]) = 0x01000100; ++ *((int *)&__m256_op0[2]) = 0x01000100; ++ *((int *)&__m256_op0[1]) = 0x01000100; ++ *((int *)&__m256_op0[0]) = 0x01000100; ++ *((int *)&__m256_op1[7]) = 0x7f800000; ++ *((int *)&__m256_op1[6]) = 0x7f800000; ++ *((int *)&__m256_op1[5]) = 0x62d2acee; ++ *((int *)&__m256_op1[4]) = 0x7fc00000; ++ *((int *)&__m256_op1[3]) = 0x7f800000; ++ *((int *)&__m256_op1[2]) = 0x7f800000; ++ *((int *)&__m256_op1[1]) = 0x62d2acee; ++ *((int *)&__m256_op1[0]) = 0x7fc00000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_clt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x0000ff01; ++ *((int *)&__m256_op0[6]) = 0x00ff0000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x0000ff01; ++ *((int *)&__m256_op0[3]) = 0x0000ff01; ++ *((int *)&__m256_op0[2]) = 0x00ff0000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x0000ff01; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000808; ++ *((int *)&__m256_op1[4]) = 0x00000808; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000808; ++ *((int *)&__m256_op1[0]) = 0x00000808; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_clt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffff8000; ++ *((int *)&__m256_op0[5]) = 0x7efefefe; ++ *((int *)&__m256_op0[4]) = 0x80ffffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x7efefefe; ++ *((int *)&__m256_op0[0]) = 0x80ffffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int 
*)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvfcmp_clt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x0001ffaa; ++ *((int *)&__m256_op1[6]) = 0x0000040e; ++ *((int *)&__m256_op1[5]) = 0x00007168; ++ *((int *)&__m256_op1[4]) = 0x00007bb6; ++ *((int *)&__m256_op1[3]) = 0x0001ffe8; ++ *((int *)&__m256_op1[2]) = 0x0001fe9c; ++ *((int *)&__m256_op1[1]) = 0x00002282; ++ *((int *)&__m256_op1[0]) = 0x00001680; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x01010101; ++ *((int *)&__m256_op0[5]) = 0x55555501; ++ *((int *)&__m256_op0[4]) = 0xfefefeab; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x01010101; ++ *((int *)&__m256_op0[1]) = 0x55555501; ++ *((int *)&__m256_op0[0]) = 0xfefefeab; ++ *((int *)&__m256_op1[7]) = 0x00000105; ++ *((int *)&__m256_op1[6]) = 0xfffffefb; ++ *((int *)&__m256_op1[5]) = 0xffffff02; ++ *((int *)&__m256_op1[4]) = 0x000000fe; ++ *((int *)&__m256_op1[3]) = 
0x00000105; ++ *((int *)&__m256_op1[2]) = 0xfffffefb; ++ *((int *)&__m256_op1[1]) = 0xffffff02; ++ *((int *)&__m256_op1[0]) = 0x000000fe; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000080; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000080; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_cult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x0000ffce; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x0000fc7c; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x0000ffce; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x0000fc7c; ++ *((int *)&__m256_op1[7]) = 0xe7e7e7e7; ++ *((int *)&__m256_op1[6]) = 0xe7e7e7e7; ++ *((int 
*)&__m256_op1[5]) = 0xe7e7e7e7; ++ *((int *)&__m256_op1[4]) = 0xe7e7e7e7; ++ *((int *)&__m256_op1[3]) = 0xe7e7e7e7; ++ *((int *)&__m256_op1[2]) = 0xe7e7e7e7; ++ *((int *)&__m256_op1[1]) = 0xe7e7e7e7; ++ *((int *)&__m256_op1[0]) = 0xe7e7e7e7; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvfcmp_cult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 
0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int 
*)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0x0007a861; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0x0007a861; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00100010; ++ *((int *)&__m256_op1[5]) = 0x00100010; ++ *((int *)&__m256_op1[4]) = 0x00100010; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00100010; ++ *((int *)&__m256_op1[1]) = 0x00100010; ++ *((int *)&__m256_op1[0]) = 0x00100010; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x01010101; ++ *((int *)&__m256_op0[6]) = 0x01010101; ++ *((int *)&__m256_op0[5]) = 
0x01010101; ++ *((int *)&__m256_op0[4]) = 0x00000001; ++ *((int *)&__m256_op0[3]) = 0x01010101; ++ *((int *)&__m256_op0[2]) = 0x01010101; ++ *((int *)&__m256_op0[1]) = 0x01010101; ++ *((int *)&__m256_op0[0]) = 0x00000001; ++ *((int *)&__m256_op1[7]) = 0x000001e0; ++ *((int *)&__m256_op1[6]) = 0x01e001e0; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x000001e0; ++ *((int *)&__m256_op1[2]) = 0x01e001e0; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_clt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x000000000000007f; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_clt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256d_op1[3]) = 0xf800d0d8ffffeecf; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000383fffffdf0d; ++ *((unsigned long *)&__m256d_op1[1]) = 0xf800d0d8ffffeecf; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000383fffffdf0d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_clt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x7fefffffffffffff; ++ *((unsigned long 
*)&__m256d_op1[0]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_clt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x001ffffe00200000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x001ffffe00200000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_clt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_clt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_clt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x2020000020200000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x2020000020200000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0008000001010000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0101000001010000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cult_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0006000000040000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0002000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0002000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cult_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cult_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cult_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfff1000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfff1000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cult_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256d_op1[2]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256d_op1[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256d_op1[0]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cult_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cne_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cne_s.c +new file mode 100644 +index 000000000..190741070 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cne_s.c +@@ -0,0 +1,872 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x7fff7fff; ++ *((int *)&__m256_op0[4]) = 0x7fff7fff; ++ *((int *)&__m256_op0[3]) = 0x7fff01fd; ++ *((int *)&__m256_op0[2]) = 0x7fff7fff; ++ *((int *)&__m256_op0[1]) = 0x00007fff; ++ *((int *)&__m256_op0[0]) = 0x7fff7fff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned 
long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_cne_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000001; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000001; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cne_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xdededede; ++ *((int *)&__m256_op0[6]) = 0xdededede; ++ *((int *)&__m256_op0[5]) = 0xdededede; ++ *((int *)&__m256_op0[4]) = 0xdededede; ++ *((int *)&__m256_op0[3]) = 0xdededede; ++ *((int *)&__m256_op0[2]) = 0xdededede; ++ *((int *)&__m256_op0[1]) = 0xdededede; ++ *((int *)&__m256_op0[0]) = 0xdededede; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 
0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cne_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000051; ++ *((int *)&__m256_op1[5]) = 0x00001010; ++ *((int *)&__m256_op1[4]) = 0x00000fff; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000051; ++ *((int *)&__m256_op1[1]) = 0x00001010; ++ *((int *)&__m256_op1[0]) = 0x00000fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cne_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000051; ++ *((int *)&__m256_op0[5]) = 0x00001010; ++ *((int *)&__m256_op0[4]) = 0x00000fff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000051; ++ *((int *)&__m256_op0[1]) = 0x00001010; ++ *((int *)&__m256_op0[0]) = 0x00000fff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int 
*)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cne_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x0000ffff; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x0000ffff; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x0000ffff; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x0000ffff; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_cne_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 
0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cne_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x000007c8; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x000007c8; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvfcmp_cne_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int 
*)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x80000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x80000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cne_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x0000001f; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x0000001f; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x0000001f; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x0000001f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cne_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0xfff8ff40; ++ *((int *)&__m256_op0[5]) = 0x0000ff01; ++ *((int *)&__m256_op0[4]) = 
0x00090040; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0xfff8ff40; ++ *((int *)&__m256_op0[1]) = 0x0000ff01; ++ *((int *)&__m256_op0[0]) = 0x00090040; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cne_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0xff1cff1c; ++ *((int *)&__m256_op1[6]) = 0xff1cff1c; ++ *((int *)&__m256_op1[5]) = 0xff1cff1c; ++ *((int *)&__m256_op1[4]) = 0xff1cff1c; ++ *((int *)&__m256_op1[3]) = 0xff1cff1c; ++ *((int *)&__m256_op1[2]) = 0xff1cff1c; ++ *((int *)&__m256_op1[1]) = 0xff1cff1c; ++ *((int *)&__m256_op1[0]) = 0xff1cff1c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cne_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00fe01f0; ++ *((int 
*)&__m256_op0[6]) = 0x00010000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00c40086; ++ *((int *)&__m256_op0[3]) = 0x00fe01f0; ++ *((int *)&__m256_op0[2]) = 0x00010000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00c40086; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvfcmp_cune_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x0000ffff; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x0000ffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x0000ffff; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x0000ffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvfcmp_cune_s (__m256_op0, __m256_op1); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cune_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000001; ++ *((int *)&__m256_op0[6]) = 0x00000001; ++ *((int *)&__m256_op0[5]) = 0x0fff0180; ++ *((int *)&__m256_op0[4]) = 0x0fff0181; ++ *((int *)&__m256_op0[3]) = 0x00000001; ++ *((int *)&__m256_op0[2]) = 0x00000001; ++ *((int *)&__m256_op0[1]) = 0x0fff0180; ++ *((int *)&__m256_op0[0]) = 0x0fff0181; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cune_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x0003ffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cune_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xfffffe20; ++ *((int *)&__m256_op0[5]) = 0x0000001d; ++ *((int *)&__m256_op0[4]) = 0xfffffe1f; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cune_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x5fa00000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x5fa00000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000004; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00007f95; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000004; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00007f95; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvfcmp_cune_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x7f010000; ++ *((int *)&__m256_op0[5]) = 0x00010000; ++ *((int *)&__m256_op0[4]) = 0x00007f7f; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x7f010000; ++ *((int *)&__m256_op0[1]) = 0x00010000; ++ *((int *)&__m256_op0[0]) = 0x00007f7f; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cune_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cune_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int 
*)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cune_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cune_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x003f0200; ++ *((int *)&__m256_op0[6]) = 0x01400200; ++ *((int *)&__m256_op0[5]) = 0x003f00ff; ++ *((int *)&__m256_op0[4]) = 0x003f00c4; ++ *((int *)&__m256_op0[3]) = 0x003f0200; ++ *((int *)&__m256_op0[2]) = 0x01400200; ++ *((int *)&__m256_op0[1]) = 0x003f00ff; ++ *((int *)&__m256_op0[0]) = 0x003f00c4; ++ *((int *)&__m256_op1[7]) = 0x00000101; ++ *((int *)&__m256_op1[6]) = 0x01010101; ++ *((int *)&__m256_op1[5]) = 
0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000101; ++ *((int *)&__m256_op1[2]) = 0x01010101; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cune_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cne_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x01fe000000ff00ff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x01fe000001fe0000; ++ *((unsigned long *)&__m256i_result[3]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cne_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0101010101010102; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0101010201010204; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0101010101010102; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0101010101010102; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000e00ff00ff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cne_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cne_d 
(__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0xfff9fff9fff9fff9; ++ *((unsigned long *)&__m256d_op1[2]) = 0xfff90000fff9fff9; ++ *((unsigned long *)&__m256d_op1[1]) = 0xfff9fff9fff9fff9; ++ *((unsigned long *)&__m256d_op1[0]) = 0xfff90000fff9fff9; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cne_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0101010127272525; ++ *((unsigned long *)&__m256d_op1[2]) = 0x23a2a121179e951d; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0101010127272525; ++ *((unsigned long *)&__m256d_op1[0]) = 0x23a2a121179e951d; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cne_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cne_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cne_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256d_op1[3]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256d_op1[1]) = 0x00000005ffffffff; 
++ *((unsigned long *)&__m256d_op1[0]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cne_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000002000000020; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256d_op1[3]) = 0xdff8000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0xdff8000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0xdff8000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0xdff8000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000040002; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x000000000000007f; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xc600000000000000; ++ *((unsigned 
long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xc600000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0006000000040000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0002000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0002000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0006000000040000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0002000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0002000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256d_op0[1]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256d_op1[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long 
*)&__m256d_op1[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xff56ff55ff01ff01; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xff56ff55ff01ff01; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000800000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000800000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000800000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffff10; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffff10; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0004000400040004; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0004000500040005; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0004000400040004; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0004000500040005; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cor_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cor_s.c +new file mode 100644 +index 000000000..8dd58f228 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cor_s.c +@@ -0,0 +1,340 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ 
__m256i_out = __lasx_xvfcmp_cor_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cor_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cor_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cor_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x01fe007a; ++ *((int *)&__m256_op1[6]) = 0x01c40110; ++ *((int *)&__m256_op1[5]) = 0x019d00a2; ++ *((int *)&__m256_op1[4]) = 0x0039fff9; ++ *((int *)&__m256_op1[3]) = 0x01fe007a; ++ *((int *)&__m256_op1[2]) = 0x01c40110; ++ *((int *)&__m256_op1[1]) = 0x019d00a2; ++ *((int *)&__m256_op1[0]) = 0x003a0000; ++ *((unsigned long *)&__m256i_result[3]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cor_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xfff10000; ++ *((int *)&__m256_op0[4]) = 0xfff10000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xfff10000; ++ *((int *)&__m256_op0[0]) = 0xfff10000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0xfff10000; ++ *((int *)&__m256_op1[4]) = 0xfff10000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cor_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; 
++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffff00ffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffff00ffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ 
*((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x000000000000ff00; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x000000000000ff00; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xfffffffffefefeff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffff295329; ++ *((unsigned long *)&__m256d_op0[1]) = 0xfffffffffefefeff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffff295329; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000504fffff3271; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffff47b4ffff5879; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000504fffff3271; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffff47b4ffff5879; ++ *((unsigned long 
*)&__m256d_op1[3]) = 0x00ff010000ff017e; ++ *((unsigned long *)&__m256d_op1[2]) = 0x01fe01ae00ff00ff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x00ff010000ff017e; ++ *((unsigned long *)&__m256d_op1[0]) = 0x01fe01ae00ff00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000009; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000020006; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000020006; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000020006; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000020006; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x7); ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffff0100000001; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffff0100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x2); ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000ff0100ff0000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x000000000000ff01; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000ff0100ff0000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x000000000000ff01; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000050007; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000039; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git 
a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cun_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cun_s.c +new file mode 100644 +index 000000000..3230c101d +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cun_s.c +@@ -0,0 +1,361 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cun_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cun_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x01ffffff; ++ *((int *)&__m256_op1[4]) = 0xfe000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x01ffffff; ++ *((int *)&__m256_op1[0]) = 0xfe000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) 
= 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cun_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x0000504f; ++ *((int *)&__m256_op0[6]) = 0xffff3271; ++ *((int *)&__m256_op0[5]) = 0xffff47b4; ++ *((int *)&__m256_op0[4]) = 0xffff5879; ++ *((int *)&__m256_op0[3]) = 0x0000504f; ++ *((int *)&__m256_op0[2]) = 0xffff3271; ++ *((int *)&__m256_op0[1]) = 0xffff47b4; ++ *((int *)&__m256_op0[0]) = 0xffff5879; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cun_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xde00fe00; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x0000fe01; ++ *((int *)&__m256_op0[4]) = 0x0000fe01; ++ *((int *)&__m256_op0[3]) = 0xde00fe00; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x0000fe01; ++ *((int *)&__m256_op0[0]) = 0x0000fe01; ++ *((int *)&__m256_op1[7]) = 0x0000ffff; ++ *((int *)&__m256_op1[6]) = 0x0000ffff; ++ *((int *)&__m256_op1[5]) = 0x00ff00fe; ++ *((int *)&__m256_op1[4]) = 0x00ff00fe; ++ *((int *)&__m256_op1[3]) = 0x0000ffff; ++ *((int *)&__m256_op1[2]) = 0x0000ffff; ++ *((int *)&__m256_op1[1]) = 0x00ff00fe; ++ *((int *)&__m256_op1[0]) = 0x00ff00fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cun_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xf3f3f3f3; ++ *((int *)&__m256_op0[6]) = 0xf3f3f3f3; ++ *((int *)&__m256_op0[5]) = 0xf3f3f3f3; ++ *((int *)&__m256_op0[4]) = 0xf3f3f3f3; ++ *((int *)&__m256_op0[3]) = 0xf3f3f3f3; ++ *((int *)&__m256_op0[2]) = 0xf3f3f3f3; ++ *((int *)&__m256_op0[1]) = 0xf3f3f3f3; ++ *((int *)&__m256_op0[0]) = 0xf3f3f3f3; ++ *((int *)&__m256_op1[7]) = 0xf3f3f3f3; ++ *((int *)&__m256_op1[6]) = 0xf3f3f3f3; ++ *((int *)&__m256_op1[5]) = 0xf3f3f3f3; ++ *((int *)&__m256_op1[4]) = 0xf3f3f3f3; ++ *((int *)&__m256_op1[3]) = 0xf3f3f3f3; ++ *((int *)&__m256_op1[2]) = 0xf3f3f3f3; ++ *((int *)&__m256_op1[1]) = 0xf3f3f3f3; ++ *((int *)&__m256_op1[0]) = 0xf3f3f3f3; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cun_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0x0007a861; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0x0007a861; ++ *((int *)&__m256_op1[7]) = 0x80008000; ++ *((int *)&__m256_op1[6]) = 0x80008000; ++ *((int *)&__m256_op1[5]) = 0x80008000; ++ *((int *)&__m256_op1[4]) = 0xfff98000; ++ *((int *)&__m256_op1[3]) = 0x80008000; ++ *((int *)&__m256_op1[2]) = 0x80008000; ++ *((int *)&__m256_op1[1]) = 0x80008000; ++ *((int *)&__m256_op1[0]) = 0xfff98000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cun_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000015d050192cb; ++ *((unsigned long *)&__m256d_op0[2]) = 0x028e509508b16ee9; ++ *((unsigned long *)&__m256d_op0[1]) = 0x000033ff01020e23; ++ *((unsigned long *)&__m256d_op0[0]) = 0x151196b58fd1114d; ++ *((unsigned long *)&__m256d_op1[3]) = 0x372e9d75e8aab100; ++ *((unsigned long *)&__m256d_op1[2]) = 0xc5c085372cfabfba; ++ *((unsigned long *)&__m256d_op1[1]) = 0x31730b5beb7c99f5; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0658f2dc0eb21e3c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cun_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x1e1800001e180000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x1e1800001e180000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = 
__lasx_xvfcmp_cun_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cun_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0101010101010203; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cun_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cun_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00fe01f000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00fe01f000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cun_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) 
= 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cun_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cun_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cun_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x000b000b000b000b; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x000b000b000b000b; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cun_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cun_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cun_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +-- +2.33.0 + diff --git a/LoongArch-Add-tests-for-ASX-vector-xvfcmp-saf-seq-sl.patch b/LoongArch-Add-tests-for-ASX-vector-xvfcmp-saf-seq-sl.patch new file mode 100644 index 0000000000000000000000000000000000000000..afc40a21757bd57974d7974ff292ba17785a6841 --- /dev/null +++ b/LoongArch-Add-tests-for-ASX-vector-xvfcmp-saf-seq-sl.patch @@ -0,0 +1,4824 @@ +From beaeb3f05a71c637d47a0e5f86f5781345e10f97 Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Tue, 12 Sep 2023 16:48:35 +0800 +Subject: [PATCH 117/124] LoongArch: Add tests for ASX vector + xvfcmp{saf/seq/sle/slt/sne/sor/sun} instructions. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lasx/lasx-xvfcmp_saf_s.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvfcmp_seq_s.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sle_s.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvfcmp_slt_s.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sne_s.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sor_s.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sun_s.c: New test. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lasx/lasx-xvfcmp_saf_s.c | 424 ++++++ + .../loongarch/vector/lasx/lasx-xvfcmp_seq_s.c | 924 +++++++++++++ + .../loongarch/vector/lasx/lasx-xvfcmp_sle_s.c | 627 +++++++++ + .../loongarch/vector/lasx/lasx-xvfcmp_slt_s.c | 1212 +++++++++++++++++ + .../loongarch/vector/lasx/lasx-xvfcmp_sne_s.c | 756 ++++++++++ + .../loongarch/vector/lasx/lasx-xvfcmp_sor_s.c | 438 ++++++ + .../loongarch/vector/lasx/lasx-xvfcmp_sun_s.c | 363 +++++ + 7 files changed, 4744 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_saf_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_seq_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sle_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_slt_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sne_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sor_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sun_s.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_saf_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_saf_s.c +new file mode 100644 +index 000000000..23cbc4bf0 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_saf_s.c +@@ -0,0 +1,424 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ 
long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x0000ffff; ++ *((int *)&__m256_op0[4]) = 0x0000ffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x0000ffff; ++ *((int *)&__m256_op0[0]) = 0x0000ffff; ++ *((int *)&__m256_op1[7]) = 0x0eb7aaaa; ++ *((int *)&__m256_op1[6]) = 0xa6e6ac80; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x0eb7aaaa; ++ *((int *)&__m256_op1[2]) = 0xa6e6ac80; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x3fff3fff; ++ *((int *)&__m256_op0[6]) = 0x3fff3fff; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x3fff3fff; ++ *((int *)&__m256_op0[3]) = 0x3fff3fff; ++ *((int *)&__m256_op0[2]) = 0x3fff3fff; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x3fff3fff; ++ *((int *)&__m256_op1[7]) = 0x017e01fe; ++ *((int *)&__m256_op1[6]) = 0x01fe01fe; ++ *((int *)&__m256_op1[5]) = 0x05860606; ++ *((int *)&__m256_op1[4]) = 0x01fe0202; ++ *((int *)&__m256_op1[3]) = 0x017e01fe; ++ *((int *)&__m256_op1[2]) = 0x01fe0000; ++ *((int *)&__m256_op1[1]) = 0x05860606; ++ *((int *)&__m256_op1[0]) = 0x01fe0004; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x0000003f; ++ *((int *)&__m256_op0[6]) = 0x00390035; ++ *((int *)&__m256_op0[5]) = 0x8015003f; ++ *((int *)&__m256_op0[4]) = 0x0006001f; ++ *((int *)&__m256_op0[3]) = 0x0000003f; ++ *((int *)&__m256_op0[2]) = 0x00390035; ++ *((int *)&__m256_op0[1]) = 0x8015003f; ++ *((int *)&__m256_op0[0]) = 0x0006001f; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xefdfefdf; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0xefdfefdf; ++ *((int *)&__m256_op1[4]) = 0xefdfefdf; ++ *((int *)&__m256_op1[3]) = 0xefdfefdf; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0xefdfefdf; ++ *((int *)&__m256_op1[0]) = 0xefdfefdf; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00ff00ff; ++ *((int *)&__m256_op1[6]) = 0x00ff00ff; ++ *((int *)&__m256_op1[5]) = 0x00ff00ff; ++ *((int *)&__m256_op1[4]) = 0x00ff00ff; ++ *((int *)&__m256_op1[3]) = 0x00ff00ff; ++ *((int *)&__m256_op1[2]) 
= 0x00ff00ff; ++ *((int *)&__m256_op1[1]) = 0x00ff00ff; ++ *((int *)&__m256_op1[0]) = 0x00ff00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000001; ++ *((int *)&__m256_op0[6]) = 0x7bfffff0; ++ *((int *)&__m256_op0[5]) = 0x00000001; ++ *((int *)&__m256_op0[4]) = 0x80007fe8; ++ *((int *)&__m256_op0[3]) = 0x00000001; ++ *((int *)&__m256_op0[2]) = 0x7bfffff0; ++ *((int *)&__m256_op0[1]) = 0x00000001; ++ *((int *)&__m256_op0[0]) = 0x80007fe8; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x02020102; ++ *((int *)&__m256_op0[6]) = 0x02020102; ++ *((int *)&__m256_op0[5]) = 0x02020102; ++ *((int *)&__m256_op0[4]) = 0x02020102; ++ *((int *)&__m256_op0[3]) = 0x02020102; ++ *((int *)&__m256_op0[2]) = 0x02020102; ++ *((int *)&__m256_op0[1]) = 0x02020102; ++ *((int *)&__m256_op0[0]) = 0x02020102; ++ *((int *)&__m256_op1[7]) = 0x3e800000; ++ *((int *)&__m256_op1[6]) = 0x3e800000; ++ *((int *)&__m256_op1[5]) = 0x3e800000; ++ *((int 
*)&__m256_op1[4]) = 0x3e800000; ++ *((int *)&__m256_op1[3]) = 0x3e800000; ++ *((int *)&__m256_op1[2]) = 0x3e800000; ++ *((int *)&__m256_op1[1]) = 0x3e800000; ++ *((int *)&__m256_op1[0]) = 0x3e800000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00ff00ff; ++ *((int *)&__m256_op0[6]) = 0x00ff00ff; ++ *((int *)&__m256_op0[5]) = 0x00ff00ff; ++ *((int *)&__m256_op0[4]) = 0x00ff00ff; ++ *((int *)&__m256_op0[3]) = 0x00ff00ff; ++ *((int *)&__m256_op0[2]) = 0x00ff00ff; ++ *((int *)&__m256_op0[1]) = 0x00ff00ff; ++ *((int *)&__m256_op0[0]) = 0x00ff00ff; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x80000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0xff88ff88; ++ *((int *)&__m256_op0[3]) = 0x80000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0xff88ff88; ++ *((int *)&__m256_op1[7]) = 
0xfe01fe01; ++ *((int *)&__m256_op1[6]) = 0x0000fd02; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x3fc03fc0; ++ *((int *)&__m256_op1[3]) = 0xfe01fe01; ++ *((int *)&__m256_op1[2]) = 0x0000fd02; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x3fc03fc0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000001; ++ *((int *)&__m256_op0[6]) = 0x00000001; ++ *((int *)&__m256_op0[5]) = 0xffffb2f6; ++ *((int *)&__m256_op0[4]) = 0x00006f48; ++ *((int *)&__m256_op0[3]) = 0x00000001; ++ *((int *)&__m256_op0[2]) = 0x00000001; ++ *((int *)&__m256_op0[1]) = 0xffffb2f6; ++ *((int *)&__m256_op0[0]) = 0x00006f48; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x000000ff; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int 
*)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00100010; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00100010; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00100010; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00100010; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000020; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0020000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000020; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0020000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xfffffefe00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000010; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000010; ++ *((unsigned long *)&__m256d_op1[3]) = 0x000408080c111414; ++ *((unsigned long *)&__m256d_op1[2]) = 0x000408080c111414; ++ *((unsigned long *)&__m256d_op1[1]) = 0x000408080c111414; ++ *((unsigned long *)&__m256d_op1[0]) = 0x000408080c111414; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00000000ffdbbbcf; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00000000ffb8579f; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00000000ffdbbbcf; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000ffb8579f; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000200000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000004000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000200000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000004000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x3fffbfff80000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00004000007f8000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x3fffbfff80000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00004000007f8000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git 
a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_seq_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_seq_s.c +new file mode 100644 +index 000000000..6641d2c58 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_seq_s.c +@@ -0,0 +1,924 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_seq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x59800000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x59800000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x41d66000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x41d66000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvfcmp_seq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) 
= 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_seq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xa41aa42e; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xa41aa42e; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvfcmp_seq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x83f95466; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0x01010101; ++ *((int *)&__m256_op0[0]) = 0x00005400; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xfefefeff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xff295329; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xfefefeff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xff295329; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_seq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_seq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x111ebb78; ++ *((int *)&__m256_op1[6]) = 0x4f9c4100; ++ *((int *)&__m256_op1[5]) = 0x1c386546; ++ *((int *)&__m256_op1[4]) = 0x809f3b50; ++ *((int *)&__m256_op1[3]) = 0x111ebb78; ++ *((int *)&__m256_op1[2]) = 0x4f9bf1ac; ++ *((int *)&__m256_op1[1]) = 0x21f6050d; ++ *((int *)&__m256_op1[0]) = 0x955d3f68; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_seq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffff0000; ++ *((int *)&__m256_op1[4]) = 0xffff0001; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffff0000; ++ *((int *)&__m256_op1[0]) = 0xffff0001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_seq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ 
*((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_seq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_seq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000100; ++ *((int *)&__m256_op0[5]) = 0x00000002; ++ *((int *)&__m256_op0[4]) = 0xff910072; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000100; ++ *((int *)&__m256_op0[1]) = 0x00000002; ++ *((int *)&__m256_op0[0]) = 0xff910072; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) 
= 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_seq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0xffff97a2; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0xffff97a2; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_seq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int 
*)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x55555555; ++ *((int *)&__m256_op0[6]) = 0x3f800000; ++ *((int *)&__m256_op0[5]) = 0x55555555; ++ *((int *)&__m256_op0[4]) = 0x80000000; ++ *((int *)&__m256_op0[3]) = 0x55555555; ++ *((int *)&__m256_op0[2]) = 0x3f800000; ++ *((int *)&__m256_op0[1]) = 
0x55555555; ++ *((int *)&__m256_op0[0]) = 0x80000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x0001fffe; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x0001fffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00018002; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000002; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00018002; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000002; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00030000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00030000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_sueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xfff70156; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xfff70156; ++ *((int 
*)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xfff70156; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xfff70156; ++ *((int *)&__m256_op1[7]) = 0x7fefffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0x7fefffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0x7fefffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0x7fefffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 
0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x0000ff70; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x0000ff70; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000100; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000100; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_sueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000002; ++ *((int *)&__m256_op1[4]) = 0x00000008; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000002; ++ *((int *)&__m256_op1[0]) = 0x00000008; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x4393a0a5; ++ *((int *)&__m256_op0[6]) = 0xbc606060; ++ *((int *)&__m256_op0[5]) = 0x43b32fee; ++ *((int *)&__m256_op0[4]) = 0xa9000000; ++ *((int *)&__m256_op0[3]) = 0x4393a0a5; ++ *((int *)&__m256_op0[2]) = 0xbc606060; ++ *((int *)&__m256_op0[1]) = 0x43b32fee; ++ *((int *)&__m256_op0[0]) = 0xa9000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000001; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000003; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000003; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000003; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000003; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_sueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xfffeb664; ++ *((int *)&__m256_op0[6]) = 0x007ffd61; ++ *((int *)&__m256_op0[5]) = 0xfffe97a1; ++ *((int *)&__m256_op0[4]) = 0xdf5b41b0; ++ *((int *)&__m256_op0[3]) = 0xfffeb664; ++ *((int *)&__m256_op0[2]) = 0x007ffd61; ++ *((int *)&__m256_op0[1]) = 0xfffe97a1; ++ *((int *)&__m256_op0[0]) = 0xdf5b41b0; ++ *((int *)&__m256_op1[7]) = 0xfffeb683; ++ *((int *)&__m256_op1[6]) = 0x9ffffd80; ++ *((int *)&__m256_op1[5]) = 0xfffe97c0; ++ *((int *)&__m256_op1[4]) = 0x20010001; ++ *((int *)&__m256_op1[3]) = 0xfffeb683; ++ *((int *)&__m256_op1[2]) = 0x9ffffd80; ++ *((int *)&__m256_op1[1]) = 0xfffe97c0; ++ *((int *)&__m256_op1[0]) = 0x20010001; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_sueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x053531f7c6334908; ++ *((unsigned long *)&__m256d_op0[2]) = 0x8e41dcbff87e7900; ++ *((unsigned long *)&__m256d_op0[1]) = 0x12eb8332e3e15093; ++ *((unsigned long *)&__m256d_op0[0]) = 0x9a7491f9e016ccd4; ++ *((unsigned long *)&__m256d_op1[3]) = 0x345947dcd192b5c4; ++ *((unsigned long *)&__m256d_op1[2]) = 0x182100c72280e687; ++ *((unsigned long *)&__m256d_op1[1]) = 0x4a1c80bb8e892e00; ++ *((unsigned long *)&__m256d_op1[0]) = 0x063ecfbd58abc4b7; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, 
__m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffff90ffffff80; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256d_op0[0]) = 0xffffff90ffffff80; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffff0002fffeffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffff0002fffeffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x000000010486048c; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000100000006; ++ *((unsigned long *)&__m256d_op1[1]) = 0x000000010486048c; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000100000006; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256d_op0[2]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256d_op0[1]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256d_op0[0]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256d_op1[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00ff00ff00ef0120; ++ *((unsigned long *)&__m256d_op1[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00ff00ff00ef0120; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xff00ffff00000001; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256d_op0[1]) = 0xff00ffff00000001; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x04e8296f08181818; ++ *((unsigned long *)&__m256d_op1[2]) = 0x032feea900000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x04e8296f08181818; ++ *((unsigned long *)&__m256d_op1[0]) = 0x032feea900000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffff1cff1c; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffff1cff1c; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffff1cff1c; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffff1cff1c; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x1400080008000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x1400080008000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x1400080008000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x1400080008000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x000000000000ff80; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sueq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sueq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sueq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xc1be9e9e9f000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x41d8585858400000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xc1be9e9e9f000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x41d8585858400000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sueq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sueq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffff040000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffff040000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sueq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256d_op1[2]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256d_op1[1]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256d_op1[0]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvfcmp_sueq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffe045fffffeff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000ffffff7d; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sueq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sle_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sle_s.c +new file mode 100644 +index 000000000..d25fc25da +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sle_s.c +@@ -0,0 +1,627 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ 
*((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffffff90; ++ *((int *)&__m256_op0[4]) = 0xffffff80; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffffff90; ++ *((int *)&__m256_op0[0]) = 0xffffff80; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sle_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = 
__lasx_xvfcmp_sle_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xfefee0e3; ++ *((int *)&__m256_op0[6]) = 0xfefefe00; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0xfefee0e3; ++ *((int *)&__m256_op0[2]) = 0xfefefe00; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sle_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sle_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x0000ffff; ++ *((int *)&__m256_op0[6]) = 0x0000ffff; ++ *((int *)&__m256_op0[5]) = 0x0000ffff; ++ *((int *)&__m256_op0[4]) = 0x0000ffff; ++ *((int *)&__m256_op0[3]) = 0x0000ffff; ++ *((int *)&__m256_op0[2]) = 0x0000ffff; ++ *((int *)&__m256_op0[1]) = 0x0000ffff; ++ *((int *)&__m256_op0[0]) = 0x0000ffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sle_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sle_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x8000000a; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x8000000a; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sule_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ 
*((int *)&__m256_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sule_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x01010101; ++ *((int *)&__m256_op0[6]) = 0x01010101; ++ *((int *)&__m256_op0[5]) = 0x01010101; ++ *((int *)&__m256_op0[4]) = 0x01010101; ++ *((int *)&__m256_op0[3]) = 0x01010101; ++ *((int *)&__m256_op0[2]) = 0x01010101; ++ *((int *)&__m256_op0[1]) = 0x01010101; ++ *((int *)&__m256_op0[0]) = 0x01010101; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sule_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffd8ffc7; ++ *((int *)&__m256_op0[4]) = 0xffdaff8a; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffd8ffc7; ++ *((int *)&__m256_op0[0]) = 0xffdaff8a; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0xffffb3b4; ++ *((int *)&__m256_op1[5]) = 0xfffffff5; ++ *((int *)&__m256_op1[4]) = 0xffff4738; ++ *((int 
*)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0xffffb3b4; ++ *((int *)&__m256_op1[1]) = 0xfffffff5; ++ *((int *)&__m256_op1[0]) = 0xffff4738; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sule_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sule_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xf7f7f7f7; ++ *((int *)&__m256_op1[6]) = 
0xf7f7f7f8; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0xf7f7f7f7; ++ *((int *)&__m256_op1[2]) = 0xf7f7f7f8; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sule_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sule_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int 
*)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x5fa00000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x5fa00000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sule_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x97541c5897541c58; ++ *((unsigned long *)&__m256d_op0[2]) = 0x97541c5897541c58; ++ *((unsigned long *)&__m256d_op0[1]) = 0x97541c5897541c58; ++ *((unsigned long *)&__m256d_op0[0]) = 0x97541c5897541c58; ++ *((unsigned long *)&__m256d_op1[3]) = 0x97541c5897541c58; ++ *((unsigned long *)&__m256d_op1[2]) = 0x97541c5897541c58; ++ *((unsigned long *)&__m256d_op1[1]) = 0x97541c5897541c58; ++ *((unsigned long *)&__m256d_op1[0]) = 0x97541c5897541c58; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00000105fffffefb; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffff02000000fe; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00000105fffffefb; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffff02000000fe; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00000000000000fe; ++ *((unsigned 
long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000020afefb1; ++ *((unsigned long *)&__m256d_op0[2]) = 0x7f350104f7ebffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00000000003fffc1; ++ *((unsigned long *)&__m256d_op0[0]) = 0x005c0003fff9ffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0209fefb08140000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0003fffc00060000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xfffdfffdfffdfffd; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m256d_op1[3]) = 0x00000003f8000004; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x00000003f8000004; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000003; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000003; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256d_op0[1]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256d_op1[3]) = 0x8000800080008000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x800000ff800000ff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x800000ff800000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000010100000101; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000010100000101; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256d_op0[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256d_op1[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256d_op1[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256d_op1[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256d_op1[0]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sle_d (__m256d_op0, 
__m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x001f001fffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffe0ffe000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x001f001fffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffe0ffe000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sule_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xf5f5f5f5f5f5f5f5; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m256d_op0[0]) = 0xf5f5f5f5f5f5f5f5; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sule_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x8d8d72728d8d7272; ++ *((unsigned long *)&__m256d_op0[2]) = 0x8d8d72728d8d8d8d; ++ *((unsigned long *)&__m256d_op0[1]) = 0x8d8d72728d8d7272; ++ *((unsigned long *)&__m256d_op0[0]) = 0x8d8d72728d8d8d8d; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sule_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sule_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffff010100000001; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffff010100000001; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sule_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_slt_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_slt_s.c +new file mode 100644 +index 000000000..8210f749b +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_slt_s.c +@@ -0,0 +1,1212 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, 
__m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000101; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 
0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xc08f7800; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xfffffefd; ++ *((int *)&__m256_op0[3]) = 0xc08f7800; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000101; ++ *((int *)&__m256_op1[4]) = 0x00000102; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000101; ++ *((int *)&__m256_op1[0]) = 0x00000102; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int 
*)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x001f1f3e; ++ *((int *)&__m256_op1[6]) = 
0x3e1f1f00; ++ *((int *)&__m256_op1[5]) = 0x00030609; ++ *((int *)&__m256_op1[4]) = 0x09060300; ++ *((int *)&__m256_op1[3]) = 0x001f1f3e; ++ *((int *)&__m256_op1[2]) = 0x3e1f1f00; ++ *((int *)&__m256_op1[1]) = 0x00030609; ++ *((int *)&__m256_op1[0]) = 0x09060300; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_slt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000001; ++ *((int *)&__m256_op0[6]) = 0x00000001; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000001; ++ *((int *)&__m256_op0[2]) = 0x00000001; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x7fffffff; ++ *((int *)&__m256_op0[6]) = 0x7fffffff; ++ *((int *)&__m256_op0[5]) = 0x7fffffff; ++ *((int *)&__m256_op0[4]) = 0x7fffffff; ++ *((int *)&__m256_op0[3]) = 0x7fffffff; ++ *((int *)&__m256_op0[2]) = 0x7fffffff; ++ *((int *)&__m256_op0[1]) = 0x7fffffff; ++ *((int 
*)&__m256_op0[0]) = 0x7fffffff; ++ *((int *)&__m256_op1[7]) = 0x20fc0000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x20fc0000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffff0400; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0xffff0400; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x08050501; ++ *((int *)&__m256_op0[3]) = 
0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x08050501; ++ *((int *)&__m256_op1[7]) = 0x90909090; ++ *((int *)&__m256_op1[6]) = 0x90909090; ++ *((int *)&__m256_op1[5]) = 0x90909090; ++ *((int *)&__m256_op1[4]) = 0x90909090; ++ *((int *)&__m256_op1[3]) = 0x90909090; ++ *((int *)&__m256_op1[2]) = 0x90909090; ++ *((int *)&__m256_op1[1]) = 0x90909090; ++ *((int *)&__m256_op1[0]) = 0x90909090; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00001ff8; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int 
*)&__m256_op0[5]) = 0xd8d8c000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00001ff8; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xd8d8c000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x02020102; ++ *((int *)&__m256_op1[6]) = 0x02020102; ++ *((int *)&__m256_op1[5]) = 0x02020102; ++ *((int *)&__m256_op1[4]) = 0x02020102; ++ *((int *)&__m256_op1[3]) = 0x02020102; ++ *((int *)&__m256_op1[2]) = 0x02020102; ++ *((int *)&__m256_op1[1]) = 0x02020102; ++ *((int *)&__m256_op1[0]) = 0x02020102; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_slt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00ff00ff; ++ *((int *)&__m256_op0[6]) = 0x00ff00ff; ++ *((int *)&__m256_op0[5]) = 0x00ff00ff; ++ *((int *)&__m256_op0[4]) = 0x00ff00ff; ++ *((int *)&__m256_op0[3]) = 0x00ff00ff; ++ *((int *)&__m256_op0[2]) = 0x00ff00ff; ++ *((int *)&__m256_op0[1]) = 0x00ff00ff; ++ *((int *)&__m256_op0[0]) = 0x00ff00ff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000001; ++ *((int *)&__m256_op1[6]) = 0xffe00000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000001; ++ *((int *)&__m256_op1[2]) = 0xffe00000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x60000108; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x01060005; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x7fef0001; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000001; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0xfffffff8; ++ *((int *)&__m256_op1[4]) = 0xfffffff8; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0xfffffff8; ++ *((int *)&__m256_op1[0]) = 0xfc000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ 
__m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x327f0101; ++ *((int *)&__m256_op0[6]) = 0x01010102; ++ *((int *)&__m256_op0[5]) = 0x63000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x327f0101; ++ *((int *)&__m256_op0[2]) = 0x01010102; ++ *((int *)&__m256_op0[1]) = 0x63000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xce7fffff; ++ *((int *)&__m256_op1[6]) = 0xfffffffe; ++ *((int *)&__m256_op1[5]) = 0x63000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0xce7fffff; ++ *((int *)&__m256_op1[2]) = 0xfffffffe; ++ *((int *)&__m256_op1[1]) = 0x63000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x59800000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x59800000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x0eb7aaaa; ++ *((int *)&__m256_op1[6]) = 0xa6e6ac80; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x0eb7aaaa; ++ *((int *)&__m256_op1[2]) = 0xa6e6ac80; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000007; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000007; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xdbc80000; ++ *((int *)&__m256_op1[6]) = 0x00003fff; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0xdbc80000; ++ *((int *)&__m256_op1[2]) = 0x00003fff; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000002; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x0000007f; ++ *((int *)&__m256_op1[7]) = 0xfffffff3; ++ *((int *)&__m256_op1[6]) = 0x0000000b; ++ *((int *)&__m256_op1[5]) = 0xfffffff3; ++ *((int *)&__m256_op1[4]) = 0xfffffff3; ++ *((int *)&__m256_op1[3]) = 0xfffffff3; ++ *((int *)&__m256_op1[2]) = 0x0000000b; ++ *((int 
*)&__m256_op1[1]) = 0xfffffff3; ++ *((int *)&__m256_op1[0]) = 0xfffffff3; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x223d76f0; ++ *((int *)&__m256_op0[6]) = 0x9f3881ff; ++ *((int *)&__m256_op0[5]) = 0x3870ca8d; ++ *((int *)&__m256_op0[4]) = 0x013e76a0; ++ *((int *)&__m256_op0[3]) = 0x223d76f0; ++ *((int *)&__m256_op0[2]) = 0x9f37e357; ++ *((int *)&__m256_op0[1]) = 0x43ec0a1b; ++ *((int *)&__m256_op0[0]) = 0x2aba7ed0; ++ *((int *)&__m256_op1[7]) = 0x111ebb78; ++ *((int *)&__m256_op1[6]) = 0x4f9c4100; ++ *((int *)&__m256_op1[5]) = 0x1c386546; ++ *((int *)&__m256_op1[4]) = 0x809f3b50; ++ *((int *)&__m256_op1[3]) = 0x111ebb78; ++ *((int *)&__m256_op1[2]) = 0x4f9bf1ac; ++ *((int *)&__m256_op1[1]) = 0x21f6050d; ++ *((int *)&__m256_op1[0]) = 0x955d3f68; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x01010101; ++ *((int *)&__m256_op0[6]) = 0x27272525; ++ *((int *)&__m256_op0[5]) = 0x23a2a121; ++ *((int *)&__m256_op0[4]) = 0x179e951d; ++ *((int *)&__m256_op0[3]) = 0x01010101; ++ *((int *)&__m256_op0[2]) = 0x27272525; ++ *((int *)&__m256_op0[1]) = 0x23a2a121; ++ *((int *)&__m256_op0[0]) = 0x179e951d; ++ *((int *)&__m256_op1[7]) = 0x00001251; ++ *((int *)&__m256_op1[6]) = 0x00005111; ++ *((int *)&__m256_op1[5]) = 0x00000c4f; ++ *((int *)&__m256_op1[4]) = 
0x00004b0f; ++ *((int *)&__m256_op1[3]) = 0x00001251; ++ *((int *)&__m256_op1[2]) = 0x00005111; ++ *((int *)&__m256_op1[1]) = 0x00000c4f; ++ *((int *)&__m256_op1[0]) = 0x00004b0f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x80000000; ++ *((int *)&__m256_op1[6]) = 0xff800000; ++ *((int *)&__m256_op1[5]) = 0x80000000; ++ *((int *)&__m256_op1[4]) = 0x80000000; ++ *((int *)&__m256_op1[3]) = 0x80000000; ++ *((int *)&__m256_op1[2]) = 0xff800000; ++ *((int *)&__m256_op1[1]) = 0x80000000; ++ *((int *)&__m256_op1[0]) = 0x80000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000001; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000001; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000001; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000001; ++ *((int *)&__m256_op1[7]) = 0x7ff00000; ++ *((int 
*)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x7ff00000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x7ff00000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x7ff00000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x000000ff; ++ *((int *)&__m256_op1[6]) = 0x000000ff; ++ *((int *)&__m256_op1[5]) = 0x000000ff; ++ *((int *)&__m256_op1[4]) = 0x000000ff; ++ *((int *)&__m256_op1[3]) = 0x000000ff; ++ *((int *)&__m256_op1[2]) = 0x000000ff; ++ *((int *)&__m256_op1[1]) = 0x000000ff; ++ *((int *)&__m256_op1[0]) = 0x000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xfe01fe01; ++ *((int *)&__m256_op0[6]) = 0x7e81fd02; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x3fc001fe; ++ *((int *)&__m256_op0[3]) = 0xfe01fe01; ++ *((int *)&__m256_op0[2]) = 0x7e81fd02; ++ *((int *)&__m256_op0[1]) = 
0x00000000; ++ *((int *)&__m256_op0[0]) = 0x3fc001fe; ++ *((int *)&__m256_op1[7]) = 0xfe01fe01; ++ *((int *)&__m256_op1[6]) = 0x7e81fd02; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x3fc001fe; ++ *((int *)&__m256_op1[3]) = 0xfe01fe01; ++ *((int *)&__m256_op1[2]) = 0x7e81fd02; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x3fc001fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int 
*)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x80000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x80000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x80000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x80000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0ff80100ffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0ff80100ffffffff; ++ *((unsigned 
long *)&__m256d_op1[3]) = 0x0000017000000080; ++ *((unsigned long *)&__m256d_op1[2]) = 0xc06500550055ffab; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000017000000080; ++ *((unsigned long *)&__m256d_op1[0]) = 0xc06500550055ffab; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x01480000052801a2; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000ffdcff64; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0101010101010203; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffff000100000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000fe0100000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000fe0100000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000001900000019; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000001900000019; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000001900000019; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000001900000019; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00ff00ffff00ff00; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00000fff00004542; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00ff00ffff00ff00; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000fff00004542; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfffffffffffffffe; ++ *((unsigned long 
*)&__m256d_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00aa00ab00ff00ff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00aa00ab00ff00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) 
= 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000001e0007ffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000001e0007ffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000001e0007ffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000001e0007ffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffe37fe3001d001d; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00000000ffff8000; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffe37fe3001d001d; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00000000ffff8000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffa30000165a; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000104000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffa30000165a; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000104000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000022beb03f; ++ *((unsigned long *)&__m256d_op1[2]) = 0x7fffffffa2beb040; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000022beb03f; ++ *((unsigned long *)&__m256d_op1[0]) = 0x7fffffffa2beb040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00000000ffff0008; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000ffff0008; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x000000430207f944; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x000000430207f944; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xfe01fe017e81fd02; ++ *((unsigned long *)&__m256d_op0[2]) = 0x000000003fc001fe; ++ *((unsigned long *)&__m256d_op0[1]) = 0xfe01fe017e81fd02; ++ *((unsigned long *)&__m256d_op0[0]) = 0x000000003fc001fe; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffff000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000003868686a20; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0045b8ae81bce1d8; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000003868686a20; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0045b8ae81bce1d8; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned 
long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x001175f10e4330e8; ++ *((unsigned long *)&__m256d_op0[2]) = 0xff8f0842ff29211e; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfffff8d9ffa7103d; ++ *((unsigned long *)&__m256d_op1[3]) = 0x001175f10e4330e8; ++ *((unsigned long *)&__m256d_op1[2]) = 0xff8f0842ff29211e; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0xfffff8d9ffa7103d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000080000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x000b004a00440040; ++ *((unsigned long *)&__m256d_op0[2]) = 0x8020004a0011002a; ++ *((unsigned long *)&__m256d_op0[1]) = 0x000b004a00440040; ++ *((unsigned long *)&__m256d_op0[0]) = 0x8020004a0011002a; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0fff0fff00000020; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0fff0fff00000020; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0002000000020000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0002000000020000; ++ *((unsigned long 
*)&__m256d_op0[0]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x4040404040404040; ++ *((unsigned long *)&__m256d_op0[2]) = 0x4040404040404040; ++ *((unsigned long *)&__m256d_op0[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m256d_op0[0]) = 0x4040404040404040; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0006000000040000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0002000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0002000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x7ff0000000000000; ++ *((unsigned long 
*)&__m256d_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xfffefffefffeffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xfffefffefffeffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000860601934; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000800200028; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000860601934; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000800200028; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256d_op1[2]) = 0x4079808280057efe; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x007ffcfcfd020202; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000400000004; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000400000004; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sne_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sne_s.c +new file mode 100644 +index 000000000..9d015a5c8 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sne_s.c +@@ -0,0 +1,756 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, 
__m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0x80808080; ++ *((int *)&__m256_op0[6]) = 0x80808080; ++ *((int *)&__m256_op0[5]) = 0x80808080; ++ *((int *)&__m256_op0[4]) = 0x80808080; ++ *((int *)&__m256_op0[3]) = 0x80808080; ++ *((int *)&__m256_op0[2]) = 0x80808080; ++ *((int *)&__m256_op0[1]) = 0x80808080; ++ *((int *)&__m256_op0[0]) = 0x80808080; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sne_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xefefefef; ++ *((int *)&__m256_op0[6]) = 0xefefefef; ++ *((int *)&__m256_op0[5]) = 0xefefefef; ++ *((int *)&__m256_op0[4]) = 0xefefefef; ++ *((int *)&__m256_op0[3]) = 0xefefefef; ++ *((int *)&__m256_op0[2]) = 0xefefef6e; ++ *((int *)&__m256_op0[1]) = 0xeeeeeeee; ++ *((int *)&__m256_op0[0]) = 0xeeeeeeee; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ 
*((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sne_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x7f800000; ++ *((int *)&__m256_op0[6]) = 0x7f800000; ++ *((int *)&__m256_op0[5]) = 0x7f800000; ++ *((int *)&__m256_op0[4]) = 0x7f800000; ++ *((int *)&__m256_op0[3]) = 0x7f800000; ++ *((int *)&__m256_op0[2]) = 0x7f800000; ++ *((int *)&__m256_op0[1]) = 0x7f800000; ++ *((int *)&__m256_op0[0]) = 0x7f800000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sne_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x0000ffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x0000ffff; ++ *((int *)&__m256_op1[7]) = 0x00ff00ff; ++ *((int *)&__m256_op1[6]) 
= 0x00ff00ff; ++ *((int *)&__m256_op1[5]) = 0x00ff00ff; ++ *((int *)&__m256_op1[4]) = 0x00ff00ff; ++ *((int *)&__m256_op1[3]) = 0x00ff00ff; ++ *((int *)&__m256_op1[2]) = 0x00ff00ff; ++ *((int *)&__m256_op1[1]) = 0x00ff00ff; ++ *((int *)&__m256_op1[0]) = 0x00ff00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sne_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x80000000; ++ *((int *)&__m256_op1[4]) = 0x80000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x80000000; ++ *((int *)&__m256_op1[0]) = 0x80000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sne_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int 
*)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sne_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x40404040; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x40404040; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0xfefefefe; ++ *((int *)&__m256_op1[4]) = 0x3f800000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0xfefefefe; ++ *((int *)&__m256_op1[0]) = 0x3f800000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sne_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 
0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sne_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffff0101; ++ *((int *)&__m256_op0[4]) = 0x00000001; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffff0101; ++ *((int *)&__m256_op0[0]) = 0x00000001; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvfcmp_sne_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x01010101; ++ *((int 
*)&__m256_op0[5]) = 0x55555501; ++ *((int *)&__m256_op0[4]) = 0xfefefeab; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x01010101; ++ *((int *)&__m256_op0[1]) = 0x55555501; ++ *((int *)&__m256_op0[0]) = 0xfefefeab; ++ *((int *)&__m256_op1[7]) = 0x0010bfc8; ++ *((int *)&__m256_op1[6]) = 0x0010bf52; ++ *((int *)&__m256_op1[5]) = 0xfff1bfca; ++ *((int *)&__m256_op1[4]) = 0x0011bfcb; ++ *((int *)&__m256_op1[3]) = 0x0010bfc8; ++ *((int *)&__m256_op1[2]) = 0x0010bf52; ++ *((int *)&__m256_op1[1]) = 0xfff1bfca; ++ *((int *)&__m256_op1[0]) = 0x0011bfcb; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sune_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x80008000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x80008000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x80008000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x80008000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_sune_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00060000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00060000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000166; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000166; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_sune_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x0000004a; ++ *((int *)&__m256_op0[6]) = 0x557baac4; ++ *((int *)&__m256_op0[5]) = 0x556caad9; ++ *((int *)&__m256_op0[4]) = 0xaabbaa88; ++ *((int *)&__m256_op0[3]) = 0x0000004a; ++ *((int *)&__m256_op0[2]) = 0x557baac4; ++ *((int *)&__m256_op0[1]) = 0x556caad9; ++ *((int *)&__m256_op0[0]) = 0xaabbaa88; ++ *((int *)&__m256_op1[7]) = 0x09090909; ++ *((int *)&__m256_op1[6]) = 0x09090909; ++ *((int *)&__m256_op1[5]) = 0x09090909; ++ *((int *)&__m256_op1[4]) = 0x09090909; ++ *((int *)&__m256_op1[3]) = 0x09090909; ++ *((int *)&__m256_op1[2]) = 0x09090909; ++ *((int *)&__m256_op1[1]) = 0x09090909; ++ *((int *)&__m256_op1[0]) = 0x09090909; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ 
__m256i_out = __lasx_xvfcmp_sune_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sune_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x80000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x80000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x80000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x80000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x80000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x80000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x80000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x80000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sune_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000020; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000020; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvfcmp_sune_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sune_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sne_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xbabababababababa; ++ *((unsigned long *)&__m256d_op0[2]) = 0xbabababababababa; ++ *((unsigned long *)&__m256d_op0[1]) = 0xbabababababababa; ++ *((unsigned long *)&__m256d_op0[0]) = 0xbabababababababa; ++ *((unsigned long *)&__m256d_op1[3]) = 0x88888a6d0962002e; ++ *((unsigned long *)&__m256d_op1[2]) = 0xdb8a3109fe0f0020; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000007fff01fffb; ++ *((unsigned long *)&__m256d_op1[0]) = 0xdb8e20990cce025a; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sne_d (__m256d_op0, 
__m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000009; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sne_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sne_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256d_op0[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sne_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xfffffffffffffe00; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfffffffffffffe00; ++ *((unsigned long *)&__m256d_op0[1]) = 0xfffffffffffffe00; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfffffffffffffe00; ++ *((unsigned long *)&__m256d_op1[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x000000ffff88ff88; ++ *((unsigned long *)&__m256d_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x000000ffff88ff88; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sne_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sne_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sne_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xff00000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256d_op0[1]) = 0xff00000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xfffffffffffebeb8; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xfffffffffffebeb8; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xfafafafafafafafa; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000fefefe; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0101010101010203; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000ffee0000ff4c; ++ *((unsigned long 
*)&__m256d_op0[2]) = 0x0000ff050000ff3c; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000fff90000ff78; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000ffa80000ff31; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0020002000200020; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0020000000200000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0020002000200020; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x000000017bfffff0; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000180007fe8; ++ *((unsigned long *)&__m256d_op0[1]) = 0x000000017bfffff0; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000180007fe8; ++ *((unsigned long *)&__m256d_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long 
*)&__m256d_op1[2]) = 0x0100000001000100; ++ *((unsigned long *)&__m256d_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0100000001000100; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0c6a240000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0f00204000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0c6a240000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0f00204000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00000000000a0008; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000000a0008; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x000000010000685e; ++ *((unsigned long *)&__m256d_op0[2]) = 0x000020a4ffffbe4f; ++ *((unsigned long *)&__m256d_op0[1]) = 0x000000010000685e; ++ *((unsigned long *)&__m256d_op0[0]) = 0x000020a4ffffbe4f; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0087ff87f807ff87; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0087ff87f807ff87; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xfd02fd02fd02fd02; ++ *((unsigned long *)&__m256d_op1[2]) = 0xfd02fd02fd02fd02; ++ *((unsigned long *)&__m256d_op1[1]) = 0xfd02fd02fd02fd02; ++ *((unsigned long *)&__m256d_op1[0]) = 0xfd02fd02fd02fd02; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x7fffffff80000000; ++ *((unsigned long 
*)&__m256d_op1[0]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sor_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sor_s.c +new file mode 100644 +index 000000000..a61681073 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sor_s.c +@@ -0,0 +1,438 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int 
*)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sor_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sor_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 
0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sor_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000001; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000001; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000001; ++ *((int *)&__m256_op0[1]) = 0x80000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_sor_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x000000ff; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x000000ff; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int 
*)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sor_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sor_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000064; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 
0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000781; ++ *((int *)&__m256_op0[0]) = 0x00000064; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sor_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sor_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x0c6a2400; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x0f002040; ++ *((int 
*)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x0c6a2400; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x0f002040; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sor_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sor_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 
0x00000000; ++ *((int *)&__m256_op0[6]) = 0x0000000c; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x0000000c; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sor_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0feff00000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0feff00000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000003868686a20; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0045b8ae81bce1d8; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000003868686a20; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0045b8ae81bce1d8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256d_op1[2]) = 0x3ff1808001020101; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256d_op1[0]) = 0x3ff1808001020101; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000005; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000005; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000005; 
++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffdc; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffdc; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned 
long *)&__m256d_op0[3]) = 0xe0e0e0e0e0e0e0e0; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfefefefeffe0e0e0; ++ *((unsigned long *)&__m256d_op0[1]) = 0xe0e0e0e0e0e0e0e0; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfefefefeffe0e0e0; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000040004000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000040004000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256d_op1[2]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256d_op1[1]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256d_op1[0]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256d_op1[3]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256d_op1[1]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sun_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sun_s.c +new file mode 100644 +index 000000000..41f274920 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sun_s.c +@@ -0,0 +1,363 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0x1e180000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x1e180000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x1e180000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x1e180000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00802000; 
++ *((int *)&__m256_op1[6]) = 0x00802000; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0x00802000; ++ *((int *)&__m256_op1[2]) = 0x00802000; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sun_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000055; ++ *((int *)&__m256_op0[6]) = 0x36aaaaac; ++ *((int *)&__m256_op0[5]) = 0x55555555; ++ *((int *)&__m256_op0[4]) = 0xaaaaaaac; ++ *((int *)&__m256_op0[3]) = 0x00000055; ++ *((int *)&__m256_op0[2]) = 0x36aaaaac; ++ *((int *)&__m256_op0[1]) = 0x55555555; ++ *((int *)&__m256_op0[0]) = 0xaaaaaaac; ++ *((int *)&__m256_op1[7]) = 0x00060000; ++ *((int *)&__m256_op1[6]) = 0x00040000; ++ *((int *)&__m256_op1[5]) = 0x00025555; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00060000; ++ *((int *)&__m256_op1[2]) = 0x00040000; ++ *((int *)&__m256_op1[1]) = 0x00025555; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sun_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xff240000; ++ *((int *)&__m256_op0[6]) = 0x0000ff00; ++ *((int *)&__m256_op0[5]) = 0xfffeffe4; ++ *((int *)&__m256_op0[4]) = 0xfffeff00; ++ *((int *)&__m256_op0[3]) = 0xff640000; ++ *((int *)&__m256_op0[2]) = 0x0000ff00; ++ *((int 
*)&__m256_op0[1]) = 0xfffeff66; ++ *((int *)&__m256_op0[0]) = 0xfffeff00; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sun_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x80808082; ++ *((int *)&__m256_op0[6]) = 0x80808082; ++ *((int *)&__m256_op0[5]) = 0x80808082; ++ *((int *)&__m256_op0[4]) = 0x80808082; ++ *((int *)&__m256_op0[3]) = 0x80808082; ++ *((int *)&__m256_op0[2]) = 0x80808080; ++ *((int *)&__m256_op0[1]) = 0x80808082; ++ *((int *)&__m256_op0[0]) = 0x80808082; ++ *((int *)&__m256_op1[7]) = 0x55555555; ++ *((int *)&__m256_op1[6]) = 0x55555555; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x55555555; ++ *((int *)&__m256_op1[2]) = 0x55555555; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sun_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 
0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sun_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sun_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x6d6d6d6d; ++ *((int 
*)&__m256_op0[6]) = 0x6d6d6d6d; ++ *((int *)&__m256_op0[5]) = 0x6d6d6d6d; ++ *((int *)&__m256_op0[4]) = 0x6d6d6d6d; ++ *((int *)&__m256_op0[3]) = 0x6d6d6d6d; ++ *((int *)&__m256_op0[2]) = 0x6d6d6d6d; ++ *((int *)&__m256_op0[1]) = 0x6d6d6d6d; ++ *((int *)&__m256_op0[0]) = 0x6d6d6d6d; ++ *((int *)&__m256_op1[7]) = 0x6d6d6d6d; ++ *((int *)&__m256_op1[6]) = 0x6d6d6d6d; ++ *((int *)&__m256_op1[5]) = 0x6d6d6d6d; ++ *((int *)&__m256_op1[4]) = 0x6d6d6d6d; ++ *((int *)&__m256_op1[3]) = 0x6d6d6d6d; ++ *((int *)&__m256_op1[2]) = 0x6d6d6d6d; ++ *((int *)&__m256_op1[1]) = 0x6d6d6d6d; ++ *((int *)&__m256_op1[0]) = 0x6d6d6d6d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sun_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sun_s (__m256_op0, __m256_op1); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sun_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvfcmp_sun_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sun_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sun_d 
(__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sun_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00000000ffff0008; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00000000ffff0008; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sun_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000118; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000118; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sun_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0fffffff10000006; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0fffffff10000006; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0fffffff10000006; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0fffffff10000006; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sun_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000027; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000027; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; 
++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sun_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +-- +2.33.0 + diff --git a/LoongArch-Add-tests-for-ASX-vector-xvfnmadd-xvfrstp-.patch b/LoongArch-Add-tests-for-ASX-vector-xvfnmadd-xvfrstp-.patch new file mode 100644 index 0000000000000000000000000000000000000000..48b3b297d685754b3f8b3a7cb71d9a12d41b012a --- /dev/null +++ b/LoongArch-Add-tests-for-ASX-vector-xvfnmadd-xvfrstp-.patch @@ -0,0 +1,4991 @@ +From d0108f9375bd6eede5f7f4e289dce580b180848d Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Tue, 12 Sep 2023 16:22:49 +0800 +Subject: [PATCH 114/124] LoongArch: Add tests for ASX vector + xvfnmadd/xvfrstp/xvfstpi/xvhsubw/ xvmsub/xvrotr/xvrotri/xvld/xvst + instructions. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_d.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_s.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvfrstp.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvfrstpi.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvhsubw-1.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvhsubw-2.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvld.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvmsub.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvrotr.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvrotri.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvst.c: New test. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lasx/lasx-xvfnmadd_d.c | 324 +++++++ + .../loongarch/vector/lasx/lasx-xvfnmadd_s.c | 895 ++++++++++++++++++ + .../loongarch/vector/lasx/lasx-xvfrstp.c | 381 ++++++++ + .../loongarch/vector/lasx/lasx-xvfrstpi.c | 350 +++++++ + .../loongarch/vector/lasx/lasx-xvhsubw-1.c | 620 ++++++++++++ + .../loongarch/vector/lasx/lasx-xvhsubw-2.c | 545 +++++++++++ + .../loongarch/vector/lasx/lasx-xvld.c | 86 ++ + .../loongarch/vector/lasx/lasx-xvmsub.c | 647 +++++++++++++ + .../loongarch/vector/lasx/lasx-xvrotr.c | 530 +++++++++++ + .../loongarch/vector/lasx/lasx-xvrotri.c | 394 ++++++++ + .../loongarch/vector/lasx/lasx-xvst.c | 102 ++ + 11 files changed, 4874 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstp.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstpi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvld.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsub.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotr.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotri.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvst.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_d.c +new file mode 100644 +index 000000000..d161c850c +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_d.c +@@ -0,0 +1,324 @@ ++/* { dg-do run } */ ++/* { 
dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0001010101010101; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000010100; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0001000001000100; ++ *((unsigned long *)&__m256d_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op2[2]) = 0xffffffffbf7f7fff; ++ *((unsigned long *)&__m256d_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op2[0]) = 0xffffffffe651bfff; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffbf7f7fff; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffe651bfff; ++ __m256d_out = __lasx_xvfnmadd_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x3ff73ff83ff73ff8; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x3ff73ff83ff73ff8; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256d_op2[2]) = 0x2020202020206431; ++ *((unsigned long *)&__m256d_op2[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256d_op2[0]) = 0x2020202020206431; ++ *((unsigned long *)&__m256d_result[3]) = 0xa020202020202020; ++ *((unsigned long *)&__m256d_result[2]) = 0xa020202020206431; ++ *((unsigned long *)&__m256d_result[1]) = 0xa020202020202020; ++ *((unsigned long *)&__m256d_result[0]) = 0xa020202020206431; ++ __m256d_out = __lasx_xvfnmadd_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00000000f0f0f0f0; ++ *((unsigned long *)&__m256d_op0[2]) = 0xf0f0f0f0f0f0f0f0; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00000000f0f0f0f0; ++ *((unsigned long *)&__m256d_op0[0]) = 0xf0f0f0f0f0f0f0f0; ++ *((unsigned long *)&__m256d_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0001b0b1b4b5dd9f; ++ *((unsigned long *)&__m256d_op2[2]) = 0x7f7f7f5c8f374980; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0001b0b1b4b5dd9f; ++ *((unsigned long *)&__m256d_op2[0]) = 0x7f7f7f5c8f374980; ++ *((unsigned long *)&__m256d_result[3]) = 0x8001b0b1b4b5dd9f; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[1]) = 0x8001b0b1b4b5dd9f; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff; ++ __m256d_out = __lasx_xvfnmadd_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 
(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xff21ff21ff21ff21; ++ *((unsigned long *)&__m256d_op0[2]) = 0xff21ff21ff21ff21; ++ *((unsigned long *)&__m256d_op0[1]) = 0xff21ff21ff21ff21; ++ *((unsigned long *)&__m256d_op0[0]) = 0xff21ff21ff21ff21; ++ *((unsigned long *)&__m256d_op1[3]) = 0xff21c241ff21c241; ++ *((unsigned long *)&__m256d_op1[2]) = 0xff21c241ff21c241; ++ *((unsigned long *)&__m256d_op1[1]) = 0xff21c241ff21c241; ++ *((unsigned long *)&__m256d_op1[0]) = 0xff21c241ff21c241; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xfff0000000000000; ++ __m256d_out = __lasx_xvfnmadd_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000007; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000007; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x8000000000000000; ++ *((unsigned long 
*)&__m256d_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x8000000000000000; ++ __m256d_out = __lasx_xvfnmadd_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x1080108010060002; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x1080108010060002; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffe4ffffffe4; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffe4ffffffe4; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffe4ffffffe4; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffe4ffffffe4; ++ *((unsigned long *)&__m256d_op2[3]) = 0x7fff00017fff0000; ++ *((unsigned long *)&__m256d_op2[2]) = 0x7fff00017fff0000; ++ *((unsigned long *)&__m256d_op2[1]) = 0x7fff00017fff0000; ++ *((unsigned long *)&__m256d_op2[0]) = 0x7fff00017fff0000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7fff00017fff0000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7fff00017fff0000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7fff00017fff0000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7fff00017fff0000; ++ __m256d_out = __lasx_xvfnmadd_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x1716151417161514; ++ *((unsigned long *)&__m256d_op0[2]) = 0x1716151417161514; ++ *((unsigned long *)&__m256d_op0[1]) = 0x1716151417161514; ++ *((unsigned long *)&__m256d_op0[0]) = 0x1716151417161514; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000002780; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000002780; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256d_op2[2]) = 0x0000000000002780; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[0]) = 0x0000000000002780; ++ *((unsigned long *)&__m256d_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x8000000000002780; ++ *((unsigned long *)&__m256d_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x8000000000002780; ++ __m256d_out = __lasx_xvfnmadd_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0080200000802000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0080200000802000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0080200000802000; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0080200000802000; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff; ++ __m256d_out = __lasx_xvfnmsub_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x8000000000000000; ++ __m256d_out = __lasx_xvfnmsub_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256d_op0[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256d_op0[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256d_op0[0]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffba0c05; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffba0c05; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000000000483800; ++ *((unsigned long *)&__m256d_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0000000000483800; ++ *((unsigned long *)&__m256d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000483800; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffba0c05; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000483800; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffffba0c05; ++ __m256d_out = __lasx_xvfnmsub_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256d_op0[2]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x8000000000000000; ++ __m256d_out = __lasx_xvfnmsub_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000005000000020; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000005000000020; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000005000000020; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000005000000020; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[2]) = 0x0000005000000020; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[0]) = 0x0000005000000020; ++ *((unsigned long *)&__m256d_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000005000000020; ++ *((unsigned long *)&__m256d_result[1]) = 0x8000000000000000; ++ *((unsigned 
long *)&__m256d_result[0]) = 0x0000005000000020; ++ __m256d_out = __lasx_xvfnmsub_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0010000000000000; ++ *((unsigned long *)&__m256d_op2[2]) = 0x0008000000000000; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0010000000000000; ++ *((unsigned long *)&__m256d_op2[0]) = 0x0008000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0010000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0008000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0010000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0008000000000000; ++ __m256d_out = __lasx_xvfnmsub_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xff0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256d_op0[1]) = 0xff0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x8000000000000000; ++ __m256d_out = __lasx_xvfnmsub_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffff801000000010; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffff800300000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffff801000000010; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffff800300000000; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffe0000000; ++ __m256d_out = __lasx_xvfnmsub_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00000000ffffffce; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000ffffffce; ++ *((unsigned long *)&__m256d_op1[3]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000700000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x00000005ffffffff; ++ *((unsigned 
long *)&__m256d_op1[0]) = 0x0000000700000000; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x8000000000000000; ++ __m256d_out = __lasx_xvfnmsub_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_s.c +new file mode 100644 +index 000000000..c5e9576ea +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_s.c +@@ -0,0 +1,895 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int 
*)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffff5f5c; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffff5f5c; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffff5f5c; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffff5f5c; ++ *((int *)&__m256_op2[7]) = 0x0000000f; ++ *((int *)&__m256_op2[6]) = 0x0000000f; ++ *((int *)&__m256_op2[5]) = 0xff00ff0f; ++ *((int *)&__m256_op2[4]) = 0xff005f0f; ++ *((int *)&__m256_op2[3]) = 0x0000000f; ++ *((int *)&__m256_op2[2]) = 0x0000000f; ++ *((int *)&__m256_op2[1]) = 0xff00ff0f; ++ *((int *)&__m256_op2[0]) = 0xff005f0f; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffff5f5c; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffff5f5c; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffff5f5c; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffff5f5c; ++ __m256_out = __lasx_xvfnmadd_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00010001; ++ *((int *)&__m256_op0[6]) = 0x00010000; ++ *((int *)&__m256_op0[5]) = 0x020afefb; ++ *((int *)&__m256_op0[4]) = 0x08140000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000001; ++ *((int *)&__m256_op0[1]) = 0x0003fffc; ++ *((int *)&__m256_op0[0]) = 0x00060000; ++ *((int *)&__m256_op1[7]) = 0x80000000; ++ *((int *)&__m256_op1[6]) = 0x40000000; ++ *((int *)&__m256_op1[5]) = 0x40000000; ++ *((int *)&__m256_op1[4]) = 0x10000010; ++ *((int *)&__m256_op1[3]) = 0x80000000; ++ *((int *)&__m256_op1[2]) = 0x40000000; ++ *((int *)&__m256_op1[1]) = 0x80000000; ++ *((int *)&__m256_op1[0]) = 0x40000010; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x000000ff; ++ 
*((int *)&__m256_op2[4]) = 0x0001ffff; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x0000ffff; ++ *((int *)&__m256_op2[0]) = 0x00010000; ++ *((int *)&__m256_result[7]) = 0x80000000; ++ *((int *)&__m256_result[6]) = 0x80020000; ++ *((int *)&__m256_result[5]) = 0x828aff0b; ++ *((int *)&__m256_result[4]) = 0x8001ffff; ++ *((int *)&__m256_result[3]) = 0x80000000; ++ *((int *)&__m256_result[2]) = 0x80000002; ++ *((int *)&__m256_result[1]) = 0x8000ffff; ++ *((int *)&__m256_result[0]) = 0x800d0002; ++ __m256_out = __lasx_xvfnmadd_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x1f3d2101; ++ *((int *)&__m256_op0[6]) = 0x1f3d2101; ++ *((int *)&__m256_op0[5]) = 0x1f3d2101; ++ *((int *)&__m256_op0[4]) = 0xd07dbf01; ++ *((int *)&__m256_op0[3]) = 0x9f1fd080; ++ *((int *)&__m256_op0[2]) = 0x1f3d2101; ++ *((int *)&__m256_op0[1]) = 0x1f3d2101; ++ *((int *)&__m256_op0[0]) = 0xd07dbf01; ++ *((int *)&__m256_op1[7]) = 0x1d949d94; ++ *((int *)&__m256_op1[6]) = 0x9d949d95; ++ *((int *)&__m256_op1[5]) = 0x1d949d94; ++ *((int *)&__m256_op1[4]) = 0x9e1423d4; ++ *((int *)&__m256_op1[3]) = 0x1de9a03f; ++ *((int *)&__m256_op1[2]) = 0x3dd41d95; ++ *((int *)&__m256_op1[1]) = 0x1d949d94; ++ *((int *)&__m256_op1[0]) = 0x9e1423d4; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x8001b72e; ++ *((int *)&__m256_result[6]) = 0x0001b72e; ++ *((int *)&__m256_result[5]) = 0x8001b72e; ++ *((int *)&__m256_result[4]) = 0xaf12d5f0; ++ *((int *)&__m256_result[3]) = 0x00024763; ++ *((int *)&__m256_result[2]) = 0x9d9cb530; ++ *((int 
*)&__m256_result[1]) = 0x8001b72e; ++ *((int *)&__m256_result[0]) = 0xaf12d5f0; ++ __m256_out = __lasx_xvfnmadd_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x1f0fdf7f; ++ *((int *)&__m256_op0[6]) = 0x3e3b31d4; ++ *((int *)&__m256_op0[5]) = 0x7ff80000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x1f0fdf7f; ++ *((int *)&__m256_op0[2]) = 0x3e3b31d4; ++ *((int *)&__m256_op0[1]) = 0x7ff80000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x80000000; ++ *((int *)&__m256_result[6]) = 0x80000000; ++ *((int *)&__m256_result[5]) = 0x7ff80000; ++ *((int *)&__m256_result[4]) = 0x80000000; ++ *((int *)&__m256_result[3]) = 0x80000000; ++ *((int *)&__m256_result[2]) = 0x80000000; ++ *((int *)&__m256_result[1]) = 0x7ff80000; ++ *((int *)&__m256_result[0]) = 0x80000000; ++ __m256_out = __lasx_xvfnmadd_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x80000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x80000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x80000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x80000000; 
++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x0000ffff; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x0000ffff; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000001; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000001; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000001; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000001; ++ *((int *)&__m256_result[7]) = 0x80000000; ++ *((int *)&__m256_result[6]) = 0x80000001; ++ *((int *)&__m256_result[5]) = 0x80000000; ++ *((int *)&__m256_result[4]) = 0x80000001; ++ *((int *)&__m256_result[3]) = 0x80000000; ++ *((int *)&__m256_result[2]) = 0x80000001; ++ *((int *)&__m256_result[1]) = 0x80000000; ++ *((int *)&__m256_result[0]) = 0x80000001; ++ __m256_out = __lasx_xvfnmadd_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000200; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000200; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000200; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000200; ++ *((int *)&__m256_op2[7]) = 0xffffffa0; ++ *((int *)&__m256_op2[6]) = 0x00000001; ++ *((int *)&__m256_op2[5]) = 
0xffffffe0; ++ *((int *)&__m256_op2[4]) = 0x00000001; ++ *((int *)&__m256_op2[3]) = 0xffffffa0; ++ *((int *)&__m256_op2[2]) = 0x00000001; ++ *((int *)&__m256_op2[1]) = 0xffffffe0; ++ *((int *)&__m256_op2[0]) = 0x00000001; ++ *((int *)&__m256_result[7]) = 0xffffffa0; ++ *((int *)&__m256_result[6]) = 0x80000001; ++ *((int *)&__m256_result[5]) = 0xffffffe0; ++ *((int *)&__m256_result[4]) = 0x80000001; ++ *((int *)&__m256_result[3]) = 0xffffffa0; ++ *((int *)&__m256_result[2]) = 0x80000001; ++ *((int *)&__m256_result[1]) = 0xffffffe0; ++ *((int *)&__m256_result[0]) = 0x80000001; ++ __m256_out = __lasx_xvfnmadd_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x80000000; ++ *((int *)&__m256_result[6]) = 0x80000000; ++ *((int *)&__m256_result[5]) = 0x80000000; ++ *((int *)&__m256_result[4]) = 0x80000000; ++ *((int *)&__m256_result[3]) = 0x80000000; ++ *((int *)&__m256_result[2]) = 0x80000000; ++ *((int 
*)&__m256_result[1]) = 0x80000000; ++ *((int *)&__m256_result[0]) = 0x80000000; ++ __m256_out = __lasx_xvfnmadd_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfnmadd_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; 
++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x49810081; ++ *((int *)&__m256_op1[6]) = 0x4843ffe1; ++ *((int *)&__m256_op1[5]) = 0x49810081; ++ *((int *)&__m256_op1[4]) = 0x68410001; ++ *((int *)&__m256_op1[3]) = 0x49810081; ++ *((int *)&__m256_op1[2]) = 0x4843ffe1; ++ *((int *)&__m256_op1[1]) = 0x49810081; ++ *((int *)&__m256_op1[0]) = 0x68410001; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x80000000; ++ *((int *)&__m256_result[6]) = 0x80000000; ++ *((int *)&__m256_result[5]) = 0x80000000; ++ *((int *)&__m256_result[4]) = 0x80000000; ++ *((int *)&__m256_result[3]) = 0x80000000; ++ *((int *)&__m256_result[2]) = 0x80000000; ++ *((int *)&__m256_result[1]) = 0x80000000; ++ *((int *)&__m256_result[0]) = 0x80000000; ++ __m256_out = __lasx_xvfnmadd_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00009fff; ++ *((int *)&__m256_op0[6]) = 0x00002001; ++ *((int *)&__m256_op0[5]) = 0x0000ffff; ++ *((int *)&__m256_op0[4]) = 0x0000ffff; ++ *((int *)&__m256_op0[3]) = 0x00009fff; ++ *((int *)&__m256_op0[2]) = 0x00002001; ++ *((int *)&__m256_op0[1]) = 0x0000ffff; ++ *((int *)&__m256_op0[0]) = 0x0000ffff; ++ *((int *)&__m256_op1[7]) = 0xfffeb683; ++ *((int *)&__m256_op1[6]) = 0x9ffffd80; ++ *((int *)&__m256_op1[5]) = 0xfffe97c0; ++ *((int *)&__m256_op1[4]) = 0x20010001; ++ *((int *)&__m256_op1[3]) = 0xfffeb683; ++ *((int *)&__m256_op1[2]) = 0x9ffffd80; ++ *((int *)&__m256_op1[1]) = 0xfffe97c0; ++ *((int *)&__m256_op1[0]) = 0x20010001; ++ *((int *)&__m256_op2[7]) = 0x00009fff; ++ *((int *)&__m256_op2[6]) = 0x00002001; ++ *((int *)&__m256_op2[5]) = 
0x0000ffff; ++ *((int *)&__m256_op2[4]) = 0x0000ffff; ++ *((int *)&__m256_op2[3]) = 0x00009fff; ++ *((int *)&__m256_op2[2]) = 0x00002001; ++ *((int *)&__m256_op2[1]) = 0x0000ffff; ++ *((int *)&__m256_op2[0]) = 0x0000ffff; ++ *((int *)&__m256_result[7]) = 0xfffeb683; ++ *((int *)&__m256_result[6]) = 0x80002001; ++ *((int *)&__m256_result[5]) = 0xfffe97c0; ++ *((int *)&__m256_result[4]) = 0x8000ffff; ++ *((int *)&__m256_result[3]) = 0xfffeb683; ++ *((int *)&__m256_result[2]) = 0x80002001; ++ *((int *)&__m256_result[1]) = 0xfffe97c0; ++ *((int *)&__m256_result[0]) = 0x8000ffff; ++ __m256_out = __lasx_xvfnmadd_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x7fffffff; ++ *((int *)&__m256_op0[6]) = 0x80000000; ++ *((int *)&__m256_op0[5]) = 0x7fffffff; ++ *((int *)&__m256_op0[4]) = 0x80000000; ++ *((int *)&__m256_op0[3]) = 0x7fffffff; ++ *((int *)&__m256_op0[2]) = 0x80000000; ++ *((int *)&__m256_op0[1]) = 0x7fffffff; ++ *((int *)&__m256_op0[0]) = 0x80000000; ++ *((int *)&__m256_op1[7]) = 0xfd02fd02; ++ *((int *)&__m256_op1[6]) = 0xfd02fd02; ++ *((int *)&__m256_op1[5]) = 0xfd02fd02; ++ *((int *)&__m256_op1[4]) = 0xfd02fd02; ++ *((int *)&__m256_op1[3]) = 0xfd02fd02; ++ *((int *)&__m256_op1[2]) = 0xfd02fd02; ++ *((int *)&__m256_op1[1]) = 0xfd02fd02; ++ *((int *)&__m256_op1[0]) = 0xfd02fd02; ++ *((int *)&__m256_op2[7]) = 0xfd02fd02; ++ *((int *)&__m256_op2[6]) = 0xfd02fd02; ++ *((int *)&__m256_op2[5]) = 0xfd02fd02; ++ *((int *)&__m256_op2[4]) = 0xfd02fd02; ++ *((int *)&__m256_op2[3]) = 0xfd02fd02; ++ *((int *)&__m256_op2[2]) = 0xfd02fd02; ++ *((int *)&__m256_op2[1]) = 0xfd02fd02; ++ *((int *)&__m256_op2[0]) = 0xfd02fd02; ++ *((int *)&__m256_result[7]) = 0x7fffffff; ++ *((int *)&__m256_result[6]) = 0x7d02fd02; ++ *((int *)&__m256_result[5]) = 0x7fffffff; ++ *((int *)&__m256_result[4]) = 0x7d02fd02; ++ *((int *)&__m256_result[3]) = 0x7fffffff; ++ *((int *)&__m256_result[2]) = 0x7d02fd02; ++ *((int 
*)&__m256_result[1]) = 0x7fffffff; ++ *((int *)&__m256_result[0]) = 0x7d02fd02; ++ __m256_out = __lasx_xvfnmadd_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xbf7f7fff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xe651bfff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0xffffffff; ++ *((int *)&__m256_op2[2]) = 0xf328dfff; ++ *((int *)&__m256_op2[1]) = 0x6651bfff; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfnmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x07070707; ++ *((int *)&__m256_op0[5]) = 0x01020400; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00020100; ++ *((int *)&__m256_op0[1]) = 0x07030200; 
++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffff80; ++ *((int *)&__m256_op1[6]) = 0xfefeff00; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x01000400; ++ *((int *)&__m256_op1[3]) = 0xffffff80; ++ *((int *)&__m256_op1[2]) = 0xfeff0000; ++ *((int *)&__m256_op1[1]) = 0x02020080; ++ *((int *)&__m256_op1[0]) = 0x5c800400; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0xffffffff; ++ *((int *)&__m256_op2[2]) = 0xf328dfff; ++ *((int *)&__m256_op2[1]) = 0x6651bfff; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0xffffff80; ++ *((int *)&__m256_result[6]) = 0x46867f79; ++ *((int *)&__m256_result[5]) = 0x80000000; ++ *((int *)&__m256_result[4]) = 0x80000000; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xf328dfff; ++ *((int *)&__m256_result[1]) = 0x6651bfff; ++ *((int *)&__m256_result[0]) = 0x80000000; ++ __m256_out = __lasx_xvfnmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xe0000000; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xe0000000; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xe0000000; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xe0000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x80000000; ++ *((int *)&__m256_op1[4]) = 0x80000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x80000000; ++ *((int *)&__m256_op1[0]) = 0x80000000; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 
0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0x80000000; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0x80000000; ++ __m256_out = __lasx_xvfnmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x80000000; ++ *((int *)&__m256_result[6]) = 0x80000000; ++ *((int *)&__m256_result[5]) = 0x80000000; ++ *((int *)&__m256_result[4]) = 0x80000000; ++ *((int *)&__m256_result[3]) = 0x80000000; ++ *((int *)&__m256_result[2]) = 0x80000000; ++ *((int 
*)&__m256_result[1]) = 0x80000000; ++ *((int *)&__m256_result[0]) = 0x80000000; ++ __m256_out = __lasx_xvfnmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x80000000; ++ *((int *)&__m256_result[6]) = 0x80000000; ++ *((int *)&__m256_result[5]) = 0x80000000; ++ *((int *)&__m256_result[4]) = 0x80000000; ++ *((int *)&__m256_result[3]) = 0x80000000; ++ *((int *)&__m256_result[2]) = 0x80000000; ++ *((int *)&__m256_result[1]) = 0x80000000; ++ *((int *)&__m256_result[0]) = 0x80000000; ++ __m256_out = __lasx_xvfnmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; 
++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_op2[7]) = 0x7f800000; ++ *((int *)&__m256_op2[6]) = 0x7f800000; ++ *((int *)&__m256_op2[5]) = 0x7fc00000; ++ *((int *)&__m256_op2[4]) = 0x7fc00000; ++ *((int *)&__m256_op2[3]) = 0x7f800000; ++ *((int *)&__m256_op2[2]) = 0x7f800000; ++ *((int *)&__m256_op2[1]) = 0x7fc00000; ++ *((int *)&__m256_op2[0]) = 0x7fc00000; ++ *((int *)&__m256_result[7]) = 0x7f800000; ++ *((int *)&__m256_result[6]) = 0x7f800000; ++ *((int *)&__m256_result[5]) = 0x7fc00000; ++ *((int *)&__m256_result[4]) = 0x7fc00000; ++ *((int *)&__m256_result[3]) = 0x7f800000; ++ *((int *)&__m256_result[2]) = 0x7f800000; ++ *((int *)&__m256_result[1]) = 0x7fc00000; ++ *((int *)&__m256_result[0]) = 0x7fc00000; ++ __m256_out = __lasx_xvfnmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 
0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x80000000; ++ *((int *)&__m256_result[6]) = 0x80000000; ++ *((int *)&__m256_result[5]) = 0x80000000; ++ *((int *)&__m256_result[4]) = 0x80000000; ++ *((int *)&__m256_result[3]) = 0x80000000; ++ *((int *)&__m256_result[2]) = 0x80000000; ++ *((int *)&__m256_result[1]) = 0x80000000; ++ *((int *)&__m256_result[0]) = 0x80000000; ++ __m256_out = __lasx_xvfnmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x7fefffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0x7fefffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0x7fefffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0x7fefffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x7fefffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0x7fefffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0x7fefffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int 
*)&__m256_result[1]) = 0x7fefffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfnmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0xf7f8f7f8; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00003f78; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0xf7f8f7f8; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00003f78; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0xf7f8f7f8; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00003f78; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0xf7f8f7f8; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00003f78; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x80000000; ++ *((int *)&__m256_result[6]) = 0xff800000; ++ *((int *)&__m256_result[5]) = 0x80000000; ++ *((int *)&__m256_result[4]) = 0x80000000; ++ *((int *)&__m256_result[3]) = 0x80000000; ++ *((int *)&__m256_result[2]) = 0xff800000; ++ *((int *)&__m256_result[1]) = 0x80000000; ++ *((int *)&__m256_result[0]) = 0x80000000; ++ __m256_out = __lasx_xvfnmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffffffff; 
++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0xffffffff; ++ *((int *)&__m256_op2[4]) = 0xffffffff; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0xffffffff; ++ *((int *)&__m256_op2[0]) = 0xffffffff; ++ *((int *)&__m256_result[7]) = 0x80000000; ++ *((int *)&__m256_result[6]) = 0x80000000; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0x80000000; ++ *((int *)&__m256_result[2]) = 0x80000000; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfnmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x01010100; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000405; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x01010100; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000405; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x01010100; ++ *((int *)&__m256_op2[5]) = 
0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000405; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x01010100; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000405; ++ *((int *)&__m256_result[7]) = 0x80000000; ++ *((int *)&__m256_result[6]) = 0x01010100; ++ *((int *)&__m256_result[5]) = 0x80000000; ++ *((int *)&__m256_result[4]) = 0x00000405; ++ *((int *)&__m256_result[3]) = 0x80000000; ++ *((int *)&__m256_result[2]) = 0x01010100; ++ *((int *)&__m256_result[1]) = 0x80000000; ++ *((int *)&__m256_result[0]) = 0x00000405; ++ __m256_out = __lasx_xvfnmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00800080; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000202; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00800080; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000202; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0xff88ff88; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0xff88ff88; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x80000000; ++ *((int *)&__m256_result[6]) = 0x80000000; ++ *((int *)&__m256_result[5]) = 0x80000000; ++ *((int *)&__m256_result[4]) = 0xffc8ff88; ++ *((int *)&__m256_result[3]) = 0x80000000; ++ *((int *)&__m256_result[2]) = 0x80000000; ++ *((int 
*)&__m256_result[1]) = 0x80000000; ++ *((int *)&__m256_result[0]) = 0xffc8ff88; ++ __m256_out = __lasx_xvfnmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_op2[7]) = 0x001fffff; ++ *((int *)&__m256_op2[6]) = 0xffffffff; ++ *((int *)&__m256_op2[5]) = 0xffffffff; ++ *((int *)&__m256_op2[4]) = 0xffffffff; ++ *((int *)&__m256_op2[3]) = 0x001fffff; ++ *((int *)&__m256_op2[2]) = 0xffffffff; ++ *((int *)&__m256_op2[1]) = 0xffffffff; ++ *((int *)&__m256_op2[0]) = 0xffffffff; ++ *((int *)&__m256_result[7]) = 0x001fffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0x001fffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfnmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; 
++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x7fff8000; ++ *((int *)&__m256_op1[4]) = 0x7fff0000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x7fff8000; ++ *((int *)&__m256_op1[0]) = 0x7fff0000; ++ *((int *)&__m256_op2[7]) = 0xffffffff; ++ *((int *)&__m256_op2[6]) = 0xffffffff; ++ *((int *)&__m256_op2[5]) = 0xffffffff; ++ *((int *)&__m256_op2[4]) = 0xffffff10; ++ *((int *)&__m256_op2[3]) = 0xffffffff; ++ *((int *)&__m256_op2[2]) = 0xffffffff; ++ *((int *)&__m256_op2[1]) = 0xffffffff; ++ *((int *)&__m256_op2[0]) = 0xffffff10; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffff10; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffff10; ++ __m256_out = __lasx_xvfnmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstp.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstp.c +new file mode 100644 +index 000000000..557f9f8b5 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstp.c +@@ -0,0 +1,381 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, 
long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000080000; ++ __m256i_out = __lasx_xvfrstp_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00007f7f00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00007f7f00007fff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000007f00340040; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op2[0]) = 0x0000007f000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[2]) = 0x2020202020200008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0008010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvfrstp_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00000008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00000008; ++ __m256i_out = __lasx_xvfrstp_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x03f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op1[2]) = 0x03f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op1[1]) = 0x03f7f7f7f7f7f7f7; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0x03f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op2[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op2[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op2[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op2[0]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrstp_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000010; ++ __m256i_out = __lasx_xvfrstp_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffff10; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffff10; ++ __m256i_out = __lasx_xvfrstp_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000008; ++ __m256i_out = __lasx_xvfrstp_h (__m256i_op0, __m256i_op1, __m256i_op2); 
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xbfbfbfbfbfbfbfbf; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xbfbfbfbfbfbfbfbf; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0xbfbfbfbfbfbfbfbf; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0xbfbfbfbfbfbfbfbf; ++ *((unsigned long *)&__m256i_result[3]) = 0x0008000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xbfbfbfbfbfbfbfbf; ++ *((unsigned long *)&__m256i_result[1]) = 0x0008000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xbfbfbfbfbfbfbfbf; ++ __m256i_out = __lasx_xvfrstp_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000008; ++ __m256i_out = __lasx_xvfrstp_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000010; ++ __m256i_out = __lasx_xvfrstp_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000f0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000f0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op2[2]) = 0x00000000000000f0; ++ *((unsigned long *)&__m256i_op2[1]) = 0x00000000000000f0; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000000f0; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrstp_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000010; ++ __m256i_out = __lasx_xvfrstp_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffe1; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffe1; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffe1; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffe1; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffe1; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffff10; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffe1; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffff10; ++ __m256i_out = __lasx_xvfrstp_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x007f007bfffffffb; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x007f007bfffffffb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000010000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000010000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrstp_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffdbbbcf; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffb8579f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffdbbbcf; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffb8579f; ++ *((unsigned long *)&__m256i_op2[3]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m256i_op2[2]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m256i_op2[1]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m256i_op2[0]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrstp_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000008; ++ __m256i_out = __lasx_xvfrstp_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffe000ffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffe000ffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffe000ffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffe000ffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000c040c0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000c040c0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x00000004843ffdff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x00000004843ffdff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffe000ffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffe000ffffffff08; ++ *((unsigned long *)&__m256i_result[1]) = 0xffe000ffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffe000ffffffff08; ++ __m256i_out = __lasx_xvfrstp_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffff0000; ++ __m256i_out = __lasx_xvfrstp_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0004000400040004; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0004000500040005; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0004000400040004; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0004000500040005; ++ *((unsigned long *)&__m256i_op2[3]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x00007fff00000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x00007fff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffff10; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffff10; ++ __m256i_out = __lasx_xvfrstp_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstpi.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstpi.c +new file mode 100644 +index 000000000..cdb7b11aa +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstpi.c +@@ -0,0 +1,350 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ 
++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x38a966b31be83ee9; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5f6108dc25b8e028; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf41a56e8a20878d7; ++ *((unsigned long *)&__m256i_op0[0]) = 0x683b8b67e20c8ee5; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffcd42ffffecc0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000475ffff4c51; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000740dffffad17; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00003f4bffff7130; ++ *((unsigned long *)&__m256i_result[3]) = 0x38a966b31be83ee9; ++ *((unsigned long *)&__m256i_result[2]) = 0x5f6108dc25b80001; ++ *((unsigned long *)&__m256i_result[1]) = 0xf41a56e8a20878d7; ++ *((unsigned long *)&__m256i_result[0]) = 0x683b8b67e20c0001; ++ __m256i_out = __lasx_xvfrstpi_h (__m256i_op0, __m256i_op1, 0x10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[2]) = 0x1000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x1000000000000000; ++ __m256i_out = __lasx_xvfrstpi_b (__m256i_op0, __m256i_op1, 0x17); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000004fb; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffff0008; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffff0008; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfrstpi_h (__m256i_op0, __m256i_op1, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff0008ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff0008ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfrstpi_h (__m256i_op0, __m256i_op1, 0x6); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ffffff1e9e9e9e; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff9e9eb09e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00ffffff1e9e9e9e; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff9e9eb09e; ++ *((unsigned long *)&__m256i_result[3]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long *)&__m256i_result[2]) = 0xffc00000ffc0ffc0; ++ *((unsigned long *)&__m256i_result[1]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long *)&__m256i_result[0]) = 0xffc00000ffc0ffc0; ++ __m256i_out = __lasx_xvfrstpi_h (__m256i_op0, __m256i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrstpi_h (__m256i_op0, __m256i_op1, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrstpi_b (__m256i_op0, __m256i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000226200005111; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000165e0000480d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000226200005111; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000165e0000480d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffd8ffc7ffdaff8a; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffd8ffc7ffdaff8a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000226200005111; ++ *((unsigned long *)&__m256i_result[2]) = 0x000016000000480d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000226200005111; ++ *((unsigned long *)&__m256i_result[0]) = 0x000016000000480d; ++ __m256i_out = __lasx_xvfrstpi_b (__m256i_op0, __m256i_op1, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xe800c0d8fffeeece; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff383efffedf0c; ++ *((unsigned long *)&__m256i_op0[1]) = 0xe800c0d8fffeeece; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff383efffedf0c; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xe800c000fffeeece; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff383efffedf0c; ++ *((unsigned long *)&__m256i_result[1]) = 0xe800c000fffeeece; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff383efffedf0c; ++ __m256i_out = __lasx_xvfrstpi_b (__m256i_op0, __m256i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000008; ++ __m256i_out = __lasx_xvfrstpi_h (__m256i_op0, __m256i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x80000000ff800000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x80000000ff800000; ++ *((unsigned long *)&__m256i_result[3]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff000200000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xff00000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0xffff000200000000; ++ __m256i_out = __lasx_xvfrstpi_b (__m256i_op0, __m256i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffff00ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffff00ffffffff; ++ __m256i_out = __lasx_xvfrstpi_b (__m256i_op0, __m256i_op1, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7c007c0080008000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7c007c0080008000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7c00000880008000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7c00000880008000; ++ __m256i_out = __lasx_xvfrstpi_h (__m256i_op0, __m256i_op1, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x457db03e457db03e; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x457db03e45a87310; ++ *((unsigned long *)&__m256i_op0[1]) = 0x457db03e457db03e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x457db03e45a87310; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000f000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000f000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0008b03e457db03e; ++ *((unsigned long *)&__m256i_result[2]) = 0x457db03e45a87310; ++ *((unsigned long *)&__m256i_result[1]) = 0x0008b03e457db03e; ++ *((unsigned long *)&__m256i_result[0]) = 0x457db03e45a87310; ++ __m256i_out = __lasx_xvfrstpi_h (__m256i_op0, __m256i_op1, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000000b; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000000b; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000000b; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000000b; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000008000b; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000000b; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000008000b; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000000b; ++ __m256i_out = __lasx_xvfrstpi_h (__m256i_op0, __m256i_op1, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000dfffffff1; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0x0000000cfffffff3; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000dfffffff1; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000cfffffff3; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrstpi_b (__m256i_op0, __m256i_op1, 0x16); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfrstpi_b (__m256i_op0, __m256i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000001000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000001000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000010001; ++ __m256i_out = __lasx_xvfrstpi_b (__m256i_op0, __m256i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0008000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0008000000000000; ++ __m256i_out = __lasx_xvfrstpi_h (__m256i_op0, __m256i_op1, 0x13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000007fff0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000007fff0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_result[3]) = 0xff01fffe00000001; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[1]) = 0xff01fffe00000001; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffe00000001; ++ __m256i_out = __lasx_xvfrstpi_b (__m256i_op0, __m256i_op1, 0xe); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrstpi_b (__m256i_op0, __m256i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0fffffff10000006; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0fffffff10000006; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0fffffff10000006; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0fffffff10000006; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0fffffff10000006; ++ *((unsigned long *)&__m256i_result[2]) = 0x10ffffff10000006; ++ *((unsigned long *)&__m256i_result[1]) = 0x0fffffff10000006; ++ *((unsigned long *)&__m256i_result[0]) = 0x10ffffff10000006; ++ __m256i_out = __lasx_xvfrstpi_b (__m256i_op0, __m256i_op1, 0x17); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-1.c +new file mode 100644 +index 000000000..fa4d5fd6f 
+--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-1.c +@@ -0,0 +1,620 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvhsubw_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000fffefe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0x0000000000fffefe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000010000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000483800; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000483800; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffb80000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffb80000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000080008001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000080008001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvhsubw_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000007fee; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000007fee; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000007fee; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000007fee; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000012; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000012; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000012; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000012; ++ __m256i_out = __lasx_xvhsubw_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvhsubw_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000f0f0003; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x00000000000f1003; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000f0001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000011; ++ __m256i_out = __lasx_xvhsubw_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00000000; ++ __m256i_out = __lasx_xvhsubw_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffe000ffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffe000ffffffff08; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffe000ffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffe000ffffffff08; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0fffffff10000006; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0fffffff10000006; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0fffffff10000006; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0fffffff10000006; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000001fffffff9; ++ __m256i_out = __lasx_xvhsubw_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100002000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffc0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00000000; ++ 
__m256i_out = __lasx_xvhsubw_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7f00000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00b7003600120000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00b7006200fc0000; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000fe00ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00b7004100190004; ++ *((unsigned long *)&__m256i_op1[3]) = 0xdb801b6d0962003f; ++ *((unsigned long *)&__m256i_op1[2]) = 0xdb8a3109fe0f0024; ++ *((unsigned long *)&__m256i_op1[1]) = 0x9a7f997fff01ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xbe632a4f1c3c5653; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffe54affffffd3; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffcfae000000d8; ++ *((unsigned long *)&__m256i_result[1]) = 0x00006681000000ff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffd668ffffa9c6; ++ __m256i_out = __lasx_xvhsubw_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xbff00000bff00000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xbff00000bff00000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffbff1ffffbff1; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffbff1ffffbff1; ++ __m256i_out = __lasx_xvhsubw_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000f0000000f000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000f0000000f000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256i_op1[1]) = 
0x0000000f0000000f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffff1fffffff1; ++ __m256i_out = __lasx_xvhsubw_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff00000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1010100f10100fd4; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff00000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1010100f10100fd4; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffeeffaf; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000011; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffeeffaf; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000011; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000051; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000101000000fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000051; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000101000000fff; ++ __m256i_out = __lasx_xvhsubw_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x817f11ed81800ff0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x817f11ed81800ff0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff8180ffff8181; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff8180ffff8181; ++ __m256i_out = __lasx_xvhsubw_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00feff0100feff01; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00feff0100feff01; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000ff000000ff; ++ __m256i_out = __lasx_xvhsubw_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvhsubw_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000001010000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000007efeff00; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000001010000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000007efeff00; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000007aff7c00; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffd017d00; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000007aff7c00; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffd017d00; ++ __m256i_out = __lasx_xvhsubw_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffe00000002; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffe00000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffe00000002; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffe00000002; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffffe; ++ *((unsigned 
long *)&__m256i_result[2]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffffe; ++ __m256i_out = __lasx_xvhsubw_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffe00; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffe00; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffe00; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffe00; ++ __m256i_out = __lasx_xvhsubw_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvhsubw_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ 
__m256i_out = __lasx_xvhsubw_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00003f3f00004040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00003f3f00004040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00003f3f00004040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00003f3f00004040; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffc0c0ffffbfc0; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffc0c0ffffbfc0; ++ __m256i_out = __lasx_xvhsubw_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000010000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000010000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001000100000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001000100000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffeffff10000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffeffff10000000; ++ __m256i_out = __lasx_xvhsubw_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000fff8579f; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fff8579f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000fff8579f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000fff8579f; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff0007a861; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff0007a861; ++ __m256i_out = __lasx_xvhsubw_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-2.c +new file mode 100644 +index 000000000..87c3e25b1 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-2.c +@@ -0,0 +1,545 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include 
"../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffff90ffffff80; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffff90ffffff80; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xff01ff70ff01ff80; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xff01ff70ff01ff80; ++ __m256i_out = __lasx_xvhsubw_hu_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000007; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x00c200c200c200c2; ++ *((unsigned long *)&__m256i_result[2]) = 0x00c200c200c200bb; ++ *((unsigned long *)&__m256i_result[1]) = 0x00c200c200c200c2; ++ *((unsigned long *)&__m256i_result[0]) = 0x00c200c200c200bb; ++ __m256i_out = __lasx_xvhsubw_hu_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000a00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000010000000a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000a00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000010000000a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000800000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000080008001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000080008001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvhsubw_hu_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvhsubw_hu_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000f90; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000f90; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff70; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ff70; ++ __m256i_out = __lasx_xvhsubw_hu_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_result[2]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_hu_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000f9f9f9f9; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000faf3f3f2; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x00000000f9f9f9f9; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000faf3f3f2; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000ff00bb; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000ff0057; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000ff00bb; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000ff0057; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000fffa003e; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000fffb009c; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000fffa003e; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fffb009c; ++ __m256i_out = __lasx_xvhsubw_hu_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvhsubw_wu_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x6300000000000001; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x6300000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvhsubw_wu_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_wu_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000003fff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000003fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000003fff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000003fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffc001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000c000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x00000000ffffc001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000c000; ++ __m256i_out = __lasx_xvhsubw_wu_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_wu_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x9ffffd8020010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffff9fffffff9; ++ *((unsigned long *)&__m256i_op0[1]) = 0x9ffffd8020010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffff9fffffff9; ++ *((unsigned long *)&__m256i_op1[3]) = 0x40f69fe73c26f4ee; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x40f69fe73c26f4ee; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000018ffff2b13; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000018ffff2b13; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvhsubw_wu_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00800080ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00800080ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffe0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000001e18; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000ffe0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000001e18; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffff001f; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000007fe268; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffff001f; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000007fe268; ++ __m256i_out = __lasx_xvhsubw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff8c80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffffe40; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0004000400040004; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffbfffc; ++ __m256i_out = __lasx_xvhsubw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffff00fffffff0; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffff00; ++ __m256i_out = __lasx_xvhsubw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000055; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000055; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffefefeff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffff295329; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffefefeff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffff295329; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff01010101; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00d6acd7; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff01010101; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00d6acd7; ++ __m256i_out = __lasx_xvhsubw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x120e120dedf1edf2; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x120e120dedf1edf2; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000120e120d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000120e120d; ++ __m256i_out = __lasx_xvhsubw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000483800; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000483800; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000001; ++ __m256i_out = 
__lasx_xvhsubw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvhsubw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000dfffffff1; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000cfffffff3; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0x0000000dfffffff1; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000cfffffff3; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff0000000f; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff0000000d; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff0000000f; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff0000000d; ++ __m256i_out = __lasx_xvhsubw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0002ff80ffb70000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffb7ff80ffd0ffd8; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00010000002fff9e; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffb5ff80ffd0ffd8; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0048007f002f0028; ++ *((unsigned long 
*)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x004a007f002f0028; ++ __m256i_out = __lasx_xvhsubw_qu_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_qu_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_qu_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xdbcbdbcbecececec; ++ *((unsigned long *)&__m256i_op1[2]) = 0xdbcbdbcb0000dbcb; ++ *((unsigned long *)&__m256i_op1[1]) = 0xdbcbdbcbecececec; ++ *((unsigned long *)&__m256i_op1[0]) = 0xdbcbdbcb0000dbcb; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x24342434ffff2435; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x24342434ffff2435; ++ __m256i_out = __lasx_xvhsubw_qu_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvhsubw_qu_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffba8300004fc2; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffba8300004fc2; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffba8300004fc2; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffba8300004fc2; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffba8300004fc2; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffba8300004fc2; ++ __m256i_out = __lasx_xvhsubw_qu_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7efefefe80ffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7efefefe80ffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffff8001; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvhsubw_qu_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00003fff00003fff; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffebffffffebfff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffebffffffebfff; ++ __m256i_out = __lasx_xvhsubw_qu_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff7eddffff7ed3; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff7edfffff7edf; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff7eddffff7ed3; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff7edfffff7edf; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00003fef00003fea; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00003ff000003ff0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00003fef00003fea; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00003ff000003ff0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff3eedffff3ee3; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff3eedffff3ee3; ++ __m256i_out = __lasx_xvhsubw_qu_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvld.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvld.c +new file mode 100644 +index 000000000..c1eda6c6c +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvld.c +@@ -0,0 +1,86 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int 
long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x042f0500cfea969a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x58569d7be9179100; ++ *((unsigned long *)&__m256i_op0[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0ad152a5ad72feeb; ++ *((unsigned long *)&__m256i_result[3]) = 0x042f0500cfea969a; ++ *((unsigned long *)&__m256i_result[2]) = 0x58569d7be9179100; ++ *((unsigned long *)&__m256i_result[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long *)&__m256i_result[0]) = 0x0ad152a5ad72feeb; ++ __m256i_out = __lasx_xvld ((unsigned long *)&__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x042f0500cfea969a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x58569d7be9179100; ++ *((unsigned long *)&__m256i_op0[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0ad152a5ad72feeb; ++ *((unsigned long *)&__m256i_result[3]) = 0x042f0500cfea969a; ++ *((unsigned long *)&__m256i_result[2]) = 0x58569d7be9179100; ++ *((unsigned long *)&__m256i_result[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long *)&__m256i_result[0]) = 0x0ad152a5ad72feeb; ++ __m256i_out = __lasx_xvldx ((unsigned long *)&__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x042f0500cfea969a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x58569d7be9179100; ++ *((unsigned long *)&__m256i_op0[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0ad152a5ad72feeb; ++ *((unsigned long *)&__m256i_result[3]) = 0xebebebebebebebeb; ++ *((unsigned long *)&__m256i_result[2]) = 0xebebebebebebebeb; ++ *((unsigned long *)&__m256i_result[1]) = 0xebebebebebebebeb; ++ *((unsigned long *)&__m256i_result[0]) = 0xebebebebebebebeb; ++ __m256i_out = 
__lasx_xvldrepl_b ((unsigned long *)&__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x042f0500cfea969a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x58569d7be9179100; ++ *((unsigned long *)&__m256i_op0[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0ad152a5ad72feeb; ++ *((unsigned long *)&__m256i_result[3]) = 0xfeebfeebfeebfeeb; ++ *((unsigned long *)&__m256i_result[2]) = 0xfeebfeebfeebfeeb; ++ *((unsigned long *)&__m256i_result[1]) = 0xfeebfeebfeebfeeb; ++ *((unsigned long *)&__m256i_result[0]) = 0xfeebfeebfeebfeeb; ++ __m256i_out = __lasx_xvldrepl_h ((unsigned long *)&__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x042f0500cfea969a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x58569d7be9179100; ++ *((unsigned long *)&__m256i_op0[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0ad152a5ad72feeb; ++ *((unsigned long *)&__m256i_result[3]) = 0xad72feebad72feeb; ++ *((unsigned long *)&__m256i_result[2]) = 0xad72feebad72feeb; ++ *((unsigned long *)&__m256i_result[1]) = 0xad72feebad72feeb; ++ *((unsigned long *)&__m256i_result[0]) = 0xad72feebad72feeb; ++ __m256i_out = __lasx_xvldrepl_w ((unsigned long *)&__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x042f0500cfea969a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x58569d7be9179100; ++ *((unsigned long *)&__m256i_op0[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0ad152a5ad72feeb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0ad152a5ad72feeb; ++ *((unsigned long *)&__m256i_result[2]) = 0x0ad152a5ad72feeb; ++ *((unsigned long *)&__m256i_result[1]) = 0x0ad152a5ad72feeb; ++ *((unsigned long *)&__m256i_result[0]) = 0x0ad152a5ad72feeb; ++ __m256i_out = __lasx_xvldrepl_d ((unsigned long *)&__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsub.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsub.c +new file mode 100644 +index 000000000..8c8d4996b +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsub.c +@@ -0,0 +1,647 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff00000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x074132a240000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff00000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvmsub_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffff0008; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffff0001; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x00007ffe81fdfe03; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsub_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsub_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsub_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op0[2]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op0[1]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op0[0]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op1[3]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op1[2]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op1[1]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op1[0]) = 
0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_result[2]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_result[1]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_result[0]) = 0x555555ab555555ab; ++ __m256i_out = __lasx_xvmsub_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsub_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 
0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvmsub_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000017f0000017d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000017f0000017f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000002e0000002e; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000002e0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000002e0000002e; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000002e0000fffe; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000002e0000002e; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000002e0000ffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000002e0000002e; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000002e0000fffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000f7bc0001f7bd; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000f93b0000017c; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000f7bc0001f7bd; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000f93b0000017b; ++ __m256i_out = __lasx_xvmsub_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1010101010101010; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1010101010101010; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1010101010101010; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1010101010101010; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x1410141014101410; ++ *((unsigned long *)&__m256i_result[2]) = 0x1410141014101410; ++ *((unsigned long *)&__m256i_result[1]) = 0x1410141014101410; ++ *((unsigned long *)&__m256i_result[0]) = 0x1410141014101410; ++ __m256i_out = __lasx_xvmsub_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xdb801b6d0962003f; ++ *((unsigned long *)&__m256i_op0[2]) = 0xdb8a3109fe0f0024; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000007fff01ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xdb8e209d0cce025a; ++ *((unsigned long *)&__m256i_op1[3]) = 0xb70036db12c4007e; ++ *((unsigned long *)&__m256i_op1[2]) = 0xb7146213fc1e0049; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000fefe02fffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xb71c413b199d04b5; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffcc8000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x000000007dfdff4b; ++ *((unsigned long *)&__m256i_result[3]) = 0xdb801b6d0962003f; ++ *((unsigned long *)&__m256i_result[2]) = 
0xdb8a3109fe0f0024; ++ *((unsigned long *)&__m256i_result[1]) = 0x9a7f997fff01ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xbe632a4f1c3c5653; ++ __m256i_out = __lasx_xvmsub_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op2[1]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x01010101010000ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvmsub_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000004800000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000004500f300fb; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000004800000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000004500f300fb; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000004800000; ++ *((unsigned long *)&__m256i_op2[2]) = 
0x0000004500f300fb; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000004800000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000004500f300fb; ++ *((unsigned long *)&__m256i_result[3]) = 0x7b7b7b7b80000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xcacacb1011040500; ++ *((unsigned long *)&__m256i_result[1]) = 0x7b7b7b7b80000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xcacacb1011040500; ++ __m256i_out = __lasx_xvmsub_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsub_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_op1[2]) = 
0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffefffffffe; ++ __m256i_out = __lasx_xvmsub_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfff3fff3fff3fff3; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff3fff3fff3fff3; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfff3fff3fff3fff3; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfff3fff3fff3fff3; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000001a00; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000001a00; ++ __m256i_out = __lasx_xvmsub_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsub_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff80000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffecffffffec; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffecffffffec; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffecffffffec; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffecffffffec; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfe7fffecfe7fffec; ++ *((unsigned long *)&__m256i_result[2]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfe7fffecfe7fffec; ++ *((unsigned long *)&__m256i_result[0]) = 
0xff80000000000000; ++ __m256i_out = __lasx_xvmsub_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xa020202020202020; ++ *((unsigned long *)&__m256i_op0[2]) = 0xa020202020206431; ++ *((unsigned long *)&__m256i_op0[1]) = 0xa020202020202020; ++ *((unsigned long *)&__m256i_op0[0]) = 0xa020202020206431; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xa020202020202020; ++ *((unsigned long *)&__m256i_result[2]) = 0xa020202020206431; ++ *((unsigned long *)&__m256i_result[1]) = 0xa020202020202020; ++ *((unsigned long *)&__m256i_result[0]) = 0xa020202020206431; ++ __m256i_out = __lasx_xvmsub_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ff00000000ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_op2[0]) = 
0x000000007fff7fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000007fff80fe; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007fff80fe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff80007ffe; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ff007fff80fe; ++ __m256i_out = __lasx_xvmsub_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_result[2]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_result[1]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f807f007f7f817f; ++ __m256i_out = __lasx_xvmsub_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000080008001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 
0x0000000080008001; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsub_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsub_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x1f001f00000007ef; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x00001fff200007ef; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvmsub_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_result[3]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x4000000000000000; ++ __m256i_out = __lasx_xvmsub_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000457db03e; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff457db03f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000457db03e; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff457db03f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x00000000457db03e; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffff457db03f; ++ *((unsigned long *)&__m256i_op2[1]) = 0x00000000457db03e; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffff457db03f; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000457db03e; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff457db03f; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000457db03e; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff457db03f; ++ __m256i_out = __lasx_xvmsub_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsub_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000fe200000fe1f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000fe200000fe1f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x001ffffe00200000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x001ffffe00200000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000fe200000fe1f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000fe200000fe1f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsub_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsub_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsub_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fe363637fe36363; ++ *((unsigned long *)&__m256i_op1[2]) = 
0x7fe363637fe36363; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fe363637fe36363; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fe363637fe36363; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000009e; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000009e; ++ __m256i_out = __lasx_xvmsub_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffff0078ffff0078; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffff0078ffff0078; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsub_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_op0[2]) = 
0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000038ea4d4a; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000038ea4d4a; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffff8; ++ __m256i_out = __lasx_xvmsub_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_result[0]) = 
0x00000000000d6d6d; ++ __m256i_out = __lasx_xvmsub_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotr.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotr.c +new file mode 100644 +index 000000000..21446e55e +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotr.c +@@ -0,0 +1,530 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff0001ff02; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff020afefc; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000003fefd; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffefffefff7fff7; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff7ffffffbfffb; ++ __m256i_out = __lasx_xvrotr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff0001ff02; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff020afefc; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000003fefd; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0209fefb08140000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0003fffc00060000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff0001ff04; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff02a0fefc; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000cfefd; ++ __m256i_out = __lasx_xvrotr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000007; ++ 
*((unsigned long *)&__m256i_op1[3]) = 0xff01ff010000fff9; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ff19; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff02ff020001fffa; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000100010001fffa; ++ *((unsigned long *)&__m256i_result[3]) = 0x807f807f00000380; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000007380; ++ *((unsigned long *)&__m256i_result[1]) = 0xc03fc03f000001c0; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000001c0; ++ __m256i_out = __lasx_xvrotr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff8c80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffffe40; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff8c80; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fffffe40; ++ __m256i_out = __lasx_xvrotr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_result[1]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvrotr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotr_d 
(__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvrotr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000007fee; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000007fee; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000007fee; 
++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000007fee; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000fedd; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000fedd; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000fedd; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000fedd; ++ __m256i_out = __lasx_xvrotr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvrotr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x805f0000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x805f0000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x805f0000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x805f0000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_result[2]) = 0x80be0000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_result[0]) = 0x80be0000ffffffff; ++ __m256i_out = __lasx_xvrotr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000f0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000f0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000f0; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000000f0; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000457db03e; ++ *((unsigned long *)&__m256i_op0[2]) = 
0xffffffff457db03f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000457db03e; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff457db03f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000457d607d; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff457d607f; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000457d607d; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff457d607f; ++ __m256i_out = __lasx_xvrotr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x07ffffff07ffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x07ffffff07ffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x07ffffff07ffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x07ffffff07ffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_result[2]) = 0x0ffffffe0ffffffe; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_result[0]) = 0x0ffffffe0ffffffe; ++ __m256i_out = __lasx_xvrotr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffe0047d00e00480; ++ *((unsigned long *)&__m256i_op0[2]) = 0x001fc0200060047a; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffe0047d00e00480; ++ *((unsigned long *)&__m256i_op0[0]) = 0x001fc0200060047a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffe0047d00e00480; ++ *((unsigned long *)&__m256i_result[2]) = 0x001fc0200060047a; ++ *((unsigned long *)&__m256i_result[1]) = 0xffe0047d00e00480; ++ *((unsigned long *)&__m256i_result[0]) = 0x001fc0200060047a; ++ __m256i_out = __lasx_xvrotr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x386000003df80000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x386000003df80000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ca0000fff80000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00ca0000fff80000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x381800007af80000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x381800007af80000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffff0001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffff0001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000086fe0000403e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000403e00004040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000086fe0000403e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000403e00004040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000086fe0000403e; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000403e00004040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000086fe0000403e; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000403e00004040; ++ *((unsigned long *)&__m256i_result[3]) = 0x00001bfa000000f9; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000f900004040; ++ *((unsigned long *)&__m256i_result[1]) = 0x00001bfa000000f9; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000f900004040; ++ __m256i_out = __lasx_xvrotr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvrotr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff0607ffff0607; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff0607ffff0607; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff0607ffff0607; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff0607ffff0607; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000f9f9f9f9; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000faf3f3f2; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000f9f9f9f9; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000faf3f3f2; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff0607ffff0383; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff0607ffffc0c1; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff0607ffff0383; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff0607ffffc0c1; ++ __m256i_out = __lasx_xvrotr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040; ++ __m256i_out = __lasx_xvrotr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ff00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ff00; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000007f433c79; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000007f433c79; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x00000000007f8000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000007f8000; ++ __m256i_out = __lasx_xvrotr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000b7; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffefff80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000b7; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffefff80; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000001fff000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000001fff000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000000b7; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffdfff80; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000000b7; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffdfff80; ++ __m256i_out = __lasx_xvrotr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000010000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001000100800000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotri.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotri.c +new file mode 100644 +index 000000000..c1b8e1752 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotri.c +@@ -0,0 +1,394 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotri_h (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotri_h (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotri_d (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xbea2e127c046721f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1729c073816edebe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xde91f010000006f9; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5ef1f90efefaf30d; ++ *((unsigned long *)&__m256i_result[3]) = 0x515f93f023600fb9; ++ *((unsigned long *)&__m256i_result[2]) = 0x948b39e0b7405f6f; ++ *((unsigned long *)&__m256i_result[1]) = 0x48ef087800007c83; ++ *((unsigned long *)&__m256i_result[0]) = 0x78af877c7d7f86f9; ++ 
__m256i_out = __lasx_xvrotri_h (__m256i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0202020202020202; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0202020202020202; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1010101010101010; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1010101010101010; ++ *((unsigned long *)&__m256i_result[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[2]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvrotri_h (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7f7f7f7f7fff00ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7f007f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7f7f7f7f7fff00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0xbfbfbfbfbfbfbfbf; ++ *((unsigned long *)&__m256i_result[2]) = 0xbfbfbfbfbfff807f; ++ *((unsigned long *)&__m256i_result[1]) = 0xbf803fbfbfbfbfbf; ++ *((unsigned long *)&__m256i_result[0]) = 0xbfbfbfbfbfff807f; ++ __m256i_out = __lasx_xvrotri_d (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffef; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000010; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffef; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000010; ++ __m256i_out = __lasx_xvrotri_b (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000002a5429; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000002a5429; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000002a54290; ++ __m256i_out = __lasx_xvrotri_w (__m256i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotri_b (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000907; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000907; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000483800; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000483800; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotri_w (__m256i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000007f0000007f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000007f0000007f; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff01ff80ff01ff80; ++ *((unsigned 
long *)&__m256i_op0[0]) = 0xff01ff800000007e; ++ *((unsigned long *)&__m256i_result[3]) = 0x003f8000003f8000; ++ *((unsigned long *)&__m256i_result[2]) = 0x003f8000003f8000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffc07f80ffc07f80; ++ *((unsigned long *)&__m256i_result[0]) = 0xffc07f80003f0000; ++ __m256i_out = __lasx_xvrotri_w (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotri_d (__m256i_op0, 0x24); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvrotri_d (__m256i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffff6f20; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffff6f20; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xdbc8000000003fff; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xdbc8000000003fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotri_d (__m256i_op0, 0x12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_result[3]) = 0x4343434343434343; ++ *((unsigned long *)&__m256i_result[2]) = 0x4343434343434343; ++ *((unsigned long *)&__m256i_result[1]) = 0x4343434343434343; ++ *((unsigned long *)&__m256i_result[0]) = 0x4343434343434343; ++ __m256i_out = __lasx_xvrotri_h (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotri_d (__m256i_op0, 0x38); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffdffd; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffdffd; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffdffd; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffdffd; ++ 
__m256i_out = __lasx_xvrotri_h (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotri_d (__m256i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvrotri_w (__m256i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotri_d (__m256i_op0, 0x35); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotri_w (__m256i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000f0000000f000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000f0000000f000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000f0000000f000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000f0000000f000; ++ __m256i_out = __lasx_xvrotri_w (__m256i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvrotri_h (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x80000000ff800000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x80000000ff800000; ++ *((unsigned 
long *)&__m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000007fc00000400; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000040000000400; ++ *((unsigned long *)&__m256i_result[1]) = 0x000007fc00000400; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000040000000400; ++ __m256i_out = __lasx_xvrotri_d (__m256i_op0, 0x35); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000008000; ++ __m256i_out = __lasx_xvrotri_h (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotri_b (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x8000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvrotri_b (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotri_h (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotri_b (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ 
__m256i_out = __lasx_xvrotri_w (__m256i_op0, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007f0000007f0060; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007f0000007f0060; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00f7000000f70006; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00f7000000f70006; ++ __m256i_out = __lasx_xvrotri_b (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotri_d (__m256i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotri_h (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0xfffffefffffffeff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffefffffffeff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffefffffffeff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffefffffffeff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffbfffffffb; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffbfffffffb; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffbfffffffb; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffbfffffffb; ++ __m256i_out = __lasx_xvrotri_h (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvst.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvst.c +new file mode 100644 +index 000000000..3c5e775ff +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvst.c +@@ -0,0 +1,102 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x042f0500cfea969a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x58569d7be9179100; ++ *((unsigned long *)&__m256i_op0[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0ad152a5ad72feeb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0; ++ *((unsigned long *)&__m256i_result[2]) = 0x0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0; ++ __lasx_xvst (__m256i_op0, (unsigned long *)&__m256i_result, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_op0, __m256i_result); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x042f0500cfea969a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x58569d7be9179100; ++ *((unsigned long *)&__m256i_op0[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0ad152a5ad72feeb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0; ++ *((unsigned long *)&__m256i_result[2]) = 0x0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0; ++ *((unsigned long *)&__m256i_result[0]) = 0x0; ++ __lasx_xvstx (__m256i_op0, (unsigned long *)&__m256i_result, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_op0, __m256i_result); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x042f0500cfea969a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x58569d7be9179100; ++ *((unsigned long *)&__m256i_op0[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0ad152a5ad72feeb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0; ++ *((unsigned long *)&__m256i_result[2]) = 0x0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0; ++ *((unsigned long *)&__m256i_result[0]) = 0x8d; ++ *((unsigned long *)&__m256i_out[3]) = 0x0; ++ *((unsigned long *)&__m256i_out[2]) = 0x0; ++ *((unsigned long *)&__m256i_out[1]) = 0x0; ++ *((unsigned long *)&__m256i_out[0]) = 0x0; ++ __lasx_xvstelm_b (__m256i_op0, (unsigned long *)&__m256i_out, 0x0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x042f0500cfea969a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x58569d7be9179100; ++ *((unsigned long *)&__m256i_op0[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0ad152a5ad72feeb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0; ++ *((unsigned long *)&__m256i_result[2]) = 0x0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0; ++ *((unsigned long *)&__m256i_result[0]) = 0x9100; ++ *((unsigned long *)&__m256i_out[3]) = 0x0; ++ *((unsigned long *)&__m256i_out[2]) = 0x0; ++ 
*((unsigned long *)&__m256i_out[1]) = 0x0; ++ *((unsigned long *)&__m256i_out[0]) = 0x0; ++ __lasx_xvstelm_h (__m256i_op0, (unsigned long *)&__m256i_out, 0x0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x042f0500cfea969a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x58569d7be9179100; ++ *((unsigned long *)&__m256i_op0[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0ad152a5ad72feeb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0; ++ *((unsigned long *)&__m256i_result[2]) = 0x0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0; ++ *((unsigned long *)&__m256i_result[0]) = 0xe9179100; ++ *((unsigned long *)&__m256i_out[3]) = 0x0; ++ *((unsigned long *)&__m256i_out[2]) = 0x0; ++ *((unsigned long *)&__m256i_out[1]) = 0x0; ++ *((unsigned long *)&__m256i_out[0]) = 0x0; ++ __lasx_xvstelm_w (__m256i_op0, (unsigned long *)&__m256i_out, 0x0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x042f0500cfea969a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x58569d7be9179100; ++ *((unsigned long *)&__m256i_op0[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0ad152a5ad72feeb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0; ++ *((unsigned long *)&__m256i_result[2]) = 0x0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0; ++ *((unsigned long *)&__m256i_result[0]) = 0x58569d7be9179100; ++ *((unsigned long *)&__m256i_out[3]) = 0x0; ++ *((unsigned long *)&__m256i_out[2]) = 0x0; ++ *((unsigned long *)&__m256i_out[1]) = 0x0; ++ *((unsigned long *)&__m256i_out[0]) = 0x0; ++ __lasx_xvstelm_d (__m256i_op0, (unsigned long *)&__m256i_out, 0x0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +-- +2.33.0 + diff --git a/LoongArch-Add-tests-for-ASX-vector-xvhadd-xvhaddw-xv.patch b/LoongArch-Add-tests-for-ASX-vector-xvhadd-xvhaddw-xv.patch new file mode 100644 index 
0000000000000000000000000000000000000000..8bf92a09a03c53ea49f49192dc776da69eb9693a --- /dev/null +++ b/LoongArch-Add-tests-for-ASX-vector-xvhadd-xvhaddw-xv.patch @@ -0,0 +1,6930 @@ +From 03f7a61fa5efb197cdd66014552aa8727677b891 Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Tue, 12 Sep 2023 15:19:28 +0800 +Subject: [PATCH 100/124] LoongArch: Add tests for ASX vector + xvhadd/xvhaddw/xvmaddwev/xvmaddwod instructions. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lasx/lasx-xvhaddw-1.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvhaddw-2.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvmadd.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-1.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-2.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-3.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-1.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-2.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-3.c: New test. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lasx/lasx-xvhaddw-1.c | 560 +++++++++++ + .../loongarch/vector/lasx/lasx-xvhaddw-2.c | 650 ++++++++++++ + .../loongarch/vector/lasx/lasx-xvmadd.c | 742 ++++++++++++++ + .../loongarch/vector/lasx/lasx-xvmaddwev-1.c | 856 ++++++++++++++++ + .../loongarch/vector/lasx/lasx-xvmaddwev-2.c | 723 ++++++++++++++ + .../loongarch/vector/lasx/lasx-xvmaddwev-3.c | 940 ++++++++++++++++++ + .../loongarch/vector/lasx/lasx-xvmaddwod-1.c | 742 ++++++++++++++ + .../loongarch/vector/lasx/lasx-xvmaddwod-2.c | 799 +++++++++++++++ + .../loongarch/vector/lasx/lasx-xvmaddwod-3.c | 820 +++++++++++++++ + 9 files changed, 6832 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmadd.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-3.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-1.c +new file mode 100644 +index 000000000..1cf0ec698 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-1.c +@@ -0,0 +1,560 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, 
__m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf7ffffffffffff1f; ++ *((unsigned long *)&__m256i_op0[2]) = 0xbffffffffffffeff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf7ffffffffffff1f; ++ *((unsigned long *)&__m256i_op0[0]) = 0xbffffffffffffeff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_result[3]) = 0xfff6fffefffe005b; ++ *((unsigned long *)&__m256i_result[2]) = 0xffbefffefffe005a; ++ *((unsigned long *)&__m256i_result[1]) = 0xfff6fffefffe005b; ++ *((unsigned long *)&__m256i_result[0]) = 0xffbefffefffe005a; ++ __m256i_out = __lasx_xvhaddw_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_result[1]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000000000000; ++ __m256i_out = __lasx_xvhaddw_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000060000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000060000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000060000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000060000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000fffffffefffe; ++ *((unsigned long *)&__m256i_result[1]) = 0xff7fffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000fffffffefffe; ++ __m256i_out = __lasx_xvhaddw_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000023; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000023; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000023; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000023; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000023; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000023; ++ __m256i_out = __lasx_xvhaddw_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000033; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000033; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff0607ffff0607; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff0607ffff0607; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff0607ffff0607; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff0607ffff0607; ++ *((unsigned long *)&__m256i_op1[3]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_op1[2]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_op1[1]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_op1[0]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_result[3]) = 0xfff8fffffff8ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff8fffffff8ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xfff8fffffff8ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff8fffffff8ffff; ++ __m256i_out = __lasx_xvhaddw_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffecffffffec; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffecffffffec; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffecffffffec; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffecffffffec; ++ __m256i_out = __lasx_xvhaddw_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffefffffffe; ++ __m256i_out = __lasx_xvhaddw_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffc000400780087; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000fe80fffc0183; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffc000400f8ff87; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff80ff00ff7c0183; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffff900000800; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffc00000078; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000fffffffc; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffc000000f8; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffff790000077c; ++ __m256i_out = __lasx_xvhaddw_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7ff0000000000000; 
++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00007ff000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007ff000000000; ++ __m256i_out = __lasx_xvhaddw_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff10000fff10000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff10000fff10000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffff1fffffff1; ++ __m256i_out = __lasx_xvhaddw_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000001ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000001ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ 
__m256i_out = __lasx_xvhaddw_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvhaddw_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000f6ff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000f6ff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000f6ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000f6ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000017f0000017d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000017f0000017f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 
0x0000000100000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000017f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000017f; ++ __m256i_out = __lasx_xvhaddw_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000017000000080; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc06500550055ffab; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000017000000080; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc06500550055ffab; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000017000000080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000017000000080; ++ __m256i_out = __lasx_xvhaddw_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7f00000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff000000000000; ++ __m256i_out = __lasx_xvhaddw_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7ff0000000000000; ++ __m256i_out = __lasx_xvhaddw_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned 
long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvhaddw_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffefef800; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffefef800; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000430207f944; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000430207f944; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001000400010004; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001000400010004; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000400010004; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000400010004; ++ __m256i_out = __lasx_xvhaddw_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffffffffffff; ++ __m256i_out = __lasx_xvhaddw_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-2.c +new file mode 100644 +index 000000000..14ec081a4 +--- 
/dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-2.c +@@ -0,0 +1,650 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000015d050192cb; ++ *((unsigned long *)&__m256i_op0[2]) = 0x028e509508b16ee9; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000033ff01020e23; ++ *((unsigned long *)&__m256i_op0[0]) = 0x151196b58fd1114d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001ffaa0000040e; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000716800007bb6; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001ffe80001fe9c; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000228200001680; ++ *((unsigned long *)&__m256i_result[3]) = 0x000100ab000500a0; ++ *((unsigned long *)&__m256i_result[2]) = 0x000200b800080124; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001011b000200aa; ++ *((unsigned long *)&__m256i_result[0]) = 0x00150118008f0091; ++ __m256i_out = __lasx_xvhaddw_hu_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000001341c4000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000001000310000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0x00007f7f00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00007f7f00007fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000007f00340040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000007f000000ff; ++ __m256i_out = __lasx_xvhaddw_hu_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000180007f7f; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffafaf80000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000180007f7f; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffafaf80000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff010000ff017e; ++ *((unsigned long *)&__m256i_result[2]) = 0x01fe01ae00ff00ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff010000ff017e; ++ *((unsigned long *)&__m256i_result[0]) = 0x01fe01ae00ff00ff; ++ __m256i_out = __lasx_xvhaddw_hu_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7c00000880008000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7c00000880008000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000ff0102; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x007c000000810081; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000ff0102; ++ *((unsigned long *)&__m256i_result[0]) = 0x007c000000810081; ++ __m256i_out = __lasx_xvhaddw_hu_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffc0003fffc0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffc0003fffc0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x007fc0083fc7c007; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x007fc0083fc7c007; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x007f010700c70106; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x007f010700c70106; ++ __m256i_out = __lasx_xvhaddw_hu_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000e0010000e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000e0010000e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000000ff; ++ __m256i_out = __lasx_xvhaddw_hu_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101000101010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101000101010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000000010000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000000010000; ++ __m256i_out = __lasx_xvhaddw_hu_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_hu_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvhaddw_wu_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000002a5; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000002a5; ++ __m256i_out = __lasx_xvhaddw_wu_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_wu_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvhaddw_wu_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_wu_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0e0e0e0e0e0e0e0e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000e0e0e0e0e0e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff8fff9000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff8fff9000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff8fff9000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00010e0d00009e0e; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00009000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000e0e; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00009000; ++ __m256i_out = __lasx_xvhaddw_wu_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_wu_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0004000400040004; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0004000500040005; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0004000400040004; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0004000500040005; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000300000004; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000300000004; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000300000004; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000300000004; ++ __m256i_out = __lasx_xvhaddw_wu_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0501030102141923; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffd5020738b43ddb; ++ *((unsigned long *)&__m256i_op0[1]) = 0x010200023b8e4174; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff4ff4e11410b40; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000019410000e69a; ++ *((unsigned long *)&__m256i_op1[2]) = 0xf259905a09c23be0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000883a00000f20; ++ *((unsigned long *)&__m256i_op1[0]) = 0x6d3c2d3a89167aeb; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000501e99b; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000109973de7; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000001020f22; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000001890b7a39; ++ __m256i_out = __lasx_xvhaddw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7f00000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000007f000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007fff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007fff0000; ++ __m256i_out = __lasx_xvhaddw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfff9fff9fff9fff9; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff90000fff9fff9; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfff9fff9fff9fff9; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfff90000fff9fff9; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000001fff9fff8; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x00000001fff9fff8; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000001fff9fff8; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000001fff9fff8; ++ __m256i_out = __lasx_xvhaddw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffff81ffffeb2f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00003f6ee0570b4e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000018de; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffb4ffcec0f1; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffff81ffffeb2f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00003f6ee0570b4e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000000018de; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffb4ffcec0f1; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000001ffffeab0; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000e0574abc; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000018de; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000001ffcec0a5; ++ __m256i_out = __lasx_xvhaddw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffe367cc82f8989a; ++ *((unsigned long *)&__m256i_op1[2]) = 0x4f90000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffc3aaa8d58f43c8; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000082f8989a; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000d58f43c8; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000001700170017; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000001700170017; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000170017; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000170017; ++ __m256i_out = __lasx_xvhaddw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000004411; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000004411; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000004411; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000004411; ++ __m256i_out = __lasx_xvhaddw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000236200005111; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000175e0000490d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000236200005111; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000175e0000490d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000002362; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000010000175d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000002362; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000010000175d; ++ __m256i_out = __lasx_xvhaddw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ff0100ff0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ff01; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ff0100ff0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ff01; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000010000ff00; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000010000ff00; ++ __m256i_out = __lasx_xvhaddw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100003ffe; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100003fcd; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100003ffe; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000100003fcd; ++ __m256i_out = __lasx_xvhaddw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_qu_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffefefffffcfa; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffefefffffefe; ++ __m256i_out = __lasx_xvhaddw_qu_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_qu_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_qu_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000003f; ++ __m256i_out = __lasx_xvhaddw_qu_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op1[2]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op1[1]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op1[0]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x9090909090909090; ++ __m256i_out = __lasx_xvhaddw_qu_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000000f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000000f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x000000000000000f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000000f; ++ __m256i_out = __lasx_xvhaddw_qu_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf3f3f3f3f3f3f3f3; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf3f3f3f3f3f3f3f3; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf3f3f3f3f3f3f3f3; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf3f3f3f3f3f3f3f3; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xf3f3f3f3f3f3f4f3; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xf3f3f3f3f3f3f4f3; ++ __m256i_out = __lasx_xvhaddw_qu_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x800000ff800000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x800000ff800000ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0080000000000002; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0080000000000002; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000800080008000; ++ __m256i_out = __lasx_xvhaddw_qu_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_qu_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvhaddw_qu_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000001fe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000001ce; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000001fe; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x00000000000001ce; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000001fd; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000001fd; ++ __m256i_out = __lasx_xvhaddw_qu_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfd12fd12fd12fd12; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfd12fd12fd12fd12; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfd12fd12fd12fd12; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfd12fd12fd12fd12; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfd02fd02fd02fd02; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfd02fd02fd02fd02; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfd02fd02fd02fd02; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfd02fd02fd02fd02; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0xfa15fa15fa15fa14; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0xfa15fa15fa15fa14; ++ __m256i_out = __lasx_xvhaddw_qu_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmadd.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmadd.c +new file mode 100644 +index 000000000..f9634b128 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmadd.c +@@ -0,0 +1,742 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ 
__m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x042f0500cfea969a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x58569d7be9179100; ++ *((unsigned long *)&__m256i_op0[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0ad152a5ad72feeb; ++ *((unsigned long *)&__m256i_op1[3]) = 0x34ec5670cd4b5ec0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x4f111e4b8e0d7291; ++ *((unsigned long *)&__m256i_op1[1]) = 0xeaa81f47dc3bdd09; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0e0d5fde5df99830; ++ *((unsigned long *)&__m256i_op2[3]) = 0x80c72fcd40fb3bc0; ++ *((unsigned long *)&__m256i_op2[2]) = 0x84bd087966d4ace0; ++ *((unsigned long *)&__m256i_op2[1]) = 0x26aa68b274dc1322; ++ *((unsigned long *)&__m256i_op2[0]) = 0xe072db2bb9d4cd40; ++ *((unsigned long *)&__m256i_result[3]) = 0x044819410d87e69a; ++ *((unsigned long *)&__m256i_result[2]) = 0x21d3905ae3e93be0; ++ *((unsigned long *)&__m256i_result[1]) = 0x5125883a30da0f20; ++ *((unsigned long *)&__m256i_result[0]) = 0x6d7b2d3ac2777aeb; ++ __m256i_out = __lasx_xvmadd_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmadd_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffeff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffeff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffff001f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffff001f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x000000000000ffe0; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000001e18; ++ *((unsigned long *)&__m256i_op2[1]) = 0x000000000000ffe0; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000001e18; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffff1f; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffeff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffff1f; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffeff; ++ __m256i_out = __lasx_xvmadd_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmadd_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00010000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00010000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000fffe00010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000fffe00010001; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x00000000ffffffff; ++ __m256i_out = __lasx_xvmadd_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1717171717171717; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000607f700000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1717171717171717; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000607f700000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000002e0000002e; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000002e0000ffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000002e0000002e; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000002e0000fffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x1717171717171717; ++ *((unsigned long *)&__m256i_result[2]) = 0x000607f700000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x1717171717171717; ++ *((unsigned long *)&__m256i_result[0]) = 0x000607f700000001; ++ __m256i_out = __lasx_xvmadd_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmadd_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmadd_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmadd_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000003f00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000003f00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000003f00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000003f00000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000003f00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000003f00000000; ++ __m256i_out = __lasx_xvmadd_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x370036db92c4007e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x371462137c1e0049; ++ *((unsigned long *)&__m256i_op0[1]) = 0x800000fe7e02fffe; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x371c413b999d04b5; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0002ff80ffb70000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffb7ff80ffd0ffd8; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00010000002fff9e; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffb5ff80ffd0ffd8; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffff00ff00ffff00; ++ *((unsigned long *)&__m256i_op2[2]) = 0xff000000ff00ff00; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffff00ffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xff00000000ff00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x37fe365b920d007e; ++ *((unsigned long *)&__m256i_result[2]) = 0x381462137d1e0149; ++ *((unsigned long *)&__m256i_result[1]) = 0x80ff00fe7e020060; ++ *((unsigned long *)&__m256i_result[0]) = 0x381c413b99cd04dd; ++ __m256i_out = __lasx_xvmadd_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xb70036db12c4007e; ++ *((unsigned long *)&__m256i_op1[2]) = 0xb7146213fc1e0049; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000fefe02fffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xb71c413b199d04b5; ++ *((unsigned long *)&__m256i_op2[3]) = 0xb70036db12c4007e; ++ *((unsigned long *)&__m256i_op2[2]) = 0xb7146213fc1e0049; ++ *((unsigned long *)&__m256i_op2[1]) = 0x000000fefe02fffe; ++ *((unsigned long *)&__m256i_op2[0]) = 0xb71c413b199d04b5; ++ *((unsigned long *)&__m256i_result[3]) = 0xd100645944100004; ++ *((unsigned long *)&__m256i_result[2]) = 0xd1908469108400d1; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000404040104; ++ *((unsigned long *)&__m256i_result[0]) = 0xd1108199714910f9; ++ __m256i_out = __lasx_xvmadd_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fff7fff00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fff7fff00000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x1f0fdf7f3e3b31d4; ++ *((unsigned long *)&__m256i_op2[2]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x1f0fdf7f3e3b31d4; ++ *((unsigned long *)&__m256i_op2[0]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x61f1000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0108000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x61f1a18100000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0108000000000000; ++ __m256i_out = __lasx_xvmadd_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmadd_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000055555555; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000004; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000055555555; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000004; ++ *((unsigned long *)&__m256i_op2[3]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x2aaaaaaa2aaaaaab; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x2aaaaaaa2aaaaaab; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmadd_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmadd_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmadd_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmadd_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_result[2]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_result[1]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_result[0]) = 0x7c007c007c007c00; ++ __m256i_out = __lasx_xvmadd_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000fd00ffff02fe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000001fffeff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff00fe00feff02ff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x00007f7f00007f00; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x00007f7f00007fff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffff0100; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvmadd_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffee0000ff4c; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ff050000ff3c; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000fff90000ff78; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffa80000ff31; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 
0xffffffffffffffff; ++ __m256i_out = __lasx_xvmadd_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0101010127272525; ++ *((unsigned long *)&__m256i_op2[2]) = 0x23a2a121179e951d; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0101010127272525; ++ *((unsigned long *)&__m256i_op2[0]) = 0x23a2a121179e951d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvmadd_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmadd_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op2[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fefffffffffffff; ++ __m256i_out = __lasx_xvmadd_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 
0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000ff000000ff; ++ __m256i_out = __lasx_xvmadd_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmadd_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmadd_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x008e8e8e8e8e8e8e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x008e8e8e8e8e8e8e; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000700000007; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0007ffff0007ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000700000007; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0007ffff0007ffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x008e8e8e8e8e8e8e; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x008e8e8e8e8e8e8e; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x007000008e700000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x007000008e700000; ++ __m256i_out = __lasx_xvmadd_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000008050501; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0100000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000008050501; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0100000100000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op2[2]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op2[1]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op2[0]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000008050501; ++ *((unsigned long *)&__m256i_result[2]) = 0x0100000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000008050501; ++ *((unsigned long *)&__m256i_result[0]) = 0x0100000100000001; ++ __m256i_out = __lasx_xvmadd_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmadd_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmadd_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x00000000ff88ffc0; ++ *((unsigned long *)&__m256i_op2[2]) = 
0x00000000ff78ffc0; ++ *((unsigned long *)&__m256i_op2[1]) = 0x00000000ff88ffc0; ++ *((unsigned long *)&__m256i_op2[0]) = 0x00000000ff78ffc0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040; ++ __m256i_out = __lasx_xvmadd_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000007f0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000007f0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000080040; ++ __m256i_out = __lasx_xvmadd_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00009fff00002001; ++ *((unsigned long *)&__m256i_op1[2]) = 
0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00009fff00002001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000; ++ __m256i_out = __lasx_xvmadd_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0001497c98ea4fca; ++ *((unsigned long *)&__m256i_op2[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0001497c98ea4fca; ++ *((unsigned long *)&__m256i_op2[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000006715b036; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000006715b036; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvmadd_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000007f80; ++ __m256i_out = __lasx_xvmadd_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_result[0]) = 
0x00000000000d6d6d; ++ __m256i_out = __lasx_xvmadd_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7f80ffffff808000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7f80ffffff808000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x001f001fffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffe0ffe000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x001f001fffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffe0ffe000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffe0ffe000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fa0001fff808000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffe0ffe000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fa0001fff808000; ++ __m256i_out = __lasx_xvmadd_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-1.c +new file mode 100644 +index 000000000..6238685bc +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-1.c +@@ -0,0 +1,856 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, 
int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff000100000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff000100000000; ++ __m256i_out = __lasx_xvmaddwev_h_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff8fff8fff8fff8; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff8fff8fff8fff8; ++ __m256i_out = __lasx_xvmaddwev_h_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op1[2]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_op1[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op1[0]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_op2[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_h_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000f7f8f7f8; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000003f78; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000f7f8f7f8; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000003f78; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op2[2]) = 0x805f0000ffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op2[0]) = 0x805f0000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000f7f8f7f8; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000003f78; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000f7f8f7f8; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000003f78; ++ __m256i_out = __lasx_xvmaddwev_h_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000fc38fc38; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000fc38fc38; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0002001800ff0078; ++ *((unsigned long *)&__m256i_op1[2]) = 0x01f8007001f80070; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0002001800ff0078; ++ *((unsigned long *)&__m256i_op1[0]) = 0x01f8007001f80070; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0218ff78fc38fc38; ++ *((unsigned long *)&__m256i_op2[2]) = 0xfc00000000000048; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0218ff78fc38fc38; ++ *((unsigned long *)&__m256i_op2[0]) = 0xfc00000000000048; ++ *((unsigned long *)&__m256i_result[3]) = 0x00300b40fc001678; ++ *((unsigned long *)&__m256i_result[2]) = 0xfc00000000001f80; ++ *((unsigned long *)&__m256i_result[1]) = 0x00300b40fc001678; ++ *((unsigned long *)&__m256i_result[0]) = 0xfc00000000001f80; ++ __m256i_out = __lasx_xvmaddwev_h_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaddwev_h_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffe8440000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffe8440000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffe8440000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffe8440000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffe8440000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffe8440000; ++ __m256i_out = __lasx_xvmaddwev_h_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_w_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffefffef00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffefffefffefffef; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffefffef00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffefffefffefffef; ++ *((unsigned long *)&__m256i_op1[3]) = 0xefdfefdf00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xefdfefdfefdfefdf; ++ *((unsigned long *)&__m256i_op1[1]) = 0xefdfefdf00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xefdfefdfefdfefdf; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffefffef00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffefffefffefffef; ++ *((unsigned long *)&__m256i_result[1]) = 0xffefffef00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffefffefffefffef; ++ __m256i_out = __lasx_xvmaddwev_w_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xe0e0e0e0e0e0e0e0; ++ *((unsigned long *)&__m256i_op1[2]) = 0xe0e0e0e0e0e0e0e0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000e0e0e0e0; ++ *((unsigned long *)&__m256i_op1[0]) = 0xe0e0e0e0e0e0e0e0; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_w_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_w_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_w_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_w_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xebfd15f000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x01700498ff8f1600; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf520c7c024221300; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00802fd0ff540a80; ++ *((unsigned long *)&__m256i_op1[3]) = 0xebfd15f000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x01700498ff8f1600; ++ *((unsigned long *)&__m256i_op1[1]) = 0xf520c7c024221300; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00802fd0ff540a80; ++ *((unsigned long *)&__m256i_op2[3]) = 0xf96d674800000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x44a4330e2c7116c0; ++ *((unsigned long *)&__m256i_op2[1]) = 0x14187a7822b653c0; ++ *((unsigned long *)&__m256i_op2[0]) = 0xfbe0b866962b96d0; ++ *((unsigned long *)&__m256i_result[3]) = 0xebfd15f000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x015c6a7facc39600; ++ *((unsigned long *)&__m256i_result[1]) = 0xfa070a51cbd95300; ++ *((unsigned long *)&__m256i_result[0]) = 0x00c7463075439280; ++ 
__m256i_out = __lasx_xvmaddwev_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0555550000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0555550000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xc06500550055ffab; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xc06500550055ffab; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0555550000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0555550000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x807e80fd80fe80fd; ++ *((unsigned long *)&__m256i_op0[2]) = 0x80938013800d8002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x807e80fd80fe0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x80938013800d0005; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffff00001fff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffff00001fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x807e80fd80fe80fd; ++ *((unsigned long *)&__m256i_result[2]) = 0x80938013800d8002; ++ *((unsigned long *)&__m256i_result[1]) = 0x807e80fd80fe0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x80938013800d0005; ++ __m256i_out = __lasx_xvmaddwev_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000004a557baac4; ++ *((unsigned long *)&__m256i_op0[2]) = 0x556caad9aabbaa88; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000004a557baac4; ++ *((unsigned long *)&__m256i_op0[0]) = 0x556caad9aabbaa88; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000004a557baac4; ++ *((unsigned long *)&__m256i_op1[2]) = 0x556caad9aabbaa88; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000004a557baac4; ++ *((unsigned long *)&__m256i_op1[0]) = 
0x556caad9aabbaa88; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000004a557baac4; ++ *((unsigned long *)&__m256i_result[2]) = 0x556caad9aabbaa88; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000004a557baac4; ++ *((unsigned long *)&__m256i_result[0]) = 0x556caad9aabbaa88; ++ __m256i_out = __lasx_xvmaddwev_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff7f7f7fff7fffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff7f7f7fff7fffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x3f7f7f7eff800000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x3f7f7f7eff800000; ++ __m256i_out = __lasx_xvmaddwev_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaddwev_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffeffffffdd; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffdc; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x002affaa00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffeffffffdd; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffdc; ++ __m256i_out = __lasx_xvmaddwev_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000a00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000010000000a; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000a00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000010000000a; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000100010001; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000100010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000001000b000b; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000001000b000b; ++ __m256i_out = __lasx_xvmaddwev_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000e0000000e00; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000e0000000e00; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x6018000000000cd1; ++ *((unsigned long *)&__m256i_op0[2]) = 0x6040190d00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x132feea900000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6040190d00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op2[3]) = 0x2020080800000000; ++ *((unsigned 
long *)&__m256i_op2[2]) = 0x0000000004044f4f; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0ef11ae55a5a6767; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x6018000000000cd1; ++ *((unsigned long *)&__m256i_result[2]) = 0x6040190d20227a78; ++ *((unsigned long *)&__m256i_result[1]) = 0x132feeabd2d33b38; ++ *((unsigned long *)&__m256i_result[0]) = 0x6040190d00000000; ++ __m256i_out = __lasx_xvmaddwev_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x3); ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfefefefe3f800000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfefefefe3f800000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000fe0000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000fe0000000; ++ __m256i_out = __lasx_xvmaddwev_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000118; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000118; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fff01fd7fff7fff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00007fff7fff7fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ff80; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fff01fd7fff7fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007fff7fff7fff; ++ __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff3cff3cff3cff3c; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff3cff3cff3cff3c; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff3cff3cff3cff3c; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff3cff3cff3cff3c; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff7fff7fff; ++ __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1086658a18ba3594; ++ *((unsigned long *)&__m256i_op0[2]) = 0x160fe9f000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1086658a18ba3594; ++ *((unsigned long *)&__m256i_op0[0]) = 0x160fe9f000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xe161616161614f61; ++ *((unsigned long *)&__m256i_op1[2]) = 0xe161616161614f61; ++ *((unsigned long *)&__m256i_op1[1]) = 0xe161616161614f61; ++ *((unsigned long *)&__m256i_op1[0]) = 0xe161616161614f61; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000616100004f61; ++ *((unsigned long *)&__m256i_op2[2]) 
= 0x0000616100004f61; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000616100004f61; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000616100004f61; ++ *((unsigned long *)&__m256i_result[3]) = 0x108659e46485f7e1; ++ *((unsigned long *)&__m256i_result[2]) = 0x4df5b1a3ed5e02c1; ++ *((unsigned long *)&__m256i_result[1]) = 0x108659e46485f7e1; ++ *((unsigned long *)&__m256i_result[0]) = 0x4df5b1a3ed5e02c1; ++ __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff000100000000; ++ __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x01fffffffe000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x01fffffffe000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x01fffffffe000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x01fffffffe000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000001ffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000001ffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xfe00000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00007f7f00007f7f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00007f7f00007f7f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0001fffe0000ffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0001fffe00010001; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0001fffe0000ffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0001fffe00010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ff01; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ff01; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ff810011; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ff810011; ++ *((unsigned long *)&__m256i_op2[3]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_result[2]) = 0x817f11ed81800ff0; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_result[0]) = 0x817f11ed81800ff0; ++ __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00000000ffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00000000ffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xff00000000ffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00000000ffffff; ++ __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000101000001010; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000101000001010; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000100010; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000100010; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000101000001010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000101000001010; ++ __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffff80000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-2.c +new file mode 100644 +index 000000000..5fa080375 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-2.c +@@ -0,0 +1,723 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_h_bu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvmaddwev_h_bu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_h_bu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaddwev_h_bu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_h_bu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaddwev_h_bu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaddwev_h_bu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007f7f7f80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007f7f7f80; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007f7f7f80; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[0]) = 0x000000007f7f7f80; ++ __m256i_out = __lasx_xvmaddwev_h_bu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0200000002000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000002000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0200000002000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000002000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff01fb0408; ++ *((unsigned long *)&__m256i_op1[2]) = 0xf2b180c9fc1fefdc; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff01fb0408; ++ *((unsigned long *)&__m256i_op1[0]) = 0xf2b180c9fc1fefdc; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000008000000080; ++ *((unsigned long *)&__m256i_op2[2]) = 0x00003cfc0000006f; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000008000000080; ++ *((unsigned long *)&__m256i_op2[0]) = 0x00003cfc0000006f; ++ *((unsigned long *)&__m256i_result[3]) = 0x02007f8002000400; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000c5dc02005f64; ++ *((unsigned long *)&__m256i_result[1]) = 0x02007f8002000400; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000c5dc02005f64; ++ __m256i_out = __lasx_xvmaddwev_h_bu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000700020004; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000700020004; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0040000000000003; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0040000000000003; 
++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000070002000a; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000070002000a; ++ __m256i_out = __lasx_xvmaddwev_h_bu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_w_hu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000003f7e3f; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffc6cc05c64d960e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000003f7e3f; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff874dc687870000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x41dfffc000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 
0x41dfffdfffc00000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0001fbf9fbe29f52; ++ *((unsigned long *)&__m256i_op2[2]) = 0x5b409c0000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0001fbf9fbe29f52; ++ *((unsigned long *)&__m256i_op2[0]) = 0x5b409c0000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfbba01c0003f7e3f; ++ *((unsigned long *)&__m256i_result[2]) = 0xffc6cc05c64d960e; ++ *((unsigned long *)&__m256i_result[1]) = 0xfbd884e7003f7e3f; ++ *((unsigned long *)&__m256i_result[0]) = 0xff874dc687870000; ++ __m256i_out = __lasx_xvmaddwev_w_hu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000003fff3fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000003fff3fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_result[3]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000003fff3fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x3fff3fff3fff4000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000403f3fff; ++ __m256i_out = __lasx_xvmaddwev_w_hu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_w_hu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvmaddwev_w_hu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_w_hu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x40effc0000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x40effc0000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00007f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00010003fc827a86; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00007f7f7f7f0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7f017fc0ddbf7d86; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x00153f1594ea02ff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000ffffffff0100; ++ *((unsigned long *)&__m256i_op2[0]) = 0xff15c1ea95ea02ff; ++ *((unsigned 
long *)&__m256i_result[3]) = 0xc06e7c817f7e8081; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000bd3f016f177a; ++ *((unsigned long *)&__m256i_result[1]) = 0xc06e7c8100000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x60c485800178147a; ++ __m256i_out = __lasx_xvmaddwev_w_hu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000165a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000165a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000011f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000011f; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000ffff0000ffa3; ++ *((unsigned long *)&__m256i_op2[2]) = 0x000000000000165a; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000ffff0000ffa3; ++ *((unsigned long *)&__m256i_op2[0]) = 0x000000000000165a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000192540; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000192540; ++ __m256i_out = __lasx_xvmaddwev_w_hu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaddwev_w_hu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_w_hu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ff88ff88; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x00000000ff88ff88; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ff88ff88; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ff88ff88; ++ __m256i_out = __lasx_xvmaddwev_w_hu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffeffff97a1; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffdf5b000041b0; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffeffff97a1; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffdf5b000041b0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x07fee332883f86b0; ++ *((unsigned long *)&__m256i_op2[2]) = 0x07fed3c8f7ad28d0; ++ *((unsigned long *)&__m256i_op2[1]) = 0x07fee332883f86b0; ++ *((unsigned long *)&__m256i_op2[0]) = 0x07fed3c8f7ad28d0; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffeffff97a1; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffdf5b000041b0; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffeffff97a1; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffdf5b000041b0; ++ __m256i_out = __lasx_xvmaddwev_w_hu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000fb8000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000fb8000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_d_wu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x807f807f00000380; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000007380; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc03fc03f000001c0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000001c0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0004000400040004; ++ *((unsigned long *)&__m256i_result[3]) = 0x807f807f00000380; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000007380; ++ *((unsigned long *)&__m256i_result[1]) = 0xc03fc03f000001c0; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000001c0; ++ __m256i_out = __lasx_xvmaddwev_d_wu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_d_wu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff80ff00ff80ff01; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff80ff00ff80ff01; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x007f00ff007f00fe; ++ *((unsigned long 
*)&__m256i_op2[2]) = 0xf711ee11f711ee91; ++ *((unsigned long *)&__m256i_op2[1]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xf711ee11f711ee11; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xff80ff00ff80ff01; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xff80ff00ff80ff01; ++ __m256i_out = __lasx_xvmaddwev_d_wu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_d_wu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x498000804843ffe0; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0x4980008068400000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x498000804843ffe0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x4980008068400000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffff801000000010; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffff800300000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffff801000000010; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffff800300000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000004843ffdff; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000004843ffdff; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000; ++ __m256i_out = __lasx_xvmaddwev_d_wu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x01fe01fe00000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000010000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000022; ++ *((unsigned long *)&__m256i_op2[1]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000022; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000045ff740023; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000045ff740023; ++ __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000fffe00800022; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000fffe00800022; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000000; ++ __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000016e00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000016e00; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x000000000155b200; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000b70000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000016e00; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000016e00; ++ __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000001e001e001e0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000001e001e001e0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-3.c +new file mode 100644 +index 000000000..40549448e +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-3.c +@@ -0,0 +1,940 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, 
unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0003ff540000081c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0003ffd00003fd38; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001ffaa0000040e; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000716800007bb6; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001ffe80001fe9c; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000228200001680; ++ *((unsigned long *)&__m256i_op2[3]) = 0x372e9d75e8aab100; ++ *((unsigned long *)&__m256i_op2[2]) = 0xc5c085372cfabfba; ++ *((unsigned long *)&__m256i_op2[1]) = 0x31730b5beb7c99f5; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0658f2dc0eb21e3c; ++ *((unsigned long *)&__m256i_result[3]) = 0x002e4db200000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000315ac0000d658; ++ *((unsigned long *)&__m256i_result[1]) = 0x00735278007cf94c; ++ *((unsigned long *)&__m256i_result[0]) = 0x0003ed8800031b38; ++ __m256i_out ++ = __lasx_xvmaddwev_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_result[2]) 
= 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_result[1]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_result[0]) = 0xff01ff01ff01ff01; ++ __m256i_out ++ = __lasx_xvmaddwev_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out ++ = __lasx_xvmaddwev_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff0001ff04; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff02a0fefc; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000cfefd; ++ *((unsigned long *)&__m256i_op1[3]) = 0x6100000800060005; ++ *((unsigned long *)&__m256i_op1[2]) = 0x5ee1c073b800c916; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x5ff00007fff9fff3; ++ *((unsigned long *)&__m256i_op2[3]) = 0xfffffffffffffffe; ++ *((unsigned long 
*)&__m256i_op2[2]) = 0xfffffffefffffefc; ++ *((unsigned long *)&__m256i_op2[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op2[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffff7fffbfefa; ++ *((unsigned long *)&__m256i_result[2]) = 0xff1eff1902a0fea4; ++ *((unsigned long *)&__m256i_result[1]) = 0xff10000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xff10fff9ff13fd17; ++ __m256i_out ++ = __lasx_xvmaddwev_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfafafafafafafafa; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000fefefe; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xf9fbf9fbf9fbf9fb; ++ *((unsigned long *)&__m256i_result[2]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_result[1]) = 0xfdfffdfffdfffdff; ++ *((unsigned long *)&__m256i_result[0]) = 0xff01ff01fffffdff; ++ __m256i_out ++ = __lasx_xvmaddwev_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000003fff3fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000003fff3fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000627; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000627; ++ *((unsigned long *)&__m256i_op2[3]) = 0x7fff7fff05407fff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x7fff7fff05407fff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000003fff3fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000003fff3fff; ++ __m256i_out ++ = __lasx_xvmaddwev_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000400; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000400; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwev_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x003f003f003f003f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x003f003f003f003f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwev_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_result[3]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_result[2]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_result[1]) = 
0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00ff00ff00ef32; ++ __m256i_out ++ = __lasx_xvmaddwev_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1010101010101010; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1010101010101010; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffff0001; ++ *((unsigned long *)&__m256i_op2[2]) = 0xfffffffffdd97dc4; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffff0001; ++ *((unsigned long *)&__m256i_op2[0]) = 0xfffffffffdd97dc4; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff00000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x1010100f10100fd4; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff00000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x1010100f10100fd4; ++ __m256i_out ++ = __lasx_xvmaddwev_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xebebebebebebebeb; ++ *((unsigned long *)&__m256i_op0[2]) = 0xebebebebebebebeb; ++ *((unsigned long *)&__m256i_op0[1]) = 0xebebebebebebebeb; ++ *((unsigned long *)&__m256i_op0[0]) = 0xebebebebebebebeb; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xebebebebebebebeb; ++ *((unsigned long *)&__m256i_result[2]) = 0xebebebebebebebeb; ++ *((unsigned long *)&__m256i_result[1]) = 0xebebebebebebebeb; ++ *((unsigned long *)&__m256i_result[0]) = 0xebebebebebebebeb; ++ __m256i_out ++ = __lasx_xvmaddwev_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x001f001fffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffe0ffe000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x001f001fffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffe0ffe000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwev_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x34ec5670cd4b5ec0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4f111e4b8e0d7291; ++ *((unsigned long *)&__m256i_op0[1]) = 0xeaa81f47dc3bdd09; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0e0d5fde5df99830; ++ *((unsigned long *)&__m256i_op1[3]) = 0x67390c19e4b17547; ++ *((unsigned long *)&__m256i_op1[2]) = 0xbacda0f96d2cec01; 
++ *((unsigned long *)&__m256i_op1[1]) = 0xee20ad1adae2cc16; ++ *((unsigned long *)&__m256i_op1[0]) = 0x5a2003c6a406fe53; ++ *((unsigned long *)&__m256i_op2[3]) = 0x80c72fcd40fb3bc0; ++ *((unsigned long *)&__m256i_op2[2]) = 0x84bd087966d4ace0; ++ *((unsigned long *)&__m256i_op2[1]) = 0x26aa68b274dc1322; ++ *((unsigned long *)&__m256i_op2[0]) = 0xe072db2bb9d4cd40; ++ *((unsigned long *)&__m256i_result[3]) = 0x372e9d75e8aab100; ++ *((unsigned long *)&__m256i_result[2]) = 0x5464fbfc416b9f71; ++ *((unsigned long *)&__m256i_result[1]) = 0x31730b5beb7c99f5; ++ *((unsigned long *)&__m256i_result[0]) = 0x0d8264202b8ea3f0; ++ __m256i_out ++ = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff0000ffff00ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff000000ffffff00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffffffff00ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x01fa022a01a401e5; ++ *((unsigned long *)&__m256i_op1[2]) = 0x030d03aa0079029b; ++ *((unsigned long *)&__m256i_op1[1]) = 0x024c01f901950261; ++ *((unsigned long *)&__m256i_op1[0]) = 0x008102c2008a029f; ++ *((unsigned long *)&__m256i_op2[3]) = 0x002e4db200000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x000315ac0000d658; ++ *((unsigned long *)&__m256i_op2[1]) = 0x00735278007cf94c; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0003ed8800031b38; ++ *((unsigned long *)&__m256i_result[3]) = 0x01a72334ffff00ff; ++ *((unsigned long *)&__m256i_result[2]) = 0xff4f6838ff937648; ++ *((unsigned long *)&__m256i_result[1]) = 0x00a2afb7fff00ecb; ++ *((unsigned long *)&__m256i_result[0]) = 0xffce110f004658c7; ++ __m256i_out ++ = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffff0001; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x000000003a099512; ++ *((unsigned long *)&__m256i_op0[1]) = 0x280ac9da313763f5; ++ *((unsigned long *)&__m256i_op0[0]) = 0xe032c738adcc6bbf; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xfffe000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000ffff00010000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0001000100020001; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000fffffffffffe; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffff0001; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000003a099512; ++ *((unsigned long *)&__m256i_result[1]) = 0x280ac9da313763f5; ++ *((unsigned long *)&__m256i_result[0]) = 0xe032c738adcc6bbf; ++ __m256i_out ++ = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7f00000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7f00000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x7fff000000000000; ++ __m256i_out ++ = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out ++ = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000045f3fb; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000045f3fb; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_result[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_result[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_result[0]) = 0xf7f7f7f7f7f7f7f7; ++ __m256i_out ++ = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00010003; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0080000200000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00010003; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00010003; ++ *((unsigned long *)&__m256i_result[1]) = 0x0080000200000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00010003; ++ __m256i_out ++ = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000001a00; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000001a00; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000001f0000001f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000001f0000ffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000060008; ++ *((unsigned long *)&__m256i_op2[2]) = 0x00000000000c005b; ++ *((unsigned long *)&__m256i_op2[1]) = 0xfffffffffffe0000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000040053; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff0007fff7; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff005affa4; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffe100000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000053ffac; ++ __m256i_out ++ = 
__lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000000; ++ __m256i_out ++ = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000420080000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5fff5fff607f0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000420080000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5fff5fff607f0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000420080000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x5fff5fff607f0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000420080000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x5fff5fff607f0000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x1000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x1000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000420080000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x5fff5fff607f0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000420080000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x5fff5fff607f0000; ++ __m256i_out ++ = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0100004300000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0100004300000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op2[2]) = 0xff00010001000100; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op2[0]) = 0xff00010001000100; ++ *((unsigned long *)&__m256i_result[3]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_result[2]) = 0x01ffff4300ffff00; ++ *((unsigned long *)&__m256i_result[1]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_result[0]) = 0x01ffff4300ffff00; ++ __m256i_out ++ = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000001000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000001000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff0607ffff0607; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff0607ffff0607; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffff0607ffff0607; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff0607ffff0607; ++ *((unsigned long *)&__m256i_op2[3]) = 0x00000000f9f9f9f9; ++ *((unsigned long *)&__m256i_op2[2]) = 0x00000000faf3f3f2; ++ *((unsigned long *)&__m256i_op2[1]) = 0x00000000f9f9f9f9; ++ *((unsigned long *)&__m256i_op2[0]) = 0x00000000faf3f3f2; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffdbbbcf; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffb8579f; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffdbbbcf; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffb8579f; ++ __m256i_out ++ = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out ++ = __lasx_xvmaddwev_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x009200f200840080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x009200f200840080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00b200b300800080; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00b200b300800080; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x009200f200840080; ++ *((unsigned long *)&__m256i_result[2]) = 0x009200f200840080; ++ *((unsigned long *)&__m256i_result[1]) = 0x00b200b300800080; ++ *((unsigned long *)&__m256i_result[0]) = 0x00b200b300800080; ++ __m256i_out ++ = __lasx_xvmaddwev_d_wu_w (__m256i_op0, __m256i_op1, 
__m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000202020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000404040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000202020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000404040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000202020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000404040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000202020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000404040; ++ __m256i_out ++ = __lasx_xvmaddwev_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwev_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1fa0000000080000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1fa0000000080000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000800080; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000202; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000800080; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000202; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x1fa0000000080000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x1fa0000000080000; ++ __m256i_out ++ = __lasx_xvmaddwev_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffe00000001; ++ __m256i_out ++ = __lasx_xvmaddwev_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000eef14fe8; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0202020201010000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000eef14fe8; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0202020201010000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0xfe02fe02fee5fe22; ++ *((unsigned long *)&__m256i_op2[0]) = 0xff49fe4200000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000eef14fe8; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffe928f1313c9cc; ++ *((unsigned long *)&__m256i_result[0]) = 0x4244020201010000; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0005000500050005; 
++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_result[2]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_result[1]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_result[0]) = 0x0005000500050005; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000fffffff6; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000fffffff6; ++ *((unsigned long *)&__m256i_op2[3]) = 0x3f3f3f3f3f3f3f3f; ++ *((unsigned long *)&__m256i_op2[2]) = 0x3f3f3f3f3f3f3f3f; ++ *((unsigned long *)&__m256i_op2[1]) = 0x000000003f3f3f3f; ++ *((unsigned long *)&__m256i_op2[0]) = 0x3f3f3f3f00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000003f3f3f3c; ++ *((unsigned long *)&__m256i_result[2]) = 0xc6c6c6c68787878a; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000003f3f3f3c; ++ *((unsigned long *)&__m256i_result[0]) = 0x8787878a00000000; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000fffffff6; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffffff6; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op2[3]) = 0x000000003f3f3f3c; ++ *((unsigned long *)&__m256i_op2[2]) = 0xc6c6c6c68787878a; ++ *((unsigned long *)&__m256i_op2[1]) = 0x000000003f3f3f3c; ++ *((unsigned long *)&__m256i_op2[0]) = 0x8787878a00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffe3; ++ *((unsigned long *)&__m256i_result[2]) = 0x63636344c3c3c4f6; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffc3; ++ *((unsigned long *)&__m256i_result[0]) = 0xc3c3c500fffffff6; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000900000009; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000900000009; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000900000009; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000900000009; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000009; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0004000f00100003; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000400030010000f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0004000f00100003; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000400030010000f; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffbfffcffeffff0; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffbfffcffeffff0; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000b0cfffff4f3; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000f9bb562f56c80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000b0cfffff4f3; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000f9bb562f56c80; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op1[2]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op1[0]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op2[2]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op2[0]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_result[3]) = 0x0018761ed60b5d7f; ++ *((unsigned long *)&__m256i_result[2]) = 0xabdcdc9938afafe9; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0018761ed60b5d7f; ++ *((unsigned long *)&__m256i_result[0]) = 0xabdcdc9938afafe9; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-1.c +new file mode 100644 +index 000000000..683876933 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-1.c +@@ -0,0 +1,742 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ffffffffffffff; ++ __m256i_out = __lasx_xvmaddwod_h_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_h_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 
0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_h_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007fff7fff; ++ __m256i_out = __lasx_xvmaddwod_h_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1020102010201020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1020102010201020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1020102010201020; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x1020102010201020; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op2[3]) = 0xefdfefdf00000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0xefdfefdfefdfefdf; ++ *((unsigned long *)&__m256i_op2[1]) = 0xefdfefdf00000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0xefdfefdfefdfefdf; ++ *((unsigned long *)&__m256i_result[3]) = 0x1031146010201020; ++ *((unsigned long *)&__m256i_result[2]) = 0x1020102010201020; ++ *((unsigned long *)&__m256i_result[1]) = 0x1031146010201020; ++ *((unsigned long *)&__m256i_result[0]) = 0x1020102010201020; ++ __m256i_out = __lasx_xvmaddwod_h_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvmaddwod_h_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_h_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000080800000808; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000080800000808; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000080800000808; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000080800000808; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_h_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x3d3d3d3d3d3d3d3d; ++ __m256i_out = __lasx_xvmaddwod_h_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xfff8fffffff8ffff; ++ *((unsigned 
long *)&__m256i_op2[2]) = 0xfff8fffffff8ffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xfff8fffffff8ffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xfff8fffffff8ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaddwod_h_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x94d7fb5200000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x94d7fb5200000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000038ea4d4a; ++ *((unsigned long *)&__m256i_op2[2]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000038ea4d4a; ++ *((unsigned long *)&__m256i_op2[0]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x94d7fb5200000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x94d7fb5200000000; ++ __m256i_out = __lasx_xvmaddwod_h_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000005ffffffff; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_op2[3]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_op2[1]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000020000010201; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000020000010201; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000020000010201; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000020000010201; ++ __m256i_out = __lasx_xvmaddwod_h_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x003f003f003f003f; ++ *((unsigned long *)&__m256i_op2[2]) = 0x003f003f003f003f; ++ *((unsigned long *)&__m256i_op2[1]) = 0x003f003f003f003f; ++ *((unsigned long *)&__m256i_op2[0]) = 0x003f003f003f003f; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000017e; ++ __m256i_out = __lasx_xvmaddwod_w_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00003f3fc6c68787; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00003f3f87870000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffefffffffeff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00003e3ec6c68686; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000fffffeff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00003e3e87870000; ++ __m256i_out = __lasx_xvmaddwod_w_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x01fe007a01c40110; ++ *((unsigned long *)&__m256i_op0[2]) = 0x019d00a2003a0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x01fe007a01c40110; ++ *((unsigned long *)&__m256i_op0[0]) = 0x019d00a2003a0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000077fff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x01fe007a01c40110; ++ *((unsigned long *)&__m256i_result[2]) = 0x019d00a20039fff9; ++ *((unsigned long *)&__m256i_result[1]) = 
0x01fe007a01c40110; ++ *((unsigned long *)&__m256i_result[0]) = 0x019d00a2003a0000; ++ __m256i_out = __lasx_xvmaddwod_w_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000003cc0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000003cc0; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000003cc0; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000003cc0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_w_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000002780; ++ *((unsigned long *)&__m256i_op2[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000002780; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_w_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ff80; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000468600007f79; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000f3280000dfff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fff01fd7fff7fff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00007fff7fff7fff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ff80; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000468600007f79; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000f3280000dfff; ++ __m256i_out = __lasx_xvmaddwod_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffff90ffffff80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffff90ffffff80; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffff90ffffff80; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffff90ffffff80; ++ __m256i_out = __lasx_xvmaddwod_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff80000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff80000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3f2c678e38d1104c; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000; ++ 
*((unsigned long *)&__m256i_op0[1]) = 0x3f2c678e38d1104c; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfff9fffffffbffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffdaaaaffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfff9fffffffbffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffdaaaaffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x3f2c678e38d1104c; ++ *((unsigned long *)&__m256i_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x3f2c678e38d1104c; ++ *((unsigned long *)&__m256i_result[0]) = 0x7ff0000000000000; ++ __m256i_out = __lasx_xvmaddwod_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ 
__m256i_out = __lasx_xvmaddwod_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3fffffffff7f0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3fffffffff7f0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000c7aff7c00; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffd017d00; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000c7aff7c00; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffd017d00; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000002030000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x030303670101fd90; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000002030000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x030303670101fd90; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x3ffffffffc7bfc99; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x3ffffffffc7bfc99; ++ __m256i_out = __lasx_xvmaddwod_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_op2[2]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_op2[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_op2[0]) = 0xff80000000000000; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 
0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaddwod_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0200000202000002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x0200000202000002; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0200000202000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0200000202000002; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0200000202000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0200000202000002; ++ __m256i_out = __lasx_xvmaddwod_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000010000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000010000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000100080; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000100080; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf800f800f800c000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf800f800f800a000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf800f800f800e000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf800f800f800e000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff00ffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0001000100010000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x020afefb08140000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0003fffc00060000; ++ *((unsigned long *)&__m256i_result[3]) = 0xf800f7fff8ffc0ff; ++ *((unsigned long 
*)&__m256i_result[2]) = 0xf8fff7fff7ffa000; ++ *((unsigned long *)&__m256i_result[1]) = 0xf800f800f800e000; ++ *((unsigned long *)&__m256i_result[0]) = 0xf800f800f800e000; ++ __m256i_out = __lasx_xvmaddwod_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaddwod_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op2[2]) = 0xffffffff39ffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffff39ffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000fc300000fc40; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000fc300000fc40; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002; ++ __m256i_out = __lasx_xvmaddwod_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvmaddwod_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-2.c +new file mode 100644 +index 000000000..f9f88b654 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-2.c +@@ -0,0 +1,799 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000ffff8000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff80008000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x800080008000b8f1; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff00000000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x074132a240000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x8000ffff8000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x06f880008000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[0]) = 0x800080008000b8f1; ++ __m256i_out = __lasx_xvmaddwod_h_bu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffefe00000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_h_bu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000000000c0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000000c0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000000000c0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000000c0; ++ 
*((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x00000012481e4950; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000001658166830; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000004000000040; ++ __m256i_out = __lasx_xvmaddwod_h_bu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_result[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_result[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_result[0]) = 0xf7f7f7f7f7f7f7f7; ++ __m256i_out = __lasx_xvmaddwod_h_bu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaddwod_h_bu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_h_bu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1f60010000080100; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1f60010000080100; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x1fa0000000080000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x1fa0000000080000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x1f60010000080100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x1f60010000080100; ++ __m256i_out = __lasx_xvmaddwod_h_bu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_h_bu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffeffed; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffeffed; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffeffed; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffeffed; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffeffed; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffffeffed; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffeffed; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffeffed; ++ *((unsigned long *)&__m256i_op2[3]) = 0xc039000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0xc039000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0xc039000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0xc039000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xbf3ffffffffeffed; ++ *((unsigned long *)&__m256i_result[2]) = 0xbf3ffffffffeffed; ++ *((unsigned long *)&__m256i_result[1]) = 0xbf3ffffffffeffed; ++ *((unsigned long *)&__m256i_result[0]) = 0xbf3ffffffffeffed; ++ __m256i_out = __lasx_xvmaddwod_h_bu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op2[2]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0ff80100ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0ff80100ffffffff; ++ __m256i_out = __lasx_xvmaddwod_w_hu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_w_hu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000003ff000003ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000003ff000003ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xfffffefe00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000003ff000003ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000003ff000003ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_w_hu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x34000000fff00000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff6e00000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3380000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x363c0000fff3c000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffb7146213; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffc1e0049; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffb71c413b; ++ *((unsigned long *)&__m256i_op1[0]) = 0xf3317da580000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x34000000fff00000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff6e00000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x3380000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x363c0000fff3c000; ++ __m256i_out = __lasx_xvmaddwod_w_hu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_w_hu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaddwod_w_hu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffe000ffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffe000ffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffe000ffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffe000ffffffffff; ++ __m256i_out = __lasx_xvmaddwod_w_hu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x01c03f8034c03200; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3dc02b400a003400; ++ *((unsigned long *)&__m256i_op0[1]) = 0x01c03f8034c03200; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3dc02b400a003400; ++ *((unsigned long *)&__m256i_op1[3]) = 0x01c03f8034c03200; ++ *((unsigned long *)&__m256i_op1[2]) = 0x3dc02b400a003400; ++ *((unsigned long *)&__m256i_op1[1]) = 0x01c03f8034c03200; ++ *((unsigned long *)&__m256i_op1[0]) = 0x3dc02b400a003400; ++ *((unsigned long *)&__m256i_op2[3]) = 0x07fee332883f86b0; ++ *((unsigned long *)&__m256i_op2[2]) = 0x07fed3c8f7ad28d0; ++ *((unsigned long *)&__m256i_op2[1]) 
= 0x07fee332883f86b0; ++ *((unsigned long *)&__m256i_op2[0]) = 0x07fed3c8f7ad28d0; ++ *((unsigned long *)&__m256i_result[3]) = 0x01ce3c0050d32d40; ++ *((unsigned long *)&__m256i_result[2]) = 0x3fadafc013acf600; ++ *((unsigned long *)&__m256i_result[1]) = 0x01ce3c0050d32d40; ++ *((unsigned long *)&__m256i_result[0]) = 0x3fadafc013acf600; ++ __m256i_out = __lasx_xvmaddwod_w_hu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffecffffffec; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffecffffffec; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffecffffffec; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffecffffffec; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_op2[2]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_op2[1]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_op2[0]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffecffffffec; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffecffffffec; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffecffffffec; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffecffffffec; ++ __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000100; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffefd; ++ 
*((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000100; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xfffffefdfffffefd; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000100; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffff7d80000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000100; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ 
__m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000001fdfffffe02; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000001fefe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff01fefffeff02; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x00000000ffffffff; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000001fdfffffe02; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000001fefe; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff01fefffeff02; ++ __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffdfffffffdffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffddffdeffb5ff8d; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffdfffffffdffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffddffdeffb5ff8d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffffffeeffaf; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1010100f10100fd4; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffffffeeffaf; ++ *((unsigned long *)&__m256i_op1[0]) = 0x1010100f10100fd4; ++ *((unsigned long *)&__m256i_op2[3]) = 0xfffdfffffffdffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffddffdeffb5ff8d; ++ *((unsigned long *)&__m256i_op2[1]) = 0xfffdfffffffdffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffddffdeffb5ff8d; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffefffcffff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0febedc9bb95dd8f; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffefffcffff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0febedc9bb95dd8f; ++ __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000545400; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000545400; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffff040000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffff040000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000000fe; ++ __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000001; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002; ++ __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000017bfffff0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000180007fe8; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000017bfffff0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000180007fe8; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff7bfffff1; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff80007fe9; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff7bfffff1; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff80007fe9; ++ __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0010511c54440437; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0010511c54440437; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000103fca1bd; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000103fca1bd; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000103fca1bd; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000103fca1bd; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffe00000001; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0010511c54440438; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0010511c54440438; ++ __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x386000003df80000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x386000003df80000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x386000003df80000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x386000003df80000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0c6a240000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0c6a240000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x01fe01fe00000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000010000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000022; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000022; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000045ff740023; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000045ff740023; ++ __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000fffe00800022; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000fffe00800022; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000000; ++ __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000016e00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000016e00; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x000000000155b200; ++ *((unsigned 
long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000b70000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000016e00; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000016e00; ++ __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000001e001e001e0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000001e001e001e0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-3.c +new file mode 100644 +index 000000000..5210e4cf9 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-3.c +@@ -0,0 +1,820 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx 
-w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwod_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long *)&__m256i_result[3]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_result[2]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_result[1]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_result[0]) = 0xff01ff01ff01ff01; ++ __m256i_out ++ = __lasx_xvmaddwod_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000200000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000200000000; ++ __m256i_out ++ = __lasx_xvmaddwod_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000050504c4c2362; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000b8f81b8c850f4; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000050504c4c2362; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000b8f81b8c850f4; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[2]) = 0xd0d8eecf383fdf0d; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[0]) = 0xd0d8eecf383fdf0d; ++ *((unsigned long *)&__m256i_op2[3]) = 0x000050504c4c2362; ++ *((unsigned long *)&__m256i_op2[2]) = 0x000b8f81b8c850f4; ++ *((unsigned long *)&__m256i_op2[1]) = 0x000050504c4c2362; ++ *((unsigned long *)&__m256i_op2[0]) = 0x000b8f81b8c850f4; ++ *((unsigned long *)&__m256i_result[3]) = 0x000050504c4c2362; ++ *((unsigned long *)&__m256i_result[2]) = 0x000b2673a90896a4; ++ *((unsigned long *)&__m256i_result[1]) = 0x000050504c4c2362; ++ *((unsigned long *)&__m256i_result[0]) = 0x000b2673a90896a4; ++ __m256i_out ++ = __lasx_xvmaddwod_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc03ae000ffff6000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc600000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc03ae000ffff6000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc600000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffc03fffffffc0; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffc00000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffc03fffffffc0; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffc00000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xc03ae000ffff6000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0xc600000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xc03ae000ffff6000; ++ *((unsigned long *)&__m256i_result[0]) = 0xc600000000000000; ++ __m256i_out ++ = __lasx_xvmaddwod_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff000003c0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff000003c0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000fc300000fc40; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000fc300000fc40; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7c030000ffc4; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff7c030000ffc4; ++ __m256i_out ++ = __lasx_xvmaddwod_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00f7000000f70006; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00f7000000f70006; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m256i_op2[2]) = 0xe07de0801f20607a; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xe07de0801f20607a; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out ++ = __lasx_xvmaddwod_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff0007a861; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff0007a861; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out ++ = __lasx_xvmaddwod_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 
0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwod_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xbff0000000000000; ++ __m256i_out ++ = __lasx_xvmaddwod_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff0002fffeffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff0002fffeffff; ++ __m256i_out ++ = __lasx_xvmaddwod_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000505; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwod_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000001000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000001000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xf000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0xf000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0xf000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0xf000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwod_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000627; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000627; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1f60000000c00000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x1f60000000c00000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x7fff7fff05407fff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op2[1]) = 0x7fff7fff05407fff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000627; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000627; ++ __m256i_out ++ = __lasx_xvmaddwod_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwod_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out ++ = __lasx_xvmaddwod_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x437f201f201f2020; ++ *((unsigned long *)&__m256i_op1[2]) = 0x037f201f001f2020; ++ *((unsigned long *)&__m256i_op1[1]) = 0x437f201f201f2020; ++ *((unsigned long *)&__m256i_op1[0]) = 0x037f201f001f2020; ++ *((unsigned long *)&__m256i_op2[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x21bb481000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x01bf481000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x21bb481000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x01bf481000000000; ++ __m256i_out ++ = __lasx_xvmaddwod_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x000000010000685e; ++ *((unsigned long *)&__m256i_op2[2]) = 0x000020a4ffffbe4f; ++ *((unsigned long *)&__m256i_op2[1]) = 0x000000010000685e; ++ *((unsigned long *)&__m256i_op2[0]) = 0x000020a4ffffbe4f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwod_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x001fffffffe00000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x001fffffffe00000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x000000ffffff1dff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffff1dffffff1dff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x000000ffffff1dff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffff1dffffff1dff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffff0020; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff8001ffff0001; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffff0020; ++ *((unsigned long 
*)&__m256i_result[0]) = 0xffff8001ffff0001; ++ __m256i_out ++ = __lasx_xvmaddwod_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffff8c80; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffe40; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00f9f90079f9f9f9; ++ *((unsigned long *)&__m256i_op1[2]) = 0x79f9f9f900000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00f9f90079f9f9f9; ++ *((unsigned long *)&__m256i_op1[0]) = 0x79f9f9f900000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffff8c80; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffe40; ++ __m256i_out ++ = __lasx_xvmaddwod_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwod_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000154dc84; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000089; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000154dc84; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000089; ++ __m256i_out ++ = __lasx_xvmaddwod_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwod_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffdc; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffdc; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffdc; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffdc; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffeffffffdd; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffdc; ++ __m256i_out ++ = __lasx_xvmaddwod_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out ++ = __lasx_xvmaddwod_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000fffcfffcfffc; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000fffcfffcfffc; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x00000000f9f9f9f9; ++ *((unsigned long *)&__m256i_op2[2]) = 0x00000000faf3f3f2; ++ *((unsigned long *)&__m256i_op2[1]) = 0x00000000f9f9f9f9; ++ *((unsigned long *)&__m256i_op2[0]) = 0x00000000faf3f3f2; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000fffcfffcfffc; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000fffcfffcfffc; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = 
__lasx_xvmaddwod_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfbff0000ffff0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfbff0000ffff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfbff0000ffff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfbff0000ffff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00000000000000; ++ __m256i_out ++ = __lasx_xvmaddwod_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000000c; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000000c; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0101010101010110; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0101010101010110; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0101010101010101; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwod_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000eef14fe8; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0202020201010000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000eef14fe8; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0202020201010000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0xfe02fe02fee5fe22; ++ *((unsigned long *)&__m256i_op2[0]) = 0xff49fe4200000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000eef14fe8; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffe928f1313c9cc; ++ *((unsigned long *)&__m256i_result[0]) = 0x4244020201010000; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_result[2]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_result[1]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_result[0]) = 0x0005000500050005; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000fffffff6; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000fffffff6; ++ *((unsigned long *)&__m256i_op2[3]) = 0x3f3f3f3f3f3f3f3f; ++ *((unsigned long *)&__m256i_op2[2]) = 0x3f3f3f3f3f3f3f3f; ++ *((unsigned long *)&__m256i_op2[1]) = 0x000000003f3f3f3f; ++ *((unsigned long *)&__m256i_op2[0]) = 0x3f3f3f3f00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000003f3f3f3c; ++ *((unsigned long *)&__m256i_result[2]) = 0xc6c6c6c68787878a; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000003f3f3f3c; ++ *((unsigned long *)&__m256i_result[0]) = 0x8787878a00000000; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000fffffff6; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x00000000fffffff6; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op2[3]) = 0x000000003f3f3f3c; ++ *((unsigned long *)&__m256i_op2[2]) = 0xc6c6c6c68787878a; ++ *((unsigned long *)&__m256i_op2[1]) = 0x000000003f3f3f3c; ++ *((unsigned long *)&__m256i_op2[0]) = 0x8787878a00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffe3; ++ *((unsigned long *)&__m256i_result[2]) = 0x63636344c3c3c4f6; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffc3; ++ *((unsigned long *)&__m256i_result[0]) = 0xc3c3c500fffffff6; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, 
__m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000900000009; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000900000009; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000900000009; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000900000009; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000009; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; 
++ *((unsigned long *)&__m256i_op1[3]) = 0x0004000f00100003; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000400030010000f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0004000f00100003; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000400030010000f; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffbfffcffeffff0; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffbfffcffeffff0; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000b0cfffff4f3; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000f9bb562f56c80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000b0cfffff4f3; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000f9bb562f56c80; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op1[2]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op1[0]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op2[2]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op2[0]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_result[3]) = 0x0018761ed60b5d7f; ++ *((unsigned long *)&__m256i_result[2]) = 0xabdcdc9938afafe9; ++ *((unsigned long *)&__m256i_result[1]) = 0x0018761ed60b5d7f; ++ *((unsigned long *)&__m256i_result[0]) = 0xabdcdc9938afafe9; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ return 0; ++} +-- +2.33.0 + diff --git a/LoongArch-Add-tests-for-ASX-vector-xvldi-xvmskgez-xv.patch b/LoongArch-Add-tests-for-ASX-vector-xvldi-xvmskgez-xv.patch new file mode 100644 index 0000000000000000000000000000000000000000..100fe7e0db748f1b4fa5dc9ad6c3a62cfea33985 --- /dev/null +++ b/LoongArch-Add-tests-for-ASX-vector-xvldi-xvmskgez-xv.patch @@ -0,0 +1,2735 @@ +From 8d8564be4eaa8134acab6a184da36f3620a82f6f Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Tue, 12 Sep 2023 15:39:31 +0800 +Subject: [PATCH 104/124] LoongArch: Add tests for ASX vector + xvldi/xvmskgez/xvmskltz/xvmsknz/xvmuh /xvsigncov instructions. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lasx/lasx-xvldi.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvmskgez.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvmskltz.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvmsknz.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvmuh-1.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvmuh-2.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvsigncov.c: New test. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lasx/lasx-xvldi.c | 83 +++ + .../loongarch/vector/lasx/lasx-xvmskgez.c | 86 +++ + .../loongarch/vector/lasx/lasx-xvmskltz.c | 373 ++++++++++ + .../loongarch/vector/lasx/lasx-xvmsknz.c | 163 +++++ + .../loongarch/vector/lasx/lasx-xvmuh-1.c | 650 +++++++++++++++++ + .../loongarch/vector/lasx/lasx-xvmuh-2.c | 635 +++++++++++++++++ + .../loongarch/vector/lasx/lasx-xvsigncov.c | 665 ++++++++++++++++++ + 7 files changed, 2655 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvldi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskgez.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskltz.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsknz.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsigncov.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvldi.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvldi.c +new file mode 100644 +index 000000000..84b3c6599 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvldi.c +@@ -0,0 +1,83 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, 
unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_result[3]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001000000010; ++ __m256i_out = __lasx_xvldi (-4080); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_result[3]) = 0xfebcfebcfebcfebc; ++ *((unsigned long *)&__m256i_result[2]) = 0xfebcfebcfebcfebc; ++ *((unsigned long *)&__m256i_result[1]) = 0xfebcfebcfebcfebc; ++ *((unsigned long *)&__m256i_result[0]) = 0xfebcfebcfebcfebc; ++ __m256i_out = __lasx_xvldi (1724); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_result[3]) = 0x3fd1000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x3fd1000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x3fd1000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x3fd1000000000000; ++ __m256i_out = __lasx_xvldi (-943); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_result[3]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_result[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_result[1]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_result[0]) = 0xff1cff1cff1cff1c; ++ __m256i_out = __lasx_xvldi (1820); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_result[3]) = 0x7200000072000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7200000072000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7200000072000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7200000072000000; ++ __m256i_out = __lasx_xvldi (-3214); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_result[3]) = 0xffffff1dffffff1d; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffff1dffffff1d; ++ 
*((unsigned long *)&__m256i_result[1]) = 0xffffff1dffffff1d; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffff1dffffff1d; ++ __m256i_out = __lasx_xvldi (2845); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_result[3]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001000000010; ++ __m256i_out = __lasx_xvldi (-4080); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_result[3]) = 0x3fd1000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x3fd1000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x3fd1000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x3fd1000000000000; ++ __m256i_out = __lasx_xvldi (-943); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_result[3]) = 0x7200000072000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7200000072000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7200000072000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7200000072000000; ++ __m256i_out = __lasx_xvldi (-3214); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskgez.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskgez.c +new file mode 100644 +index 000000000..15e66ae38 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskgez.c +@@ -0,0 +1,86 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, 
int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff00; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ff00; ++ __m256i_out = __lasx_xvmskgez_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvmskgez_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvmskgez_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskgez_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000001ff03ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000203ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000001ff03ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000203ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000fafe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000fafe; ++ __m256i_out = __lasx_xvmskgez_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000010100000101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000010100000101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvmskgez_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git 
a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskltz.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskltz.c +new file mode 100644 +index 000000000..53b21f98b +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskltz.c +@@ -0,0 +1,373 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000007; ++ __m256i_out = __lasx_xvmskltz_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3922d40000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000c85221c0; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf7ebfab800000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000f20; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000009f0; ++ __m256i_out = __lasx_xvmskltz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x40d74f979f99419f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000022; ++ __m256i_out = __lasx_xvmskltz_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010100000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010100000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5980000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5980000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040; ++ __m256i_out = __lasx_xvmskltz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1f9d9f9d1f9db29f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1f9d9f9d201cb39e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x201c9f9d201cb29f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1f9d9f9d201cb39e; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000007773; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000003373; ++ __m256i_out = __lasx_xvmskltz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc080ffff0049ffd2; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0049ffd2; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000fffeffb9ff9d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00630064004bffd0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xe0f02081c1c4ce2c; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8008000000000000; ++ *((unsigned 
long *)&__m256i_op0[1]) = 0xe0f02081c1c4ce2c; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8008000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000b8; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000000b8; ++ __m256i_out = __lasx_xvmskltz_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000003; ++ __m256i_out = __lasx_xvmskltz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000022; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000022; ++ __m256i_out = __lasx_xvmskltz_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010200000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010200000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvmskltz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000088; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000088; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x296e000018170000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x296e000018170000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000404; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000404; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffc000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffeff000c057c; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffc000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffeff000c057c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000f0f0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000f0f0; ++ __m256i_out = __lasx_xvmskltz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffb2f600006f48; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffb2f600006f48; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000008c; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000008c; ++ __m256i_out = __lasx_xvmskltz_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff801000000010; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff800300000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff801000000010; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff800300000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000cc; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000000cc; ++ __m256i_out = __lasx_xvmskltz_h 
(__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x5); ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffc0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000055; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000054; ++ __m256i_out = __lasx_xvmskltz_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsknz.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsknz.c +new file mode 100644 +index 000000000..81865fd32 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsknz.c +@@ -0,0 +1,163 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsknz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsknz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsknz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0020002000400040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0020002000400040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0020002000400040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0020002000400040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000005555; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000005555; ++ __m256i_out = __lasx_xvmsknz_b (__m256i_op0); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0002000000020000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000300000002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0002000000020000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000300000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000004411; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000004411; ++ __m256i_out = __lasx_xvmsknz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsknz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsknz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00003f3f00004040; ++ 
*((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00003f3f00004040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000033; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000033; ++ __m256i_out = __lasx_xvmsknz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000008050501; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0100000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000008050501; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0100000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000f91; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000f91; ++ __m256i_out = __lasx_xvmsknz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsknz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000430207f944; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000430207f944; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000001f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000001f; ++ __m256i_out = __lasx_xvmsknz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x006018000000001a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0060401900000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x006018000000001a; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0060401900000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000006170; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000006170; ++ __m256i_out = __lasx_xvmsknz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf2b180c9fc1fefdc; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf2b180c9fc1fefdc; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000002ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000002ff; ++ __m256i_out = __lasx_xvmsknz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-1.c +new file mode 100644 +index 000000000..58ad8bfcd +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-1.c +@@ -0,0 +1,650 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ 
++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff0fff0ff01ff01; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff0fff0fff0fff0; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfff0fff0ff01ff01; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff0fff0fff0fff0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ 
*((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000003ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000007ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff00; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x00000000fdfcfda8; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000e2821d20ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000fdfcfda8; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000e2821d20ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffffffffff00; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ff8000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffffffffff00; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ff8000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x372e9d75e8aab100; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc5c085372cfabfba; ++ *((unsigned long *)&__m256i_op0[1]) = 0x31730b5beb7c99f5; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0658f2dc0eb21e3c; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000019410000e69a; ++ *((unsigned long *)&__m256i_op1[2]) = 0xf259905a0c126604; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000883a00000f20; ++ *((unsigned long *)&__m256i_op1[0]) = 0x6d3c2d3aa1c82947; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000f647000007d6; ++ *((unsigned long *)&__m256i_result[2]) = 0x031b358c021ee663; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000faaf0000f9f8; ++ *((unsigned long *)&__m256i_result[0]) = 0x02b4fdadfa9704df; ++ __m256i_out = __lasx_xvmuh_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xf7ffffffffffff1f; ++ *((unsigned long *)&__m256i_op1[2]) = 0xbffffffffffffeff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xf7ffffffffffff1f; ++ *((unsigned long *)&__m256i_op1[0]) = 0xbffffffffffffeff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_h (__m256i_op0, __m256i_op1); 
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffe05fc47b400; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffe06003fc000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffe05fc47b400; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffe06003fc000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000100040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000100040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffff8900000040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffff8900000040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000401000000; ++ 
*((unsigned long *)&__m256i_op0[1]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000aaaa00008bfe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000aaaa0000aaaa; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000aaaa00008bfe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000aaaa0000aaaa; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff9fffffffbffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffdaaaaffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfff9fffffffbffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffdaaaaffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff5556aaaa; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff5556aaaa; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fffffffffffffff; ++ 
*((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff81ff7d; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff81ff7d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000001fff0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000feff0001ffb8; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000001fff0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000feff0001ffb8; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffe000ffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffe000ffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffe000ffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffe000ffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ 
*((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xb70036db12c4007e; ++ *((unsigned long *)&__m256i_op1[2]) = 0xb7146213fc1e0049; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000fefe02fffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xb71c413b199d04b5; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000007fffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000036a37; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000007fffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000004def9; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmuh_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x00000000fffe0001; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000fffe0001; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000fffe0001; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fffe0001; ++ __m256i_out = __lasx_xvmuh_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_result[3]) = 0x000408080c111414; ++ *((unsigned long *)&__m256i_result[2]) = 0x000408080c111414; ++ *((unsigned long *)&__m256i_result[1]) = 0x000408080c111414; ++ *((unsigned long *)&__m256i_result[0]) = 0x000408080c111414; ++ __m256i_out = __lasx_xvmuh_d (__m256i_op0, __m256i_op1); 
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_op1[2]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_op1[1]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_op1[0]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-2.c +new file mode 100644 +index 000000000..85d24fe44 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-2.c +@@ -0,0 +1,635 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff0000000000000; ++ *((unsigned 
long *)&__m256i_op0[2]) = 0xfff0000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff0000000000080; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffbdff3cffbdff44; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffbdff3cffbdff44; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 
0xffbdff3cffbdff44; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffbdff3cffbdff44; ++ *((unsigned long *)&__m256i_result[3]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_result[2]) = 0xfe8bfe0efe8bfe12; ++ *((unsigned long *)&__m256i_result[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_result[0]) = 0xfe8bfe0efe8bfe12; ++ __m256i_out = __lasx_xvmuh_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000027; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000027; ++ __m256i_out = __lasx_xvmuh_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000080; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000080; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 
0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffefffe00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffefffefffefffd; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xd207e90001fb16ef; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc8eab25698f97e90; ++ *((unsigned long *)&__m256i_op0[1]) = 0xd207e90001fb16ef; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc8eab25698f97e90; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0002000200020002; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00020002ff820002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00020002ff820002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000010000; ++ __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x40efffe09fa88260; ++ *((unsigned long *)&__m256i_op0[2]) = 0x6b07ca8e013fbf01; ++ *((unsigned long *)&__m256i_op0[1]) = 0x40efffe09fa7e358; ++ *((unsigned long *)&__m256i_op0[0]) = 0x80ce32be3e827f00; ++ *((unsigned long *)&__m256i_op1[3]) = 0x86ff76ffff4eff42; ++ *((unsigned long *)&__m256i_op1[2]) = 0x86ffffffffff9eff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x86ff76ffff4effff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x86ff32ffaeffffa0; ++ *((unsigned long *)&__m256i_result[3]) = 0x223d76f09f3881ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x3870ca8d013e76a0; ++ *((unsigned long *)&__m256i_result[1]) = 0x223d76f09f37e357; ++ *((unsigned long *)&__m256i_result[0]) = 0x43ec0a1b2aba7ed0; ++ __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00003f3f00003f3f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00003f3f00004040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00003f3f00003f3f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00003f3f00004040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffc020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffc020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000001400000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000001400000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x5fa0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x5fa0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0c6a240000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0f00204000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0c6a240000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0f00204000000000; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x04a3000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x04a3000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x80ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x80ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff8000fffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001fffe00017fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff8000fffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001fffe00017fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000007f00fe; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000fe0000007f; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000007f00fe; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000fe0000007f; ++ __m256i_out = 
__lasx_xvmuh_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffe00000ffe00000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffe00000ffe00000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff88ff88ff880000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff88ff88ff880000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff88ff88ff880000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff88ff88ff880000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000fafe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000fafe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff1cff1cff1cff1c; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ff01ff01; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ff01c000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ff01ff01; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000f1000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000001341c4000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000001000310000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000033e87ef1; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000002e2100; ++ __m256i_out = __lasx_xvmuh_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000483800; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000483800; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op1[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op1[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op1[0]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000045f3fb; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000045f3fb; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000154dc84; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000089; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00fe01f000010000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000c40086; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00fe01f000010000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000c40086; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xdbc8000000003fff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0xdbc8000000003fff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7575ffff75757595; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7575ffff7575f575; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7575ffff75757595; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7575ffff7575f575; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fe37fe3001d001d; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff7fff7fff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fe37fe3001d001d; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fff7fff7fff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x3aadec4f6c7975b1; ++ *((unsigned long *)&__m256i_result[2]) = 0x3abac5447fffca89; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x3aadec4f6c7975b1; ++ *((unsigned long *)&__m256i_result[0]) = 0x3abac5447fffca89; ++ __m256i_out = __lasx_xvmuh_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000003f; ++ __m256i_out = __lasx_xvmuh_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x41dffbffffffffff; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0xffffff00ff800000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x41dffbffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffff00ff800000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000100010; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000100010; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000100010; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000100010; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000010000f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000010000f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsigncov.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsigncov.c +new file mode 100644 +index 000000000..2a6eee0fd +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsigncov.c +@@ -0,0 +1,665 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, 
__m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x97541c5897541c58; ++ *((unsigned long *)&__m256i_op0[2]) = 0x97541c5897541c58; ++ *((unsigned long *)&__m256i_op0[1]) = 0x97541c5897541c58; ++ *((unsigned long *)&__m256i_op0[0]) = 0x97541c5897541c58; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvsigncov_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffff605a; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffff605a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[0]) = 0x0101000000000000; ++ __m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0xf000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff39ffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff39ffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0202020202020202; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0202810102020202; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0202020202020202; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0202810102020202; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000fefe0000fefe; ++ *((unsigned long *)&__m256i_result[2]) = 0x00007fff0000fefe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000fefe0000fefe; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007fff0000fefe; ++ __m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000017547fffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000017547fffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x807e80fd80fe80fd; ++ *((unsigned long *)&__m256i_op1[2]) = 0x80938013800d8002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x807e80fd80fe0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x80938013800d0005; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000801380f380fe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000801380f300fb; ++ __m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffd5a98; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000101ff01; ++ __m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001000000010000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000000010000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[3]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0002000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0002000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xff80ff00ff80ff01; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xff80ff00ff80ff01; ++ __m256i_out = __lasx_xvsigncov_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000fd; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000000fd; ++ __m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8011ffee804c004c; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00faff0500c3ff3c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x80f900f980780078; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0057ffa800ceff31; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000000010000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff000000010000; ++ __m256i_out = 
__lasx_xvsigncov_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3880800037800000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3901000039010000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3880800037800000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3901000039010000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00003fc00000428a; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00003fc00000428a; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256i_result[2]) = 0x00003fc00000428a; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256i_result[0]) = 0x00003fc00000428a; ++ __m256i_out = __lasx_xvsigncov_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffefffefffeffee; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffe0000fffe0012; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffefffefffeffee; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffe0000fffe0012; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000001ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000001ffff; ++ __m256i_out = __lasx_xvsigncov_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x80be0000ffffffff; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[0]) = 0x80be0000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000100000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000100000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffff00000000; ++ __m256i_out = __lasx_xvsigncov_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xdf80df80df80df80; ++ *((unsigned long *)&__m256i_op0[2]) = 0xdfc2df80df80df87; ++ *((unsigned long *)&__m256i_op0[1]) = 0xdf80df80df80df80; ++ *((unsigned long *)&__m256i_op0[0]) = 0xdfc2df80df80df87; ++ *((unsigned long *)&__m256i_op1[3]) = 0xdf80df80df80df80; ++ *((unsigned long *)&__m256i_op1[2]) = 0xdfc2df80df80df87; ++ *((unsigned long *)&__m256i_op1[1]) = 
0xdf80df80df80df80; ++ *((unsigned long *)&__m256i_op1[0]) = 0xdfc2df80df80df87; ++ *((unsigned long *)&__m256i_result[3]) = 0x2080208020802080; ++ *((unsigned long *)&__m256i_result[2]) = 0x203e208020802079; ++ *((unsigned long *)&__m256i_result[1]) = 0x2080208020802080; ++ *((unsigned long *)&__m256i_result[0]) = 0x203e208020802079; ++ __m256i_out = __lasx_xvsigncov_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00003f3f00004040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00003f3f00004040; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffe05f8102; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffe05f8102; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000004e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000022beb03f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffa2beb040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000022beb03f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffa2beb040; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffba8300004fc2; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffba8300004fc2; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffba8300004fc2; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffba8300004fc2; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffba8300004fc2; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffba8300004fc2; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffba8300004fc2; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffba8300004fc2; ++ __m256i_out = __lasx_xvsigncov_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x004100df00ff00ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff00c000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x004100df00ff00ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff00c000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x41dfffffffc00000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xc1d75053f0000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x41dfffffffc00000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xc1d75053f0000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x41dfffffffc00000; ++ *((unsigned long *)&__m256i_result[2]) = 0xc1d75053f0000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x41dfffffffc00000; ++ *((unsigned long *)&__m256i_result[0]) = 0xc1d75053f0000000; ++ __m256i_out = __lasx_xvsigncov_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffa30000165a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000104000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffa30000165a; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000104000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x41dfffffffc00000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xc1d75053f0000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x41dfffffffc00000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xc1d75053f0000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xbe21000100000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000505300000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xbe21000100000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000505300000000; ++ __m256i_out = __lasx_xvsigncov_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op1[2]) = 0x000001880310877e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000001880310877e; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000003fff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000003fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000003fff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000003fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000f788f788; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000f788f788; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ff88ffc0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ff78ffc0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ff88ffc0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ff78ffc0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ff78ffc0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ff78ffc0; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000002000000020; ++ __m256i_out = __lasx_xvsigncov_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffff6361; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4d0a902890b800dc; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffff6361; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4d0a902890b800dc; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000001faf19b60; ++ *((unsigned long *)&__m256i_op1[2]) = 0x6c2905ae7c14c561; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000001faf19b60; ++ *((unsigned long *)&__m256i_op1[0]) = 0x6c2905ae7c14c561; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x94d7fb5200000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x94d7fb5200000000; ++ __m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffeb664007ffd61; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffe97a1df5b41b0; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffeb664007ffd61; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffe97a1df5b41b0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ff80; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000180; ++ __m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ 
__m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8282828282828282; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8768876887688769; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8282828282828282; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8768876887688769; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000003fffc0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000003fffc0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffc00040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffc00040; ++ __m256i_out = __lasx_xvsigncov_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffdbff980038ffaf; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffafffe80004fff1; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffdbff980038ffaf; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffafffe80004fff1; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0002fffc; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff0000fffd0003; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0002fffc; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff0000fffd0003; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff0000fffd0004; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff0000fffd0004; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0002fffd; ++ __m256i_out = __lasx_xvsigncov_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +-- +2.33.0 + diff --git a/LoongArch-Add-tests-for-ASX-vector-xvmax-xvmaxi-xvmi.patch 
b/LoongArch-Add-tests-for-ASX-vector-xvmax-xvmaxi-xvmi.patch new file mode 100644 index 0000000000000000000000000000000000000000..63f6aa074cf72a4a5b037c44b140b2d79a6fff84 --- /dev/null +++ b/LoongArch-Add-tests-for-ASX-vector-xvmax-xvmaxi-xvmi.patch @@ -0,0 +1,4124 @@ +From 00deb43164bce9740d6e2e103afce647bebc6ee3 Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Tue, 12 Sep 2023 15:31:02 +0800 +Subject: [PATCH 103/124] LoongArch: Add tests for ASX vector + xvmax/xvmaxi/xvmin/xvmini instructions. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lasx/lasx-xvmax-1.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvmax-2.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvmaxi-1.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvmaxi-2.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvmin-1.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvmin-2.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvmini-1.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvmini-2.c: New test. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lasx/lasx-xvmax-1.c | 545 ++++++++++++++ + .../loongarch/vector/lasx/lasx-xvmax-2.c | 560 +++++++++++++++ + .../loongarch/vector/lasx/lasx-xvmaxi-1.c | 471 ++++++++++++ + .../loongarch/vector/lasx/lasx-xvmaxi-2.c | 504 +++++++++++++ + .../loongarch/vector/lasx/lasx-xvmin-1.c | 575 +++++++++++++++ + .../loongarch/vector/lasx/lasx-xvmin-2.c | 680 ++++++++++++++++++ + .../loongarch/vector/lasx/lasx-xvmini-1.c | 416 +++++++++++ + .../loongarch/vector/lasx/lasx-xvmini-2.c | 284 ++++++++ + 8 files changed, 4035 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-2.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-1.c +new file mode 100644 +index 000000000..96c6671f2 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-1.c +@@ -0,0 +1,545 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, 
int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7f0000007f000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f0000007f000000; ++ __m256i_out = __lasx_xvmax_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffff000000; ++ __m256i_out = __lasx_xvmax_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ 
*((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x7f00000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff000000000000; ++ __m256i_out = __lasx_xvmax_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0004000400040004; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0004000400040004; ++ __m256i_out = __lasx_xvmax_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5980000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5980000000000000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x5900000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x5900000000000000; ++ __m256i_out = __lasx_xvmax_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff00ffce20; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00ffce20; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000ee1100; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000004560408; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000ee1100; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000004560408; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000ff1100; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000004560420; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000ff1100; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000004560420; ++ __m256i_out = __lasx_xvmax_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000001ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000001ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000001ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000001ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000100000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000000; ++ __m256i_out = __lasx_xvmax_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000100; ++ __m256i_out = __lasx_xvmax_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000007f433c78; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000007f433c78; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007f433c78; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007f433c78; ++ __m256i_out = __lasx_xvmax_b (__m256i_op0, __m256i_op1); 
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvmax_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffce; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffce; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7ff0000000000000; ++ __m256i_out = __lasx_xvmax_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff00ff00ff; ++ __m256i_out = __lasx_xvmax_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x4000000000000000; ++ __m256i_out = __lasx_xvmax_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff800080000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff800080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvmax_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001000400010004; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001000400010004; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000e0001000e; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000e0001000e; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000e0001000e; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000e0001000e; ++ __m256i_out = __lasx_xvmax_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0080000000000002; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0080000000000002; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0080000000000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0080000000000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1090918800000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1090918800000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1c80780000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1c80780000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_result[3]) = 0x1c80780000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_result[1]) = 0x1c80780000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000004000; ++ __m256i_out = __lasx_xvmax_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op1[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op1[0]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvmax_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmax_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000007f7f817f; ++ *((unsigned long *)&__m256i_result[2]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000007f7f817f; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f807f007f7f817f; ++ __m256i_out = __lasx_xvmax_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffebeeaaefafb; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffebeeaaeeeeb; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffebeeaaefafb; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffebeeaaeeeeb; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xebebebebebebebeb; ++ 
*((unsigned long *)&__m256i_op0[2]) = 0xebebebebebebebeb; ++ *((unsigned long *)&__m256i_op0[1]) = 0xebebebebebebebeb; ++ *((unsigned long *)&__m256i_op0[0]) = 0xebebebebebebebeb; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ff00; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ff00; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff00; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ff00; ++ __m256i_out = __lasx_xvmax_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000038ea4d4a; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000038ea4d4a; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000038ea4d4a; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000038ea4d4a; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff00007fff0000; ++ __m256i_out = __lasx_xvmax_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff97a2; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffff97a2; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmax_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffbfffc; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001000000010; ++ __m256i_out = __lasx_xvmax_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000004a557baac4; ++ *((unsigned long *)&__m256i_op1[2]) = 0x556caad9aabbaa88; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000004a557baac4; ++ *((unsigned long *)&__m256i_op1[0]) = 0x556caad9aabbaa88; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000004a557baac4; ++ *((unsigned long *)&__m256i_result[2]) = 0x556caad9aabbaa88; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000004a557baac4; ++ *((unsigned long *)&__m256i_result[0]) = 0x556caad9aabbaa88; ++ __m256i_out = __lasx_xvmax_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ 
*((unsigned long *)&__m256i_op0[3]) = 0x0000000000020006; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000020006; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000020006; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000020006; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000020006; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000020006; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000020006; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000020006; ++ __m256i_out = __lasx_xvmax_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff0000000f; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff0000000d; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff0000000f; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff0000000d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffe97c020010001; ++ __m256i_out = __lasx_xvmax_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-2.c +new file mode 100644 +index 000000000..38f2c0afe +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-2.c +@@ -0,0 +1,560 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffdfffffffdfffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffdfffffffdfffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) 
= 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmax_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffee0000ff4c; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ff050000ff3c; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000fff90000ff78; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffa80000ff31; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmax_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x817f11ed81800ff0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x817f11ed81800ff0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_result[2]) = 0x817f11ed81800ff0; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_result[0]) = 0x817f11ed81800ff0; ++ __m256i_out = __lasx_xvmax_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmax_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00003fe000000000; ++ __m256i_out = __lasx_xvmax_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x800000ff800000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x800000ff800000ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x800000ff800000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[0]) = 0x800000ff800000ff; ++ __m256i_out = __lasx_xvmax_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvmax_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff00000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1010100f10100fd4; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff00000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1010100f10100fd4; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffeeffaf; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000011; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffeeffaf; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000011; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffffffeeffaf; ++ *((unsigned long *)&__m256i_result[2]) = 0x1010100f10100fd4; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffffffeeffaf; ++ *((unsigned long *)&__m256i_result[0]) = 0x1010100f10100fd4; ++ __m256i_out = __lasx_xvmax_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ 
*((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000f0f0f0f0; ++ *((unsigned long *)&__m256i_op1[2]) = 0xf0f0f0f0f0f0f0f0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000f0f0f0f0; ++ *((unsigned long *)&__m256i_op1[0]) = 0xf0f0f0f0f0f0f0f0; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000f0f0f0f0; ++ *((unsigned long *)&__m256i_result[2]) = 0xf0f0f0f0f0f0f0f0; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000f0f0f0f0; ++ *((unsigned long *)&__m256i_result[0]) = 0xf0f0f0f0f0f0f0f0; ++ __m256i_out = __lasx_xvmax_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0xc03ae000ffff6000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xc600000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xc03ae000ffff6000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xc600000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xc03ae000ffff6000; ++ *((unsigned long *)&__m256i_result[2]) = 0xc600000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xc03ae000ffff6000; ++ *((unsigned long *)&__m256i_result[0]) = 0xc600000000000000; ++ __m256i_out = __lasx_xvmax_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvmax_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_wu (__m256i_op0, 
__m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffd8ffc7ffdaff8a; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffd8ffc7ffdaff8a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000d0d8ffffeecf; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000383fffffdf0d; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000d0d8ffffeecf; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000383fffffdf0d; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffd8ffc7ffffdf0d; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffd8ffc7ffffdf0d; ++ __m256i_out = __lasx_xvmax_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000003f8000004; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000003f8000004; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000003f8000004; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000003f8000004; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000014402080144; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op0[0]) = 0x0000014402080144; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000014402080144; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000014402080144; ++ __m256i_out = __lasx_xvmax_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007f433c78; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007f433c78; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000a0008; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000a0008; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007f433c78; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007f433c78; ++ __m256i_out = __lasx_xvmax_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff97a2; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffff97a2; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff97a2; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffff97a2; ++ __m256i_out = __lasx_xvmax_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000070002000a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000070002000a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000070002000a; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000070002000a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000070002000a; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000070002000a; ++ __m256i_out = __lasx_xvmax_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000004fb; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000004fb; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000004fb; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvmax_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000080000000800; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffcf800fffcf800; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000080000000800; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000080000000800; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffcf800fffcf800; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000080000000800; ++ __m256i_out = __lasx_xvmax_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000005e02; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000005e02; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmax_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000002a5429; ++ 
*((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000002a5429; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffc7418a023680; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffff8845bb954b00; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffc7418a023680; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000002a5429; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff8845bb954b00; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000002a5429; ++ __m256i_out = __lasx_xvmax_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmax_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmax_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffefefefe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000003f800000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffefefefe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000003f800000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000040404040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000040404040; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffefefefe; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000040404040; ++ *((unsigned long *)&__m256i_result[1]) 
= 0xfffffffffefefefe; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000040404040; ++ __m256i_out = __lasx_xvmax_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvmax_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000001400; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000003c01ff9; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000003c01ff9; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffff08a7de0; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffff07c4170; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffff08a7de0; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffff07c4170; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffff08a7de0; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffff07c4170; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffff08a7de0; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffff07c4170; ++ __m256i_out = __lasx_xvmax_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-1.c +new file mode 100644 +index 000000000..e804a0a45 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-1.c +@@ -0,0 +1,471 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffd10000006459; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000441000000004; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000004; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x0000040400000104; ++ *((unsigned long *)&__m256i_result[3]) = 0x0f0f0f0f0f0f6459; ++ *((unsigned long *)&__m256i_result[2]) = 0x0f0f44100f0f0f0f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0f0f0f0f0f0f0f0f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0f0f0f0f0f0f0f0f; ++ __m256i_out = __lasx_xvmaxi_b (__m256i_op0, 15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8080808180808093; ++ *((unsigned long *)&__m256i_op0[2]) = 0x80808081808080fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8080808180808093; ++ *((unsigned long *)&__m256i_op0[0]) = 0x80808081808080fb; ++ *((unsigned long *)&__m256i_result[3]) = 0xf5f5f5f5f5f5f5f5; ++ *((unsigned long *)&__m256i_result[2]) = 0xf5f5f5f5f5f5f5fe; ++ *((unsigned long *)&__m256i_result[1]) = 0xf5f5f5f5f5f5f5f5; ++ *((unsigned long *)&__m256i_result[0]) = 0xf5f5f5f5f5f5f5fb; ++ __m256i_out = __lasx_xvmaxi_b (__m256i_op0, -11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0909090909090909; ++ *((unsigned long *)&__m256i_result[2]) = 0x0909090909090909; ++ *((unsigned long *)&__m256i_result[1]) = 0x0909090909090909; ++ *((unsigned long *)&__m256i_result[0]) = 0x0909090909090909; ++ __m256i_out = __lasx_xvmaxi_b (__m256i_op0, 9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaxi_b (__m256i_op0, -4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0002000200020002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_result[2]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_result[0]) = 0x0d0d0d0d0d0d0d0d; ++ __m256i_out = __lasx_xvmaxi_b (__m256i_op0, 13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_result[2]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_result[1]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_result[0]) = 0x0a0a0a0a7f0a0a0a; ++ __m256i_out = __lasx_xvmaxi_b (__m256i_op0, 10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0707070707070707; ++ *((unsigned long *)&__m256i_result[2]) = 0x0707070707070707; ++ *((unsigned long *)&__m256i_result[1]) = 0x0707070707070707; ++ *((unsigned long *)&__m256i_result[0]) = 0x0707070707070707; ++ __m256i_out = 
__lasx_xvmaxi_b (__m256i_op0, 7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_result[2]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_result[0]) = 0x0d0d0d0d0d0d0d0d; ++ __m256i_out = __lasx_xvmaxi_b (__m256i_op0, 13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x2a2a2a2a2a2a2a2a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2a2a2a2a2a2a2a2a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x2a2a2a2a2a2a2a2a; ++ *((unsigned long *)&__m256i_op0[0]) = 0x2a2a2a2a2a2a2a2a; ++ *((unsigned long *)&__m256i_result[3]) = 0x2a2a2a2a2a2a2a2a; ++ *((unsigned long *)&__m256i_result[2]) = 0x2a2a2a2a2a2a2a2a; ++ *((unsigned long *)&__m256i_result[1]) = 0x2a2a2a2a2a2a2a2a; ++ *((unsigned long *)&__m256i_result[0]) = 0x2a2a2a2a2a2a2a2a; ++ __m256i_out = __lasx_xvmaxi_b (__m256i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaxi_b (__m256i_op0, 0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0c0c0c0c0c0c0c0c; ++ *((unsigned long *)&__m256i_result[2]) = 0x0c0c0c0c0c0c0c0c; ++ *((unsigned long *)&__m256i_result[1]) = 0x0c0c0c0c0c0c0c0c; ++ *((unsigned long *)&__m256i_result[0]) = 0x0c0c0c0c0c0c0c0c; ++ __m256i_out = __lasx_xvmaxi_b (__m256i_op0, 12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_result[2]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_result[1]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_result[0]) = 0x0005000500050005; ++ __m256i_out = __lasx_xvmaxi_h (__m256i_op0, 5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvmaxi_h (__m256i_op0, -11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffc00000ffc0ffc0; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long *)&__m256i_op0[0]) = 
0xffc00000ffc0ffc0; ++ *((unsigned long *)&__m256i_result[3]) = 0xfff9fff9fff9fff9; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff90000fff9fff9; ++ *((unsigned long *)&__m256i_result[1]) = 0xfff9fff9fff9fff9; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff90000fff9fff9; ++ __m256i_out = __lasx_xvmaxi_h (__m256i_op0, -7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaxi_h (__m256i_op0, -13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff000c0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00040000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff000c0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff00040000; ++ __m256i_out = __lasx_xvmaxi_h (__m256i_op0, -2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000001ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffe0000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000001ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffe0000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00080008000801ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0008000800080008; ++ *((unsigned 
long *)&__m256i_result[1]) = 0x00080008000801ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0008000800080008; ++ __m256i_out = __lasx_xvmaxi_h (__m256i_op0, 8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaxi_h (__m256i_op0, 0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000c9; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000c9; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000c9; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000000c9; ++ __m256i_out = __lasx_xvmaxi_h (__m256i_op0, -15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff8000ffa3; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000008000165a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff8000ffa3; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000008000165a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0009000900090009; ++ *((unsigned long *)&__m256i_result[2]) = 0x000900090009165a; ++ *((unsigned long *)&__m256i_result[1]) = 0x0009000900090009; ++ *((unsigned long *)&__m256i_result[0]) = 0x000900090009165a; ++ __m256i_out = __lasx_xvmaxi_h (__m256i_op0, 9); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaxi_h (__m256i_op0, 0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfd12fd12fd12fd12; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfd12fd12fd12fd12; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfd12fd12fd12fd12; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfd12fd12fd12fd12; ++ *((unsigned long *)&__m256i_result[3]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m256i_result[2]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m256i_result[1]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m256i_result[0]) = 0x000a000a000a000a; ++ __m256i_out = __lasx_xvmaxi_h (__m256i_op0, 10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000401000000; ++ __m256i_out = __lasx_xvmaxi_w (__m256i_op0, 4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaxi_w (__m256i_op0, -16); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0110000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0110000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0110000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0110000000000080; ++ *((unsigned long *)&__m256i_result[3]) = 0x0110000000000004; ++ *((unsigned long *)&__m256i_result[2]) = 0x0110000000000080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0110000000000004; ++ *((unsigned long *)&__m256i_result[0]) = 0x0110000000000080; ++ __m256i_out = __lasx_xvmaxi_w (__m256i_op0, 4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaxi_w (__m256i_op0, -11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000200000002; ++ __m256i_out = __lasx_xvmaxi_w (__m256i_op0, 2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000e0000000e; ++ __m256i_out = __lasx_xvmaxi_w (__m256i_op0, 14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff040000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff040000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffff400000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffff400000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaxi_w (__m256i_op0, -12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000900000009; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000900000009; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000900000009; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000900000009; ++ __m256i_out = __lasx_xvmaxi_w (__m256i_op0, 9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xe07de0801f20607a; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xe07de0801f20607a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000800000008; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000081f20607a; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000800000008; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000081f20607a; ++ __m256i_out = __lasx_xvmaxi_w (__m256i_op0, 8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvmaxi_w (__m256i_op0, -2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ff80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff80; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvmaxi_d (__m256i_op0, -16); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000009; ++ __m256i_out = __lasx_xvmaxi_d (__m256i_op0, 9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaxi_d (__m256i_op0, -2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000ff1100; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000004560420; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000ff1100; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000004560420; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000ff1100; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000004560420; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000ff1100; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000004560420; ++ __m256i_out = __lasx_xvmaxi_d (__m256i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaxi_d (__m256i_op0, -1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000007e1c7e1c; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7e00000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000007e1c7e1c; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7e00000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000007e1c7e1c; ++ *((unsigned long *)&__m256i_result[2]) = 0x7e00000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000007e1c7e1c; ++ *((unsigned long *)&__m256i_result[0]) = 0x7e00000000000000; ++ __m256i_out = __lasx_xvmaxi_d (__m256i_op0, -9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffff5; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffff5; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffff5; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffff5; ++ __m256i_out = __lasx_xvmaxi_d (__m256i_op0, -11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000007; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000007; ++ __m256i_out = __lasx_xvmaxi_d (__m256i_op0, 7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000007b007e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000007b007e; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000007b007e; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000007b007e; ++ __m256i_out = __lasx_xvmaxi_d (__m256i_op0, 2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-2.c +new file mode 100644 +index 000000000..b6b34063c +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-2.c +@@ -0,0 +1,504 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaxi_bu (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000102; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0a0a0a0a0a0a0a0a; ++ *((unsigned long *)&__m256i_result[2]) = 0x0a0a0a0a0a0a0a0a; ++ *((unsigned long *)&__m256i_result[1]) = 0x0a0a0a0a0a0a0a0a; ++ *((unsigned long *)&__m256i_result[0]) = 0x0a0a0a0a0a0a0a0a; ++ __m256i_out = __lasx_xvmaxi_bu (__m256i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x1717171717171717; ++ *((unsigned long *)&__m256i_result[2]) = 0x1717171717171717; ++ *((unsigned long *)&__m256i_result[1]) = 0x1717171717171717; ++ *((unsigned long *)&__m256i_result[0]) = 0x1717171717171717; ++ __m256i_out = __lasx_xvmaxi_bu (__m256i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ffe00007f000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x1616161616161616; ++ *((unsigned long *)&__m256i_result[2]) = 0x161616167fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7ffe16167f161616; ++ *((unsigned long *)&__m256i_result[0]) = 0x161616167fffffff; ++ __m256i_out = __lasx_xvmaxi_bu (__m256i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000feb60000b7d0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000feb60000c7eb; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000feb60000b7d0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000feb60000c7eb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0707feb60707b7d0; ++ *((unsigned long *)&__m256i_result[2]) = 0x0707feb60707c7eb; ++ *((unsigned long *)&__m256i_result[1]) = 0x0707feb60707b7d0; ++ *((unsigned long *)&__m256i_result[0]) = 0x0707feb60707c7eb; ++ __m256i_out = __lasx_xvmaxi_bu (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x1111111111111111; ++ *((unsigned long *)&__m256i_result[2]) = 0x1111111111111111; ++ *((unsigned long *)&__m256i_result[1]) = 0x1111111111111111; ++ *((unsigned long *)&__m256i_result[0]) = 0x1111111111111111; ++ __m256i_out = __lasx_xvmaxi_bu (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ 
*((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvmaxi_bu (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffa3; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000165a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffa3; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000165a; ++ *((unsigned long *)&__m256i_result[3]) = 0x1818ffff1818ffa3; ++ *((unsigned long *)&__m256i_result[2]) = 0x181818181818185a; ++ *((unsigned long *)&__m256i_result[1]) = 0x1818ffff1818ffa3; ++ *((unsigned long *)&__m256i_result[0]) = 0x181818181818185a; ++ __m256i_out = __lasx_xvmaxi_bu (__m256i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256i_result[2]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256i_result[1]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256i_result[0]) = 0x1c1c1c1c1c1c1c1c; ++ __m256i_out = __lasx_xvmaxi_bu (__m256i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaxi_hu 
(__m256i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xeffc000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf064c6098d214127; ++ *((unsigned long *)&__m256i_op0[1]) = 0xeffc000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf064c6098d214127; ++ *((unsigned long *)&__m256i_result[3]) = 0xeffc001800180018; ++ *((unsigned long *)&__m256i_result[2]) = 0xf064c6098d214127; ++ *((unsigned long *)&__m256i_result[1]) = 0xeffc001800180018; ++ *((unsigned long *)&__m256i_result[0]) = 0xf064c6098d214127; ++ __m256i_out = __lasx_xvmaxi_hu (__m256i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010003; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010003; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvmaxi_hu (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0007000700070007; ++ *((unsigned long *)&__m256i_result[2]) = 0x0007000700070007; ++ *((unsigned long *)&__m256i_result[1]) = 0x0007000700070007; ++ *((unsigned long *)&__m256i_result[0]) = 0x0007000700070007; ++ __m256i_out = __lasx_xvmaxi_hu (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0018001800180018; ++ *((unsigned long *)&__m256i_result[2]) = 0x0018001800180018; ++ *((unsigned long *)&__m256i_result[1]) = 0x0018001800180018; ++ *((unsigned long *)&__m256i_result[0]) = 0x0018001800180018; ++ __m256i_out = __lasx_xvmaxi_hu (__m256i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0017001700176d6d; ++ *((unsigned long *)&__m256i_result[2]) = 0x0017001700176d6d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0017001700176d6d; ++ *((unsigned long *)&__m256i_result[0]) = 0x0017001700176d6d; ++ __m256i_out = __lasx_xvmaxi_hu (__m256i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x001fffffffe00000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x001fffffffe00000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x001fffffffe00011; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x001fffffffe00011; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvmaxi_hu (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000001400000014; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001400000014; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000001400000014; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001400000014; ++ __m256i_out = __lasx_xvmaxi_wu (__m256i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000e00000080; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000e00000080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000e00000080; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000e00000080; ++ __m256i_out = __lasx_xvmaxi_wu (__m256i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000fd0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000fd0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000001b0000001b; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001b00fd0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000001b0000001b; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001b00fd0000; ++ __m256i_out = __lasx_xvmaxi_wu (__m256i_op0, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000007aff7c00; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffd017d00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000007aff7c00; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffd017d00; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000c7aff7c00; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffd017d00; ++ 
*((unsigned long *)&__m256i_result[1]) = 0x0000000c7aff7c00; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffd017d00; ++ __m256i_out = __lasx_xvmaxi_wu (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000001f0000001f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001f0000ffff; ++ __m256i_out = __lasx_xvmaxi_wu (__m256i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000300000003; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000300000003; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000300000003; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000300000003; ++ __m256i_out = __lasx_xvmaxi_wu (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1010101010001000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1010101000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x1010101010001000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x101010100000000e; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000ff000000ff; ++ __m256i_out = __lasx_xvmaxi_wu 
(__m256i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000007ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000007ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000007ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000007ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000001e0007ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001e0007ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000001e0007ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001e0007ffff; ++ __m256i_out = __lasx_xvmaxi_wu (__m256i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000fd; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000400000004; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000004000000fd; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000004000000fe; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000400000004; ++ __m256i_out = __lasx_xvmaxi_wu (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000001f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000001f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000001f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000001f; ++ __m256i_out = __lasx_xvmaxi_wu (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000000010; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000010; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000001700000017; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001700000017; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000001700000017; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001700000017; ++ __m256i_out = __lasx_xvmaxi_wu (__m256i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x07fee332883f86b0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x07fed3c8f7ad28d0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x07fee332883f86b0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x07fed3c8f7ad28d0; ++ *((unsigned long *)&__m256i_result[3]) = 0x07fee332883f86b0; ++ *((unsigned long *)&__m256i_result[2]) = 0x07fed3c8f7ad28d0; ++ *((unsigned long *)&__m256i_result[1]) = 0x07fee332883f86b0; ++ *((unsigned long *)&__m256i_result[0]) = 0x07fed3c8f7ad28d0; ++ __m256i_out = __lasx_xvmaxi_wu (__m256i_op0, 0x10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000001e; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000001e; ++ __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_result[3]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_result[2]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_result[1]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_result[0]) = 0x1c1b1a191c1b1a19; ++ __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000003f; ++ __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000001c; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000001c; ++ 
*((unsigned long *)&__m256i_result[1]) = 0x000000000000001c; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000001c; ++ __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000005; ++ __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000600000006; ++ __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffffe; ++ __m256i_out = __lasx_xvmaxi_du 
(__m256i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000012; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000012; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040; ++ __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0x12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000007; ++ __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000000b; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000000b; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000000b; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000000b; ++ __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000013; ++ __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000014; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000014; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000014; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000014; ++ __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000014; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000014; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000014; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000014; ++ __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-1.c +new file mode 100644 +index 000000000..7dbf335c1 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-1.c +@@ -0,0 +1,575 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, 
unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff00000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8001000080000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000800080000728; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8001800080008000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x800080008000b8f1; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000ffff8000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff80008000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[0]) = 0x800080008000b8f1; ++ __m256i_out = __lasx_xvmin_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvmin_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmin_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000017bfffff0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000180007fe8; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000017bfffff0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000180007fe8; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000017bfffff0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000180007fe8; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000017bfffff0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000180007fe8; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000017bfffff0; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000180007fe8; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000017bfffff0; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000180007fe8; ++ __m256i_out = __lasx_xvmin_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0xc848c848c848c848; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8848c848c848c848; ++ *((unsigned long *)&__m256i_op1[1]) = 0xc848c848c848c848; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8848c848c848c848; ++ *((unsigned long *)&__m256i_result[3]) = 0xc800c800c800c800; ++ *((unsigned long *)&__m256i_result[2]) = 0x8800c800c800c801; ++ *((unsigned long *)&__m256i_result[1]) = 0xc800c800c800c800; ++ *((unsigned long *)&__m256i_result[0]) = 0x8800c800c800c801; ++ __m256i_out = __lasx_xvmin_b 
(__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7070545438381c1c; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7070545438381c1c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7070545438381c1c; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7070545438381c1c; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffff00ffff8000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffff00ffff8000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffff00ffff8000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffff00ffff8000; ++ __m256i_out = __lasx_xvmin_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x003ff18080010201; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0100000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x003ff18080010201; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x0100000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000f18080010000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000f18080010000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000d24; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000001fffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000001fffe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000001fffe; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x000000000001fffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000fffe; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000fffe; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000fffe; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000fffe; ++ __m256i_out = __lasx_xvmin_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff81ff7d; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff81ff7d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffff81ff7d; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffff81ff7d; ++ __m256i_out = __lasx_xvmin_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmin_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000017f7f7f7f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000017f7f7f7f; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmin_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000004040104; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffd1108199; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000714910f9; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffd10000006459; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000441000000004; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000040400000104; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffd10000000004; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffd1108199; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000104; ++ __m256i_out = __lasx_xvmin_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff00010003; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0080000200000002; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff00010003; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffff0100; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000fd00ffff02ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000001fffeff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff00fe00feff02ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff02ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffff0100; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00fe00feff02ff; ++ __m256i_out = __lasx_xvmin_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000008050501; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000008050501; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvmin_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000f0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000f0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x01ff0020ff1f001f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fe1ffe0ffe1ffe0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x01ff0020ff1f001f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fe1ffe0ffe1ffe0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ff1f001f; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffe1ffe0; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ff1f001f; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffe1ffe0; ++ __m256i_out = __lasx_xvmin_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvmin_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0xf000f00000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0xf000f00000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xf000f00000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xf000f00000000001; ++ __m256i_out = __lasx_xvmin_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000ff0102; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007c000000810081; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000ff0102; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007c000000810081; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff0000ffff; ++ __m256i_out = __lasx_xvmin_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4545454545454545; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4545454545454545; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4545454545454545; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4545454545454545; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001; ++ 
*((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvmin_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffff0001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffff0001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmin_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffbfffffffb; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffbfffffffb; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffbfffffffb; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffbfffffffb; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffbfffffffb; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffbfffffffb; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffbfffffffb; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffbfffffffb; ++ __m256i_out = __lasx_xvmin_d 
(__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-2.c +new file mode 100644 +index 000000000..9eaa0e9e7 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-2.c +@@ -0,0 +1,680 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007f017f807f017d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007f017f807f017f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000017f0000017d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000017f0000017f; ++ __m256i_out = __lasx_xvmin_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7f70000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7f70000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7f70000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7f70000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7f70000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7f70000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7f70000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f70000000000000; ++ __m256i_out = __lasx_xvmin_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000fffff800; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffff800; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000014402080144; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000014402080144; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000002080100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000002080100; ++ __m256i_out = __lasx_xvmin_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff0000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff0000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvmin_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000001de2dc20; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000001de2dc20; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000010100; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000001000100; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffbf7f7fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffe651bfff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000010100; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000001000100; ++ __m256i_out = __lasx_xvmin_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ff80; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000468600007f79; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000f3280000dfff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1d1d1d1d1d1d1d1d; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1d1d1d1ddd9d9d1d; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1d1d1d1d1d1d1d1d; ++ *((unsigned long *)&__m256i_op1[0]) = 0x1d1d1d1d046fdd1d; ++ *((unsigned long *)&__m256i_result[3]) = 0x00001d1d00001d1d; ++ *((unsigned long *)&__m256i_result[2]) = 0x00001d1d00007f79; ++ *((unsigned long *)&__m256i_result[1]) = 0x00001d1d00001d1d; ++ *((unsigned long *)&__m256i_result[0]) = 0x00001d1d0000dd1d; ++ __m256i_out = 
__lasx_xvmin_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0002000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0006000000040000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0002000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0106010601060106; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0106010601060106; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0106010601060106; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0106010601060106; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00011ffb0000bee1; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00011ffb0000bee1; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001010600000106; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001010600000106; ++ __m256i_out = __lasx_xvmin_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffd5d5ffffd5d6; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffd5d5ffffd5d6; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x8b1414140e0e0e0e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x36722a7e66972cd6; ++ *((unsigned long *)&__m256i_op1[3]) = 0xc58a0a0a07070706; ++ *((unsigned long *)&__m256i_op1[2]) = 0x006b60e4180b0023; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1b39153f334b966a; ++ *((unsigned long *)&__m256i_op1[0]) = 0xf1d75d79efcac002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x006b60e40e0e0e0e; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x36722a7e66972cd6; ++ __m256i_out = __lasx_xvmin_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000101ff01; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000010001; ++ __m256i_out = __lasx_xvmin_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff000c0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00040000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff000c0000; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff00040000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff000c0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff00040000; ++ __m256i_out = __lasx_xvmin_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00010e0d00009e0e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00009000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000e0e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00009000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000033; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000033; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000033; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000033; ++ __m256i_out = __lasx_xvmin_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffe36780; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000100000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffe36780; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x80008000fff98000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x80008000fff98000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00f0000000f00010; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff0ff00fff0ff10; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00f0000000f00010; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfff0ff00fff0ff10; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000400000003ffb; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0x0000400100004001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000400000003ffb; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000400100004001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000400000003ffb; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000400100004001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000400000003ffb; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000400100004001; ++ __m256i_out = __lasx_xvmin_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x60f02081c1c4ce2c; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8008000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x60f02081c1c4ce2c; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8008000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0101010183f9999b; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x01010101d58f43c9; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010183f9999b; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x01010101d58f43c9; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvmin_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff00ff00ee; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff00ff00ee; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x007f00ff007f00ff; ++ __m256i_out = __lasx_xvmin_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000001fffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000001fffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000001fffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000001fffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000007f7f7f7f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000007f7f7f7f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000001fffe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000001fffe; ++ __m256i_out = __lasx_xvmin_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000202; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000202; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000202; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000202; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-1.c +new file mode 100644 +index 000000000..01aabada8 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-1.c +@@ -0,0 +1,416 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, 
fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf96d674800000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x44a4330e2c7116c0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x14187a7822b653c0; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfbe0b866962b96d0; ++ *((unsigned long *)&__m256i_result[3]) = 0xf90c0c0c00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0ca40c0c0c0c0cc0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0c0c0c0c0cb60cc0; ++ *((unsigned long *)&__m256i_result[0]) = 0xfbe0b80c960c96d0; ++ __m256i_out = __lasx_xvmini_b (__m256i_op0, 12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0010bfc80010bf52; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff1bfca0011bfcb; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0010bfc80010bf52; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff1bfca0011bfcb; ++ *((unsigned long *)&__m256i_result[3]) = 0xf5f5bfc8f5f5bff5; ++ *((unsigned long *)&__m256i_result[2]) = 0xf5f1bfcaf5f5bfcb; ++ *((unsigned long *)&__m256i_result[1]) = 0xf5f5bfc8f5f5bff5; ++ *((unsigned long *)&__m256i_result[0]) = 0xf5f1bfcaf5f5bfcb; ++ __m256i_out = __lasx_xvmini_b (__m256i_op0, -11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xf8f8f8f8f8f8f8f8; ++ *((unsigned long *)&__m256i_result[2]) = 0xf8f8f8f8f8f8f8f8; ++ *((unsigned long *)&__m256i_result[1]) = 0xf8f8f8f8f8f8f8f8; ++ *((unsigned long *)&__m256i_result[0]) = 0xf8f8f8f8f8f8f8f8; ++ __m256i_out = __lasx_xvmini_b 
(__m256i_op0, -8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000aaabffff; ++ __m256i_out = __lasx_xvmini_b (__m256i_op0, 11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff47b4ffff5878; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000b84b0000a787; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff47b4ffff5878; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000b84b0000a787; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff07b4ffff0707; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000b8070000a787; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff07b4ffff0707; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000b8070000a787; ++ __m256i_out = __lasx_xvmini_b (__m256i_op0, 7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_result[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_result[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_result[0]) = 0xf7f7f7f7f7f7f7f7; ++ __m256i_out = __lasx_xvmini_b (__m256i_op0, -9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_b (__m256i_op0, 5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_b (__m256i_op0, 14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xf3f3f3f3f3f3f3f3; ++ *((unsigned long *)&__m256i_result[2]) = 0xf3f3f3f3f3f3f3f3; ++ *((unsigned long *)&__m256i_result[1]) = 0xf3f3f3f3f3f3f3f3; ++ *((unsigned long *)&__m256i_result[0]) = 0xf3f3f3f3f3f3f3f3; ++ __m256i_out = __lasx_xvmini_b (__m256i_op0, -13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[3]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_result[2]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_result[1]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_result[0]) = 0xf9f9f9f9f9f9f9f9; ++ __m256i_out = __lasx_xvmini_b (__m256i_op0, -7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc30e0000ff800000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc30e0000ff800000; ++ *((unsigned long *)&__m256i_result[3]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_result[2]) = 0xc3030000ff800000; ++ *((unsigned long *)&__m256i_result[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_result[0]) = 0xc3030000ff800000; ++ __m256i_out = __lasx_xvmini_b (__m256i_op0, 3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_result[1]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff6fff6fff6fff6; ++ __m256i_out = __lasx_xvmini_h (__m256i_op0, -10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1fffffff1fffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0383634303836343; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1fffffff1fffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0383634303836343; ++ *((unsigned long *)&__m256i_result[3]) = 0x0002ffff0002ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0002000200020002; ++ *((unsigned long *)&__m256i_result[1]) 
= 0x0002ffff0002ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0002000200020002; ++ __m256i_out = __lasx_xvmini_h (__m256i_op0, 2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000f7bc0001f7bd; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000f93b0000017c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000f7bc0001f7bd; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000f93b0000017b; ++ *((unsigned long *)&__m256i_result[3]) = 0xfff2f7bcfff2f7bd; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff2f93bfff2fff2; ++ *((unsigned long *)&__m256i_result[1]) = 0xfff2f7bcfff2f7bd; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff2f93bfff2fff2; ++ __m256i_out = __lasx_xvmini_h (__m256i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmini_h (__m256i_op0, 6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_h (__m256i_op0, 13); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfff9fff9fff9fff9; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff9fff9fff9fff9; ++ *((unsigned long *)&__m256i_result[1]) = 0xfff9fff9fff9fff9; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff9fff9fff9fff9; ++ __m256i_out = __lasx_xvmini_h (__m256i_op0, -7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfff3fff3fff3fff3; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff3fff3fff3fff3; ++ *((unsigned long *)&__m256i_result[1]) = 0xfff3fff3fff3fff3; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff3fff3fff3fff3; ++ __m256i_out = __lasx_xvmini_h (__m256i_op0, -13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfff2fff2fff2fff2; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff2fff2fff2fff2; ++ *((unsigned long *)&__m256i_result[1]) = 0xfff2fff2fff2fff2; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff2fff2fff2fff2; ++ __m256i_out = __lasx_xvmini_h (__m256i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_h (__m256i_op0, 11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op0[2]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op0[1]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op0[0]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000400000004; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000400000004; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000400000004; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000400000004; ++ __m256i_out = __lasx_xvmini_w (__m256i_op0, 4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff8c80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fff0e400; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff8c80; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fff0e400; ++ __m256i_out = __lasx_xvmini_w (__m256i_op0, 12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000100000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0xfffffff2fffffff2; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffff2fffffff2; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffff2fffffff2; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffff2fffffff2; ++ __m256i_out = __lasx_xvmini_w (__m256i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000a00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000010000000a; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000a00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000010000000a; ++ __m256i_out = __lasx_xvmini_w (__m256i_op0, 10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffff8fffffff8; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffff8fffffff8; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffff8fffffff8; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffff8fffffff8; ++ __m256i_out = __lasx_xvmini_w (__m256i_op0, -8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffff7fffffff7; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffff7fffffff7; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffff7fffffff7; ++ *((unsigned 
long *)&__m256i_result[0]) = 0xfffffff7fffffff7; ++ __m256i_out = __lasx_xvmini_w (__m256i_op0, -9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmini_w (__m256i_op0, 4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffff0fffffff0; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffff0fffffff0; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffff0fffffff0; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffff0fffffff0; ++ __m256i_out = __lasx_xvmini_w (__m256i_op0, -16); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmini_w (__m256i_op0, -1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ 
*((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_d (__m256i_op0, 11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x327f010101010102; ++ *((unsigned long *)&__m256i_op0[2]) = 0x6300000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x327f010101010102; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6300000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffff4; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffff4; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffff4; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffff4; ++ __m256i_out = __lasx_xvmini_d (__m256i_op0, -12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000900000009; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000900000009; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000900000009; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000900000009; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000009; ++ __m256i_out = __lasx_xvmini_d (__m256i_op0, 9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_d (__m256i_op0, 13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmini_d (__m256i_op0, -1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00ff007f007f00; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00ff007f007f00; ++ *((unsigned long *)&__m256i_result[3]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_result[2]) = 0xff00ff007f007f00; ++ *((unsigned long *)&__m256i_result[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00ff007f007f00; ++ __m256i_out = __lasx_xvmini_d (__m256i_op0, -5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000000c; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000000c; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_d (__m256i_op0, 12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-2.c +new file mode 100644 +index 000000000..8eb7d9355 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-2.c +@@ -0,0 +1,284 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_bu (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xefdfefdf00000000; ++ *((unsigned long *)&__m256i_op0[2]) 
= 0xefdfefdfefdfefdf; ++ *((unsigned long *)&__m256i_op0[1]) = 0xefdfefdf00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xefdfefdfefdfefdf; ++ *((unsigned long *)&__m256i_result[3]) = 0x0d0d0d0d00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0d0d0d0d00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0d0d0d0d0d0d0d0d; ++ __m256i_out = __lasx_xvmini_bu (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_bu (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x1b1b1b1b1b1b1b1b; ++ *((unsigned long *)&__m256i_result[2]) = 0x1b1b1b1b1b1b1b1b; ++ *((unsigned long *)&__m256i_result[1]) = 0x1b1b1b1b1b1b1b1b; ++ *((unsigned long *)&__m256i_result[0]) = 0x1b1b1b1b1b1b1b1b; ++ __m256i_out = __lasx_xvmini_bu (__m256i_op0, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_bu (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000008; ++ __m256i_out = __lasx_xvmini_bu (__m256i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x1e1e1e0000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x1e1e1e0000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x1e1e1e0000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x1e1e1e0000000000; ++ __m256i_out = __lasx_xvmini_bu (__m256i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x6018000000000cd1; ++ *((unsigned long *)&__m256i_op0[2]) = 0x6040190d00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x6018000000000cd1; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6040190d00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0a0a000000000a0a; ++ *((unsigned long *)&__m256i_result[2]) = 0x0a0a0a0a00000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0a0a000000000a0a; ++ *((unsigned long *)&__m256i_result[0]) = 0x0a0a0a0a00000000; ++ __m256i_out = __lasx_xvmini_bu (__m256i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000800400010006d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000800400010006d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0008001c0010001c; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0008001c0010001c; ++ __m256i_out = __lasx_xvmini_bu (__m256i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_bu (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_hu (__m256i_op0, 0xf); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0007000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0007000000000000; ++ __m256i_out = __lasx_xvmini_hu (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000002222; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00003ddd80007bbb; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000002222; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00003ddd80007bbb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001700170017; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001700170017; ++ __m256i_out = __lasx_xvmini_hu (__m256i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_hu (__m256i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_hu (__m256i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvmini_du (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_du (__m256i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_du (__m256i_op0, 0x12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_du (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_du (__m256i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffe400000707; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000af100001455; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffe400000707; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000af100001455; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvmini_du (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_du (__m256i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff61010380; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff61010380; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000006; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000006; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000006; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000006; ++ __m256i_out = __lasx_xvmini_du (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_du (__m256i_op0, 0x4); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +-- +2.33.0 + diff --git a/LoongArch-Add-tests-for-ASX-vector-xvmul-xvmod-xvdiv.patch b/LoongArch-Add-tests-for-ASX-vector-xvmul-xvmod-xvdiv.patch new file mode 100644 index 0000000000000000000000000000000000000000..c50e4292b0594174252ecaca1e8c8f08384df525 --- /dev/null +++ b/LoongArch-Add-tests-for-ASX-vector-xvmul-xvmod-xvdiv.patch @@ -0,0 +1,5766 @@ +From 95ce2bef98ebcebebcdb3a9411d1c9783935ac89 Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Tue, 12 Sep 2023 15:23:35 +0800 +Subject: [PATCH 102/124] LoongArch: Add tests for ASX vector xvmul/xvmod/xvdiv + instructions. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lasx/lasx-xvdiv-1.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvdiv-2.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvmod-1.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvmod-2.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvmul.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvmulwev-1.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvmulwev-2.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvmulwev-3.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvmulwod-1.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvmulwod-2.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvmulwod-3.c: New test. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lasx/lasx-xvdiv-1.c | 485 ++++++++++++++ + .../loongarch/vector/lasx/lasx-xvdiv-2.c | 500 ++++++++++++++ + .../loongarch/vector/lasx/lasx-xvmod-1.c | 395 +++++++++++ + .../loongarch/vector/lasx/lasx-xvmod-2.c | 410 ++++++++++++ + .../loongarch/vector/lasx/lasx-xvmul.c | 620 ++++++++++++++++++ + .../loongarch/vector/lasx/lasx-xvmulwev-1.c | 590 +++++++++++++++++ + .../loongarch/vector/lasx/lasx-xvmulwev-2.c | 590 +++++++++++++++++ + .../loongarch/vector/lasx/lasx-xvmulwev-3.c | 605 +++++++++++++++++ + .../loongarch/vector/lasx/lasx-xvmulwod-1.c | 545 +++++++++++++++ + .../loongarch/vector/lasx/lasx-xvmulwod-2.c | 470 +++++++++++++ + .../loongarch/vector/lasx/lasx-xvmulwod-3.c | 440 +++++++++++++ + 11 files changed, 5650 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmul.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-3.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-1.c +new file mode 100644 +index 000000000..0d7c67703 +--- /dev/null ++++ 
b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-1.c +@@ -0,0 +1,485 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00080000000cc916; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000006fff3; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[2]) = 
0xfffffffefffffefc; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00f8000000f41bfb; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000fa0106; ++ __m256i_out = __lasx_xvdiv_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000fe000000fe; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000fe000000fe; ++ 
*((unsigned long *)&__m256i_result[1]) = 0x000000fe000000fe; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000fe000000fe; ++ __m256i_out = __lasx_xvdiv_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x01fe8001b72e0001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xb72e8001b72eaf12; ++ *((unsigned long *)&__m256i_op0[1]) = 0x01fe000247639d9c; ++ *((unsigned long *)&__m256i_op0[0]) = 0xb5308001b72eaf12; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff017e01fe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x017e00ff017e00ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff017e01fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0002ff80ffb70000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffb7ff80ffd0ffd8; ++ *((unsigned long *)&__m256i_result[1]) = 0x00010000002fff9e; ++ *((unsigned long *)&__m256i_result[0]) = 0xffb5ff80ffd0ffd8; ++ __m256i_out = __lasx_xvdiv_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x8091811081118110; ++ *((unsigned long *)&__m256i_op0[2]) = 0x80a6802680208015; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8091811081110013; ++ *((unsigned long *)&__m256i_op0[0]) = 0x80a6802680200018; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8091811081118110; ++ *((unsigned long *)&__m256i_op1[2]) = 0x80a6802680208015; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8091811081110013; ++ *((unsigned long *)&__m256i_op1[0]) = 0x80a6802680200018; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvdiv_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xe07de0801f20607a; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xe07de0801f20607a; ++ *((unsigned long *)&__m256i_op1[3]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x1f831f80e0e09f86; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x1f831f80e0e09f86; ++ __m256i_out = __lasx_xvdiv_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xdededededededede; ++ *((unsigned long *)&__m256i_op1[2]) = 0xdededededededede; ++ *((unsigned long *)&__m256i_op1[1]) = 0xdededededededede; ++ *((unsigned long *)&__m256i_op1[0]) = 0xdededededededede; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffa080000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffe080000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffa080000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffe080000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0010002000100020; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0010002000100020; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0010002000100020; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0010002000100020; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000fd00ffff02ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000001fffeff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00fe00feff02ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001fffe0000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001fffe00010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001fffe0000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001fffe00010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffff81ffffff00; ++ __m256i_out = __lasx_xvdiv_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff0000ffff0001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff0000ffff0001; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffff00; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffff00; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff0000000f; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff0000000d; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff0000000f; ++ *((unsigned long *)&__m256i_op0[0]) = 
0xffffffff0000000d; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff0000000f; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff0000000d; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff0000000f; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff0000000d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvdiv_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000007; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff9fffffffbffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffdaaaaffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfff9fffffffbffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffdaaaaffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x3f2c678e38d1104c; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x3f2c678e38d1104c; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfff9fffffffbffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffdaaaaffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfff9fffffffbffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffdaaaaffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvdiv_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000d000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000d000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000583800; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000100000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000583800; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000100000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000d0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000d0000; ++ __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op1[2]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_op1[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op1[0]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffe00000002; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffe00000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffe00000002; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffe00000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffff30000000b; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffff3fffffff3; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffff30000000b; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffff3fffffff3; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0010000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0008000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0010000000000000; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0x0008000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000007fef; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000007fef; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000007fef; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000007fef; ++ __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000008050501; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0100000100000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000008050501; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0100000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x01010101010101c9; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x01010101010101c9; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000003f; ++ __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ff88ff88; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ff88ff88; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[3]) = 0x001fffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x001fffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffffe; ++ __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-2.c +new file mode 100644 +index 000000000..fd8b6d38c +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-2.c +@@ -0,0 +1,500 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w 
-fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0202020202020202; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0202810102020202; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0202020202020202; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0202810102020202; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000003f; ++ __m256i_out = __lasx_xvdiv_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000483800; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffba0c05; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000483800; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffba0c05; ++ *((unsigned long *)&__m256i_op1[3]) = 0x5353535353535353; ++ *((unsigned long *)&__m256i_op1[2]) = 0x5353535353535353; ++ *((unsigned long *)&__m256i_op1[1]) = 0x5353535353535353; ++ *((unsigned long *)&__m256i_op1[0]) = 0x5353535353535353; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0303030303020000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0303030303020000; ++ __m256i_out = __lasx_xvdiv_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffdfffdfffdfffd; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0100010001000100; ++ __m256i_out = 
__lasx_xvdiv_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000030b8; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000030b8; ++ *((unsigned long *)&__m256i_op1[3]) = 0x9fe7fffffffff32e; ++ *((unsigned long *)&__m256i_op1[2]) = 0x6040190ddfdd8587; ++ *((unsigned long *)&__m256i_op1[1]) = 0xecd011542d2cc4c7; ++ *((unsigned long *)&__m256i_op1[0]) = 0x6040190dffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_result[3]) = 0x7f7fff7f7f7fff7f; ++ *((unsigned long *)&__m256i_result[2]) = 0x7f7fff7f7f7fff7f; ++ *((unsigned long *)&__m256i_result[1]) = 0x7f7fff7f7f7fff7f; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f7fff7f7f7fff7f; ++ __m256i_out = __lasx_xvdiv_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff97a2; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffff97a2; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000001010000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000001010000; ++ __m256i_out = __lasx_xvdiv_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000007fee; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000007fee; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000007fee; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000007fee; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ff00; ++ *((unsigned long *)&__m256i_op0[2]) = 0xe2e2e202ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ff00; ++ *((unsigned long *)&__m256i_op0[0]) = 0xe2e2e202ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffc6ffc6003a003a; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffc6ffc6003a003a; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000465; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000465; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000010001; ++ __m256i_out = __lasx_xvdiv_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0101010202020203; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101010201010102; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0101010202020203; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101010201010102; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00020001ffb6ffe0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0049004200000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ff80; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000468600007f79; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000f3280000dfff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffb7; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000004c00000000; ++ __m256i_out = __lasx_xvdiv_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000003fb000003fb; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000003fb000003fb; ++ __m256i_out = __lasx_xvdiv_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1fe01e0100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1fe01e0100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1fe01e0100000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000001; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0x1fe01e0100000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvdiv_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ff827f80; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0226823c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ff827f80; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0226823c; ++ *((unsigned long *)&__m256i_op1[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op1[2]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op1[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op1[0]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x007d003e007d003e; ++ *((unsigned long *)&__m256i_op1[2]) = 0x007d003effa80010; ++ *((unsigned long *)&__m256i_op1[1]) = 0x007d003e007d003e; ++ *((unsigned long *)&__m256i_op1[0]) = 0x007d003effa80010; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xf800f800f800f800; ++ *((unsigned long *)&__m256i_op1[2]) = 0xf800f800f800f800; ++ *((unsigned long *)&__m256i_op1[1]) = 0xf800f800f800f800; ++ *((unsigned long *)&__m256i_op1[0]) = 0xf800f800f800f800; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvdiv_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op1[2]) = 0x01ffff4300ffff00; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op1[0]) = 0x01ffff4300ffff00; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000008000000100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000008000000100; ++ __m256i_out = __lasx_xvdiv_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffe00000001; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000007fff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000007fff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000008000; ++ __m256i_out = __lasx_xvdiv_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x6300000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x6300000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x9cffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x9cffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001000000010000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000000010000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000045; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000d0005; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000045; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000d0005; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000013b13380; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000013b13380; ++ __m256i_out = __lasx_xvdiv_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000e2e20000e2e2; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00011d1c00011d9c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000e2e20000e2e2; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00011d1c00011d9c; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000e2e20000e2e2; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00011d1c00011d9c; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000e2e20000e2e2; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00011d1c00011d9c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvdiv_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x232221201f1e1d1c; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1b1a191817161514; ++ *((unsigned long *)&__m256i_op1[1]) = 0x232221201f1e1d1c; ++ *((unsigned long *)&__m256i_op1[0]) = 0x1b1a191817161514; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000010100000101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000010100000101; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000101; ++ __m256i_out = __lasx_xvdiv_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010202020203; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010201010102; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010202020203; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010201010102; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffff0fffffff0; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffff0fffffff0; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffff0fffffff0; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffff0fffffff0; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-1.c +new file mode 100644 +index 000000000..6f34f6ffc +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-1.c +@@ -0,0 +1,395 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00000000000000; ++ __m256i_out = __lasx_xvmod_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x8001b72e0001b72e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8001b72eaf12d5f0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000247639d9cb530; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8001b72eaf12d5f0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff017e01fe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x017e00ff017e00ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff017e01fe; ++ *((unsigned long *)&__m256i_result[3]) = 0xff81ffe50001ffe5; ++ *((unsigned long *)&__m256i_result[2]) = 0xff81ffe5ffa6ffc6; ++ *((unsigned long *)&__m256i_result[1]) = 0x000200aafe9affe5; ++ *((unsigned long *)&__m256i_result[0]) = 0xff81ffe5ffa6ffc6; ++ __m256i_out = __lasx_xvmod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 
0x2020202020202020; ++ *((unsigned long *)&__m256i_op1[2]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_op1[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op1[0]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x07efefefefefefee; ++ *((unsigned long *)&__m256i_op1[2]) = 0x07efefefefefefee; ++ *((unsigned long *)&__m256i_op1[1]) = 0x07efefefefefefee; ++ *((unsigned long *)&__m256i_op1[0]) = 0x07efefefefefefee; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x80008000b70fb810; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3c0f3c0f3911b910; ++ *((unsigned long *)&__m256i_op0[1]) = 0x80008000b70fb810; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3c0f3c0f3911b910; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffff6f20; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000781e0000f221; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffff6f20; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000781e0000f221; ++ __m256i_out = __lasx_xvmod_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xc800c800c800c800; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8800c800c800c801; ++ *((unsigned long *)&__m256i_op1[1]) = 0xc800c800c800c800; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8800c800c800c801; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101000101010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101000101010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101000101010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101000101010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fe36364661af18f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fe363637fe36363; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fe36364661af18f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fe363637fe36363; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101000101010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101000101010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101000101010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101000101010001; ++ __m256i_out = __lasx_xvmod_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xc3030000ff800000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xc3030000ff800000; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x41dffbffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffff00ff800000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x41dffbffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffff00ff800000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000010100; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000001000100; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000800000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000800080000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xc9d8080067f50020; ++ *((unsigned long *)&__m256i_op1[0]) = 0xc70000020000c000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000010100; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000001000100; ++ __m256i_out = 
__lasx_xvmod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff3cff3cff3cff3c; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff3cff3cff3cff3c; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff3cff3cff3cff3c; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff3cff3cff3cff3c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op1[0]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000001400000014; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000001400000014; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000001400000014; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000001400000014; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000001010000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000007efeff00; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000001010000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000007efeff00; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000008e7c00; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000067751500; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000008e7c00; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000067751500; ++ __m256i_out = __lasx_xvmod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffefffef00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffefffefffefffef; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffefffef00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffefffefffefffef; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff00ff0000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff00ff0000000000; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_result[3]) = 0xffefffef00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffefffefffefffef; ++ *((unsigned long *)&__m256i_result[1]) = 0xffefffef00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffefffefffefffef; ++ __m256i_out = __lasx_xvmod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00fe00fe00fe00fe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00fe00fe00fe00fe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00fe00fe00fe00fe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00fe00fe00fe00fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc848c848c848c848; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8848c848c848c848; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc848c848c848c848; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8848c848c848c848; ++ *((unsigned long *)&__m256i_op1[3]) = 0xc848c848c848c848; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8848c848c848c848; ++ *((unsigned long *)&__m256i_op1[1]) = 0xc848c848c848c848; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8848c848c848c848; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001000e0001000e; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001000e0001000e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000e0001000e; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001000e0001000e; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-2.c +new file mode 100644 +index 000000000..d0a9e9d2f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-2.c +@@ -0,0 +1,410 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, 
unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x1e18000000000000; ++ __m256i_out = __lasx_xvmod_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0909090909090909; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0909090909090909; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0909090909090909; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0909090909090909; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x01010101010101c9; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x01010101010101c9; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000200000002; ++ __m256i_out = __lasx_xvmod_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvmod_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffe000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffe000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000e000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000e000; ++ __m256i_out = __lasx_xvmod_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffe06df8d7; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffbe8b470f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7ffffffffffff7ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffe06df0d7; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7ffffffffffff7ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffbe8b470f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff9fff9fff9fff9; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff90000fff9fff9; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfff9fff9fff9fff9; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff90000fff9fff9; ++ *((unsigned long *)&__m256i_op1[3]) = 0x108659e46485f7e1; ++ *((unsigned long *)&__m256i_op1[2]) = 0x4df5b1a3ed5e02c1; ++ *((unsigned long *)&__m256i_op1[1]) = 0x108659e46485f7e1; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0x4df5b1a3ed5e02c1; ++ *((unsigned long *)&__m256i_result[3]) = 0x081abb9d36ee1037; ++ *((unsigned long *)&__m256i_result[2]) = 0x1617eb17129bfd38; ++ *((unsigned long *)&__m256i_result[1]) = 0x081abb9d36ee1037; ++ *((unsigned long *)&__m256i_result[0]) = 0x1617eb17129bfd38; ++ __m256i_out = __lasx_xvmod_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvmod_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfe8bfe0efe8bfe12; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfe8bfe0efe8bfe12; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7171717171717171; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8e8e8e8e8e8e8e8e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7171717171717171; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8e8e8e8e8e8e8e8e; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000005500000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0001005500020000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000005500000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001005500020000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000100010001fffe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000100010001fffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000005500000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000005400000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000005500000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000005400000002; ++ __m256i_out = __lasx_xvmod_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffff800000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x007f0000ff807f81; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0xffffffffff800000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x007f0000ff807f81; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000ff0000; ++ __m256i_out = __lasx_xvmod_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff8000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffff8000; ++ __m256i_out = __lasx_xvmod_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000000c; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000000c; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000010100000101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000010100000101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000010100000101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000010100000101; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000000c; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x000000000000000c; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff000000010000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000095120000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xc9da000063f50000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xc7387fff6bbfffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvmod_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1fffffff1fffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0383634303836343; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1fffffff1fffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0383634303836343; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000001000000; ++ __m256i_out = __lasx_xvmod_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00007f8000007f80; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00007f8000007f80; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00007f8000007f80; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00007f8000007f80; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmul.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmul.c +new file mode 100644 +index 000000000..be3c8e718 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmul.c +@@ -0,0 +1,620 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffd1b24e00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffcea54ffff29a8; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff8cad88ff8306b4; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffc1278fffce4c8; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0e2d5626ff75cdbc; ++ *((unsigned long *)&__m256i_op1[2]) = 0x5db4b156e2002a78; ++ *((unsigned long *)&__m256i_op1[1]) = 0xeeffbeb03ba3e6b0; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x0c16e25eb28d27ea; ++ *((unsigned long *)&__m256i_result[3]) = 0xf96d674800000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x44a4330e2c7116c0; ++ *((unsigned long *)&__m256i_result[1]) = 0x14187a7822b653c0; ++ *((unsigned long *)&__m256i_result[0]) = 0xfbe0b866962b96d0; ++ __m256i_out = __lasx_xvmul_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffff01ffffff08; ++ *((unsigned long *)&__m256i_op1[2]) = 0x43700f0100003008; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffff01ffffff08; ++ *((unsigned long *)&__m256i_op1[0]) = 0x43700f0100003008; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000ff000000f8; ++ *((unsigned long *)&__m256i_result[2]) = 0xbc8ff0ffffffcff8; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000ff000000f8; ++ *((unsigned long *)&__m256i_result[0]) = 0xbc8ff0ffffffcff8; ++ __m256i_out = __lasx_xvmul_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x353bb67af686ad9b; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x353bb67af686ad9b; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0200000200000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x2c27000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0200000200000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x2c27000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x1cfd000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x1cfd000000000000; ++ __m256i_out = __lasx_xvmul_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000180000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xc080ffff0049ffd2; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0002ff80ffb70000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000fffeffb9ff9d; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00010000002fff9e; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffd2; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ff8000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000080000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000080000000; ++ __m256i_out = __lasx_xvmul_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00fe01f000010000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000c40086; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00fe01f000010000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000c40086; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000001900000019; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000001900000019; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000001900000019; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000001900000019; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000007fff003f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000007fff003f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000627; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000627; ++ __m256i_out = __lasx_xvmul_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffd5a98; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000fffd5a98; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000007f3a40; ++ __m256i_out = __lasx_xvmul_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000400; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000400; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x120e120dedf1edf2; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x120e120dedf1edf2; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000907; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000907; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_result[3]) = 0x1010000010100000; ++ *((unsigned long *)&__m256i_result[2]) = 0x1010000010100000; ++ *((unsigned long *)&__m256i_result[1]) = 0x1010000010100000; ++ *((unsigned long *)&__m256i_result[0]) = 0x1010000010100000; ++ __m256i_out = __lasx_xvmul_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00007fff00000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0040000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00007fff00000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000044444443; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7bbbbbbbf7777778; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000044444443; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7bbbbbbbf7777778; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffefffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffe0001fffe0003; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffe0001fffe0003; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffe0001fffe0003; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffe0001fffe0003; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffe0001fffe0003; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffe0001fffe0003; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[3]) = 0xdf00000052a00000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x5b7f00ff5b7f00ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xdf00000052a00000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x5b7f00ff5b7f00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffff30000000b; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffff3fffffff3; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffff30000000b; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffff3fffffff3; ++ *((unsigned long *)&__m256i_op1[3]) = 0xbc30c40108a45423; ++ *((unsigned long *)&__m256i_op1[2]) = 0xbc263e0e5d00e69f; ++ *((unsigned long *)&__m256i_op1[1]) = 0xbc30c40108a4544b; ++ *((unsigned long *)&__m256i_op1[0]) = 0xbc20e63aa8b9663f; ++ *((unsigned long *)&__m256i_result[3]) = 0x71860bf35f0f9d81; ++ *((unsigned long *)&__m256i_result[2]) = 0x720ed94a46f449ed; ++ *((unsigned long *)&__m256i_result[1]) = 0x71860bf35f0f9f39; ++ *((unsigned long *)&__m256i_result[0]) = 0x72544f0e6e95cecd; ++ __m256i_out = __lasx_xvmul_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x111ebb784f9c4100; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1c386546809f3b50; ++ *((unsigned long *)&__m256i_op0[1]) = 0x111ebb784f9bf1ac; ++ *((unsigned long *)&__m256i_op0[0]) = 0x21f6050d955d3f68; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff00000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xbab0c4b000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xaa0ac09800000000; ++ __m256i_out = __lasx_xvmul_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8011ffee804c004c; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00faff0500c3ff3c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x80f900f980780078; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0057ffa800ceff31; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_d 
(__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00bf00bf00bf00bf; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00bf00bf00bf00bf; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00bf00bf00bf00bf; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00bf00bf00bf00bf; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000011; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000011; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000088; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000088; ++ __m256i_out = __lasx_xvmul_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xc0008000c0008000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xc0008000c0008000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000800080008000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x80008000fff98000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000800080008000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x80008000fff98000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffeffff97a1; ++ *((unsigned long *)&__m256i_op0[2]) = 
0xffffdf5b000041b0; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffeffff97a1; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffdf5b000041b0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00f8000000000008; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000800f800000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00f8000000000008; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000800f800000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xe3f7fff7fffcbd08; ++ *((unsigned long *)&__m256i_result[2]) = 0x0dbfa28000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xe3f7fff7fffcbd08; ++ *((unsigned long *)&__m256i_result[0]) = 0x0dbfa28000000000; ++ __m256i_out = __lasx_xvmul_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x7070545438381c1c; ++ *((unsigned long *)&__m256i_result[2]) = 0x7070545438381c1c; ++ *((unsigned long *)&__m256i_result[1]) = 0x7070545438381c1c; ++ *((unsigned long *)&__m256i_result[0]) = 0x7070545438381c1c; ++ __m256i_out = __lasx_xvmul_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffff000000; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1400080008000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1400080008000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1400080008000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x1400080008000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-1.c +new file mode 100644 +index 000000000..01ff71649 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-1.c +@@ -0,0 +1,590 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf96d674800000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x44a4330e2c7116c0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x14187a7822b653c0; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfbe0b866962b96d0; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffd1b24e00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffcea54ffff29a8; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff8cad88ff8306b4; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffc1278fffce4c8; ++ *((unsigned long *)&__m256i_result[3]) = 0xebfd15f000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x01700498ff8f1600; ++ *((unsigned long *)&__m256i_result[1]) = 0xf520c7c024221300; ++ *((unsigned long *)&__m256i_result[0]) = 0x00802fd0ff540a80; ++ __m256i_out = 
__lasx_xvmulwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffff90ffffff81; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffff90ffffff81; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000007; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffbdff3cffbdff44; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffbdff3cffbdff44; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000001dc; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000001dc; ++ __m256i_out = __lasx_xvmulwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff00ff00ee; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff00ff00ee; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffce; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000fc7c; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffce; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000fc7c; ++ __m256i_out = __lasx_xvmulwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001000100000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001000100000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0010100000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0010100000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf800f800f800c000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf800f800f800a000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf800f800f800e000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf800f800f800e000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff00ffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfff8080000004000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000080000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff8080000000000; ++ __m256i_out = __lasx_xvmulwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000200000002; ++ __m256i_out = __lasx_xvmulwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00001ff800000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xd8d8c00000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00001ff800000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xd8d8c00000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x3f80000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x3f80000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000ffff88ff88; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000ffff88ff88; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long *)&__m256i_op0[2]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long *)&__m256i_op0[0]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffff8ffffff08; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff00f800ffcff8; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffff8ffffff08; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00f800ffcff8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000003868686a20; ++ *((unsigned long *)&__m256i_result[2]) = 0x0045b8ae81bce1d8; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000003868686a20; ++ *((unsigned long *)&__m256i_result[0]) = 0x0045b8ae81bce1d8; ++ __m256i_out = __lasx_xvmulwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc06500550055ffab; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0xc06500550055ffab; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x6c6c6c6c6c6c6c6c; ++ *((unsigned long *)&__m256i_op1[2]) = 0x6c6c6c6c6c6c6c6c; ++ *((unsigned long *)&__m256i_op1[1]) = 0x6c6c6c6c6c6c6c6c; ++ *((unsigned long *)&__m256i_op1[0]) 
= 0x6c6c6c6c6c6c6c6c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00ffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00ffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfd12fd12fd12fd12; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfd12fd12fd12fd12; 
++ *((unsigned long *)&__m256i_op1[1]) = 0xfd12fd12fd12fd12; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfd12fd12fd12fd12; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000060000108; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000001060005; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000007fef0001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvmulwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x201fdfe0201fdfe0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x201fdfe0201fdfe0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned 
long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvmulwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000017bfffff0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000180007fe8; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000017bfffff0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000180007fe8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000000fd; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000062d4; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-2.c +new file mode 100644 +index 000000000..32088f4ae +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-2.c +@@ -0,0 +1,590 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4e5cba76cdbaaa78; ++ *((unsigned long *)&__m256i_op0[2]) = 0xce68fdeb4e33eaff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4e45cc2dcda41b30; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4ccb1e5c4d6b21e4; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[1]) = 0x44bb2cd3a35c2fd0; ++ *((unsigned long *)&__m256i_result[0]) = 0xca355ba46a95e31c; ++ __m256i_out = __lasx_xvmulwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0202000002020202; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0202000002010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0202000002020202; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0202000002020000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x01fe000000ff00ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x01fe000001fe0000; ++ __m256i_out = __lasx_xvmulwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ 
*((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000000001ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffe0000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000000001ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffe0000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff80000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff80000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000f788f788; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000f788f788; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ff01ff68; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000070ff017de6; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ff01ff68; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000070ff017de6; ++ *((unsigned long *)&__m256i_op1[3]) = 0x761ed60b5d7f0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xdc9938afafe904f1; ++ *((unsigned long *)&__m256i_op1[1]) = 0x761ed60b5d7f0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xdc9938afafe904f1; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x00000000007f0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00004c9000e9d886; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000007f0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00004c9000e9d886; ++ __m256i_out = __lasx_xvmulwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffff328dfff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6651bfff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffff328dfff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x6651bfff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffe0001c3fe4001; ++ *((unsigned long *)&__m256i_result[0]) = 0x8ffe800100000000; ++ __m256i_out = __lasx_xvmulwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvmulwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff01ff010000fff9; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ff19; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff02ff020001fffa; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000100010001fffa; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[3]) = 0x00fe01ff0006ffcf; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000e62f8f; ++ *((unsigned long *)&__m256i_result[1]) = 0x00fe02fe0006ffd6; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000006ffd6; ++ __m256i_out = __lasx_xvmulwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x01010101010000ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffef; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000010; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffef; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000010; ++ *((unsigned long *)&__m256i_result[3]) = 0x0100feff0100eeef; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000001010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0100feff00feef11; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000001010; ++ __m256i_out = __lasx_xvmulwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2020202020202020; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x201fdfe0201fdfe0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x201fdfe0201fdfe0; ++ __m256i_out = __lasx_xvmulwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff47b4ffff5878; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000b84b0000a787; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffff47b4ffff5878; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000b84b0000a787; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000010100000101; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000010100000101; ++ __m256i_out = __lasx_xvmulwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ff1b00e4; ++ __m256i_out = __lasx_xvmulwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[3]) = 0x0807f7f80807f7f8; ++ *((unsigned long *)&__m256i_result[2]) = 0x0807f7f80807f7f8; ++ *((unsigned long *)&__m256i_result[1]) = 0x0807f7f80807f7f8; ++ *((unsigned long *)&__m256i_result[0]) = 0x0807f7f80807f7f8; ++ __m256i_out = __lasx_xvmulwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc2c2c2c2c2c29cc0; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc2c2c2c2c2c29cc0; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000004e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffe00000001; ++ __m256i_out = __lasx_xvmulwev_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvmulwev_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000000a; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000000a; ++ __m256i_out = __lasx_xvmulwev_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000007f000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000007fff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000007fff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0408040800008003; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0408040800008003; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0408040800008002; ++ *((unsigned long *)&__m256i_result[0]) = 0xfbf7fbf7ffff7ffd; ++ __m256i_out = __lasx_xvmulwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc192181230000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc192181230000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_op0[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_op1[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op1[2]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_op1[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op1[0]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_result[3]) = 0x04080c1014182d35; ++ *((unsigned long *)&__m256i_result[2]) = 0x716d696573765161; ++ *((unsigned long *)&__m256i_result[1]) = 0x04080c1014182d35; ++ *((unsigned long *)&__m256i_result[0]) = 0x716d696573765161; ++ __m256i_out = __lasx_xvmulwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-3.c +new file mode 100644 +index 000000000..19157f682 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-3.c +@@ -0,0 +1,605 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffffefe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000101; ++ *((unsigned 
long *)&__m256i_op1[1]) = 0xfffffffffffffefe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000101; ++ *((unsigned long *)&__m256i_result[3]) = 0xff01ff01ff01fe02; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0xff01ff01ff01fe02; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000000ff; ++ __m256i_out = __lasx_xvmulwev_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000505; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000004fb; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff8c80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fff0e400; ++ *((unsigned long *)&__m256i_op1[3]) = 0x80000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x80000000ffff8c80; ++ *((unsigned long *)&__m256i_op1[1]) = 0x80000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x80000000fff0e400; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ff01ff01; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ff01c000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x00000000ff01ff01; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000f1000000; ++ __m256i_out = __lasx_xvmulwev_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0xff01ff01ff01fe04; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0xff01ff01ff01fe04; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvmulwev_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000022ffdd; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000022ffdd; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000f4b6ff23; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000f4b6ff23; ++ __m256i_out = __lasx_xvmulwev_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000fffe00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000fffe00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x386000003df80000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x386000003df80000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x5fa0000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x5fa0000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000f20; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000009f0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvmulwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000002a5429; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000002a5429; ++ *((unsigned long *)&__m256i_op1[3]) = 0x417e01f040800000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x299d060000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x417e01f040800000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x29108b0000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0707feb60707b7d0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x45baa7ef6a95a985; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0707feb60707b7d0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x45baa7ef6a95a985; ++ *((unsigned long *)&__m256i_result[3]) = 0x0707b7cff8f84830; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000354ad4c28; ++ *((unsigned long *)&__m256i_result[1]) = 0x0707b7cff8f84830; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000354ad4c28; ++ __m256i_out = __lasx_xvmulwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff00d5007f00ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00ffffff00ffff; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0xff00d5007f00ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00ffffff00ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc03ae000ffff6000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc600000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc03ae000ffff6000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc600000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0080000000800000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0080000000800000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0080000000800000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0080000000800000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000ef; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000016e00; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000016e00; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000155b200; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000b70000; ++ __m256i_out = __lasx_xvmulwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000001ff03fe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffec75c2d209f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000001ff03fe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffec75c2d209f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000008b; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffff010000008b; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op0[2]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op0[0]) = 0x04f104f104f504ed; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x03af03af03af03af; ++ *((unsigned long *)&__m256i_op1[2]) = 0x03acfc5303260e80; ++ *((unsigned long *)&__m256i_op1[1]) = 0x03af03af03af03af; ++ *((unsigned long *)&__m256i_op1[0]) = 0x03acfc5303260e80; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000b0cfffff4f3; ++ *((unsigned long *)&__m256i_result[2]) = 0x000f9bb562f56c80; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000b0cfffff4f3; ++ *((unsigned long *)&__m256i_result[0]) = 0x000f9bb562f56c80; ++ __m256i_out = __lasx_xvmulwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3ff1808001020101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3ff1808001020101; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000ff7f1080ef8; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0100000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000ff7f1080ef8; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0100000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x003ff18080010201; ++ *((unsigned long *)&__m256i_result[2]) = 0x0100000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x003ff18080010201; ++ *((unsigned long *)&__m256i_result[0]) = 0x0100000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000f0000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000f0000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1f0fdf7f3e3b31d4; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1f0fdf7f3e3b31d4; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfbba01c0003f7e3f; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffc6cc05c64d960e; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfbd884e7003f7e3f; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff874dc687870000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffe367cc82f8989a; ++ *((unsigned long *)&__m256i_result[2]) = 0x4f90000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffc3aaa8d58f43c8; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffdfffffffdfffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffdfffffffdfffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0020000000200001; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0020000000200001; ++ __m256i_out = __lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffff000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffff01; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffff2; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffff01; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1010101010001000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x101010100000000e; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000ff000000fe; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff01feffff01ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000ff000000fe; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff01feffff01ff; ++ __m256i_out = __lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff02000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff02000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x5fa0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x5fa0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xe07de0801f20607a; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xe07de0801f20607a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-1.c +new file mode 100644 +index 000000000..80fdcda63 +--- /dev/null ++++ 
b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-1.c +@@ -0,0 +1,545 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7a7cad6eca32ccc1; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7a7cad6efe69abd1; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7a7cad6eca32ccc1; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7a7cad6efe69abd1; ++ *((unsigned long *)&__m256i_result[3]) = 0xff86005300360034; ++ *((unsigned long *)&__m256i_result[2]) = 0xff86005300020055; ++ *((unsigned long *)&__m256i_result[1]) = 0xff86005300360034; ++ *((unsigned long *)&__m256i_result[0]) = 0xff86005300020055; ++ __m256i_out = __lasx_xvmulwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x5980000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x5980000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x2c27000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x2c27000000000000; ++ __m256i_out = __lasx_xvmulwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff9fff9fff9fff9; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff90000fff9fff9; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfff9fff9fff9fff9; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff90000fff9fff9; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000010000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000010000; ++ __m256i_out = __lasx_xvmulwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000007f3a40; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000007f3a40; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000d24; ++ __m256i_out = __lasx_xvmulwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000044444443; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000073333333; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000044444443; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000073333333; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff0020001d001f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff0020001d001f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000fffe00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000fffe00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ca0000fff80000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00ca0000fff80000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010080; ++ __m256i_out = __lasx_xvmulwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvmulwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000015d050192cb; ++ *((unsigned long *)&__m256i_op0[2]) = 0x028e509508b16ee9; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000033ff01020e23; ++ *((unsigned long *)&__m256i_op0[0]) = 0x151196b58fd1114d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff0000ffff00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff000000ffffff00; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffffffff00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000fffffaff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffd7200fffff74f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000702f; ++ __m256i_out = __lasx_xvmulwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000010; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000010; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000808; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0408040800008003; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x04080408fff87803; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0707b7cff8f84830; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000354ad4c28; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0707b7cff8f84830; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000354ad4c28; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fffd5a98; ++ __m256i_out = __lasx_xvmulwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000007e8080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000007e8092; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000007e8080; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000007e8092; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffe07de080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000001f20607a; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffe07de080; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000001f20607a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000f6ff00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000f6ff00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000010000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000010000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000010000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000010000000000; ++ __m256i_out = __lasx_xvmulwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00153f1594ea02ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffffffff0100; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff15c1ea95ea02ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000003f0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000030007; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000003f0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000030007; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff9fffffffbffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffdaaaaffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfff9fffffffbffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffdaaaaffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfe7ffffffeffffc0; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xfe7ffffffeffffc0; ++ __m256i_out = __lasx_xvmulwod_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvmulwod_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000017fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000017fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-2.c +new file mode 100644 +index 000000000..1a4b221fe +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-2.c +@@ -0,0 +1,470 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, 
int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x01480000052801a2; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffdcff64; ++ *((unsigned long *)&__m256i_op1[3]) = 0xbea2e127c046721f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1729c073816edebe; ++ *((unsigned long *)&__m256i_op1[1]) = 0xde91f010000006f9; ++ *((unsigned long *)&__m256i_op1[0]) = 0x5ef1f90efefaf30d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00170000028500de; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fd02f20d; ++ __m256i_out = __lasx_xvmulwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000fffffffa; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000fffffffa; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fc000007fc00000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x7fc000007fc00000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4ffc3f7800000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3fc03f6400000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4ffc3f7800000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3fc03f6400000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x4eb13ec100000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x3ec13ec100000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x4eb13ec100000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x3ec13ec100000000; ++ __m256i_out = __lasx_xvmulwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x457db03e457db03e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x457db03e45a87310; ++ *((unsigned long *)&__m256i_op0[1]) = 0x457db03e457db03e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x457db03e45a87310; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffefefefe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000040404040; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffefefefe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000040404040; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfe01fe01fd02fd02; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000003fc03fc0; ++ *((unsigned long *)&__m256i_result[1]) = 0xfe01fe01fd02fd02; ++ *((unsigned long *)&__m256i_result[0]) = 
0x000000003fc03fc0; ++ __m256i_out = __lasx_xvmulwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000400000004000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xc039000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xc039000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xc039000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xc039000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x05ea05ea05ea05ec; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x05ea05ea05ea05ec; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfd02fd02fd02fd02; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfd02fd02fd02fd02; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfd02fd02fd02fd02; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfd02fd02fd02fd02; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x04f104f104f104f1; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x04f104f104f104f1; ++ __m256i_out = __lasx_xvmulwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op1[2]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op1[1]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op1[0]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffe1; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffff10; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffe1; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffff10; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0ff8010000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0ff8010000000000; ++ __m256i_out = __lasx_xvmulwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x001175f10e4330e8; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff8f0842ff29211e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffff8d9ffa7103d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_op0[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00f800f800f800f8; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0018181800181818; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00f800f800f800f8; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0018181800181818; ++ *((unsigned long *)&__m256i_result[3]) = 0x001f1f3e3e1f1f00; ++ *((unsigned long *)&__m256i_result[2]) = 0x0003060909060300; ++ *((unsigned long *)&__m256i_result[1]) = 0x001f1f3e3e1f1f00; ++ *((unsigned long *)&__m256i_result[0]) = 0x0003060909060300; ++ __m256i_out = __lasx_xvmulwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x800000ff000000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x800000ff000000ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0fffffff0fffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x90007fff90008000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0ffffffe90008000; ++ *((unsigned 
long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x4800408ef07f7f01; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0800000eeffffe02; ++ __m256i_out = __lasx_xvmulwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvmulwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x01fc03e000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x01fc03e000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00fffb0402fddf20; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00fffb0402fddf20; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001fbf9fbe29f52; ++ *((unsigned long *)&__m256i_result[2]) = 0x5b409c0000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001fbf9fbe29f52; ++ *((unsigned long *)&__m256i_result[0]) = 0x5b409c0000000000; ++ __m256i_out = 
__lasx_xvmulwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000c0000005; ++ *((unsigned long *)&__m256i_op0[2]) = 0x21f8c3c4c0000005; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000c0000005; ++ *((unsigned long *)&__m256i_op0[0]) = 0x21f8c3c4c0000005; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffff8000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000043efffff8000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffff8000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000043efffff8000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xbfffa004fffd8000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xbfffa004fffd8000; ++ __m256i_out = __lasx_xvmulwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000100010; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0010001000100010; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000100010; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffff6361; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4d0a902890b800dc; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffff6361; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4d0a902890b800dc; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000001ff03ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000203ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000001ff03ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000203ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000001ff03fe; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffec75c2d209f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000001ff03fe; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffec75c2d209f; ++ __m256i_out = __lasx_xvmulwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffff10; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffff10; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op1[2]) = 0x04f104f104f504ed; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op1[0]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0002fffc; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff0000fffd0003; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0002fffc; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff0000fffd0003; ++ __m256i_out = __lasx_xvmulwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-3.c +new file mode 100644 +index 000000000..9fcd3ce0c +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-3.c +@@ -0,0 +1,440 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned 
long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3f2c678e38d1104c; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3f2c678e38d1104c; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00aa000000ac00fe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00aa000000ac00fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvmulwod_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xdf01010153a10101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5b7f01ff5b7f10ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xdf01010153a10101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5b7f01ff5b7f10ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffd8ffc7ffdaff8a; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffd8ffc7ffdaff8a; ++ *((unsigned long *)&__m256i_result[3]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000080000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000080000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ff80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fff01fd7fff7fff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00007fff7fff7fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x00007ffe81fdfe03; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000000f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvmulwod_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000001a00; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000001a00; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000017f00007f7f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00007f0000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000001ffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000001ffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000000fd; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff810000000000; ++ __m256i_out = __lasx_xvmulwod_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x71860bf35f0f9d81; ++ *((unsigned long *)&__m256i_op0[2]) = 0x720ed94a46f449ed; ++ *((unsigned long *)&__m256i_op0[1]) = 0x71860bf35f0f9f39; ++ *((unsigned long *)&__m256i_op0[0]) = 0x72544f0e6e95cecd; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff8910ffff7e01; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff3573ffff8960; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0xffff8910ffff1ca9; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffff5e5ffff8130; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffcb423a587053; ++ *((unsigned long *)&__m256i_result[2]) = 0x6d46f43e71141b81; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffcb423a584528; ++ *((unsigned long *)&__m256i_result[0]) = 0x9bdf36c8d78158a1; ++ __m256i_out = __lasx_xvmulwod_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x80000000ff800000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x80000000ff800000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x800000007fff0001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x80000000ff7f0001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x800000007fff0001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x80000000ff7f0001; ++ *((unsigned long *)&__m256i_result[3]) = 0xbfffffffffff8000; ++ *((unsigned long *)&__m256i_result[2]) = 0xbfff800080000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xbfffffffffff8000; ++ *((unsigned long *)&__m256i_result[0]) = 0xbfff800080000000; ++ __m256i_out = __lasx_xvmulwod_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffe00000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffe00000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000000007f8; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000002de; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000000007f8; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000002de; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000007f7; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffff808; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000007f7; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffff808; ++ __m256i_out = __lasx_xvmulwod_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xf5fffc00fc000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xf5fffc00fc000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xf5fffc00fc000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xf5fffc00fc000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +-- 
+2.33.0 + diff --git a/LoongArch-Add-tests-for-ASX-vector-xvpackev-xvpackod.patch b/LoongArch-Add-tests-for-ASX-vector-xvpackev-xvpackod.patch new file mode 100644 index 0000000000000000000000000000000000000000..efb9490f8527dba0776e79df28d64b0b294865ed --- /dev/null +++ b/LoongArch-Add-tests-for-ASX-vector-xvpackev-xvpackod.patch @@ -0,0 +1,5364 @@ +From 9789698300a07a107bf78cd1c7fb9cf8fbddfca1 Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Tue, 12 Sep 2023 17:07:28 +0800 +Subject: [PATCH 119/124] LoongArch: Add tests for ASX vector + xvpackev/xvpackod/xvpickev/xvpickod/ + xvpickve2gr/xvreplgr2vr/xvreplve/xvreplve0/xvreplvei/xvshuf4i/xvshuf + instructions. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lasx/lasx-xvpackev.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvpackod.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvpickev.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvpickod.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvpickve.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvpickve2gr.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvreplgr2vr.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvreplve.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvreplve0.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvreplvei.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvshuf4i_b.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvshuf_b.c: New test. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lasx/lasx-xvpackev.c | 501 ++++++++++++ + .../loongarch/vector/lasx/lasx-xvpackod.c | 575 +++++++++++++ + .../loongarch/vector/lasx/lasx-xvpickev.c | 515 ++++++++++++ + .../loongarch/vector/lasx/lasx-xvpickod.c | 530 ++++++++++++ + .../loongarch/vector/lasx/lasx-xvpickve.c | 130 +++ + .../loongarch/vector/lasx/lasx-xvpickve2gr.c | 388 +++++++++ + .../loongarch/vector/lasx/lasx-xvreplgr2vr.c | 380 +++++++++ + .../loongarch/vector/lasx/lasx-xvreplve.c | 536 ++++++++++++ + .../loongarch/vector/lasx/lasx-xvreplve0.c | 471 +++++++++++ + .../loongarch/vector/lasx/lasx-xvreplvei.c | 20 + + .../loongarch/vector/lasx/lasx-xvshuf4i_b.c | 430 ++++++++++ + .../loongarch/vector/lasx/lasx-xvshuf_b.c | 761 ++++++++++++++++++ + 12 files changed, 5237 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackev.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackod.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickev.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickod.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve2gr.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplgr2vr.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve0.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplvei.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf4i_b.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf_b.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackev.c 
b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackev.c +new file mode 100644 +index 000000000..33b96d657 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackev.c +@@ -0,0 +1,501 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x81f7f2599f0509c2; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x51136d3c78388916; ++ *((unsigned long *)&__m256i_op1[3]) = 0x044819410d87e69a; ++ *((unsigned long *)&__m256i_op1[2]) = 0x21d3905ae3e93be0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x5125883a30da0f20; ++ *((unsigned long *)&__m256i_op1[0]) = 0x6d7b2d3ac2777aeb; ++ *((unsigned long *)&__m256i_result[3]) = 0x000019410000e69a; ++ *((unsigned long *)&__m256i_result[2]) = 0xf259905a09c23be0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000883a00000f20; ++ *((unsigned long *)&__m256i_result[0]) = 0x6d3c2d3a89167aeb; ++ __m256i_out = __lasx_xvpackev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4f8000004f800000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4f7fffbf0000fe00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000004f800000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x4f7fffe64f7fffc0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfe02fe02fee5fe22; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff49fe4200000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffbf0000fe000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000fe020000fe22; ++ *((unsigned long *)&__m256i_result[0]) = 0xffe6fe42ffc00000; ++ __m256i_out = __lasx_xvpackev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ff80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000ff80; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackev_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc06500550055ffab; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc06500550055ffab; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ 
*((unsigned long *)&__m256i_result[3]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00550000ffab0001; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00550000ffab0001; ++ __m256i_out = __lasx_xvpackev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000400000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000400000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000400000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000400000000; ++ __m256i_out = __lasx_xvpackev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out 
= __lasx_xvpackev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x01fe01fe00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x01fe01fe00000000; ++ __m256i_out = __lasx_xvpackev_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) 
= 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff00ffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00ffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff00ffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff00ffffffffff; ++ __m256i_out = __lasx_xvpackev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvpackev_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000154dc84; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000089; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000154dc84; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000089; ++ __m256i_out = __lasx_xvpackev_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000200; ++ __m256i_out = __lasx_xvpackev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fc000007fc00000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fc000007fc00000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0080010000800100; ++ *((unsigned long *)&__m256i_result[2]) = 0x00c0000000c00000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0080010000800100; ++ *((unsigned long *)&__m256i_result[0]) = 0x00c0000000c00000; ++ __m256i_out = __lasx_xvpackev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000001fdfffffe02; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000001fefe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff01fefffeff02; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 
0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000fd00ffff02ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000001fffeff; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00fe00feff02ff; ++ __m256i_out = __lasx_xvpackev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8011ffee804c004c; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00faff0500c3ff3c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x80f900f980780078; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0057ffa800ceff31; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffee0000004c0000; ++ *((unsigned long *)&__m256i_result[2]) = 0xff050000ff3c0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00f9000000780000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffa80000ff310000; ++ __m256i_out = __lasx_xvpackev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvpackev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000001d0000001d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00001d0000001d00; ++ __m256i_out = __lasx_xvpackev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00003fe000000000; ++ __m256i_out = __lasx_xvpackev_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); 
++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackev_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00000000; ++ __m256i_out = __lasx_xvpackev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x6); ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ff890000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ff790000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ff890000; ++ *((unsigned long *)&__m256i_op1[0]) = 
0x00000000ff790000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ff790000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ff790000; ++ __m256i_out = __lasx_xvpackev_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x41dffbffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffff00ff800000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x41dffbffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffff00ff800000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfbff0000ffff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfbff0000ffff0000; ++ *((unsigned long *)&__m256i_result[0]) = 
0xff00000000000000; ++ __m256i_out = __lasx_xvpackev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffe7ffffffe7; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffe7ffffffe7; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffe7ffffffe7; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffe7ffffffe7; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000007b007e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000007b007e; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffe700000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffe7007b007e; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffe700000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffe7007b007e; ++ __m256i_out = __lasx_xvpackev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000008000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0003fffc0803fff8; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000008000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0003fffc0803fff8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000fffc0000fff8; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000fffc0000fff8; ++ __m256i_out = __lasx_xvpackev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackod.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackod.c +new file mode 100644 +index 000000000..cdd20e881 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackod.c +@@ -0,0 +1,575 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ 
*((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7f057f0b7f5b007f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7f00ff00ff00ff00; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f7fff7fff7fff00; ++ __m256i_out = __lasx_xvpackod_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff00000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007fffffff; ++ __m256i_out = __lasx_xvpackod_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000fff00000fff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000fff00000fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000fff00000fff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000fff00000fff; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256i_result[2]) = 0xff00ff0fff005f0f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00ff0fff005f0f; ++ __m256i_out = __lasx_xvpackod_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = 
__lasx_xvpackod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff000607f7; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000010017e7d1; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff000607f7; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000001001807f1; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000007; ++ __m256i_out = __lasx_xvpackod_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0002555500000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0002555500000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x555555553f800000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x555555553f800000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000002a542a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000002a542a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000005400; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000005400; ++ __m256i_out = __lasx_xvpackod_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvpackod_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0007fff8000ffff0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000007fff8; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0007fff8000ffff0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000007fff8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0007fff8000ffff0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[0]) = 0x0007fff8000ffff0; ++ __m256i_out = __lasx_xvpackod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffefffef00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffefffefffefffef; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffefffef00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffefffefffefffef; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[3]) = 0xff00ff0000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_result[1]) = 0xff00ff0000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00ff00ff00ff00; ++ __m256i_out = __lasx_xvpackod_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000f0000000f000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000f0000000f000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000f0000000f000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000f0000000f000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000f0000000f000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000f0000000f000; ++ __m256i_out = __lasx_xvpackod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 
0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvpackod_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000c8; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000c8; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000022beb03f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffa2beb040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000022beb03f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffa2beb040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000022beb03f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffa2beb040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000022beb03f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffa2beb040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000022be22be; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7fffa2bea2be; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000022be22be; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff7fffa2bea2be; ++ __m256i_out = __lasx_xvpackod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); 
++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfff10000fff10000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff10000fff10000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfff10000fff10000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfff10000fff10000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff10000fff10000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff10000fff10000; ++ __m256i_out = __lasx_xvpackod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff10000fff10000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff10000fff10000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff1000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff1000000000000; ++ __m256i_out = __lasx_xvpackod_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000200000008; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000200000008; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0xffffff00ffffff00; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffff00ffffff00; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ff0000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ff0000000000; ++ __m256i_out = __lasx_xvpackod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000555500005555; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000555500005555; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000555500005555; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000555500005555; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000fffcfffcfffc; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000fffcfffcfffc; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000fffcfffcfffc; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000fffcfffcfffc; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_d (__m256i_op0, 
__m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000a0008; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000a0008; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ff88ff88; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ff88ff88; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000080000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000080000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ff88ff88; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x00000000ff88ff88; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000fff8ffc0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ff00fff8ffc0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000fff8ffc0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ff00fff8ffc0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000fff80000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000fff80000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000fff80000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fff80000; ++ __m256i_out = __lasx_xvpackod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvpackod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fff00017fff0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff00017fff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fff00017fff0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fff00017fff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007fff00007fff; ++ __m256i_out = __lasx_xvpackod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickev.c 
b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickev.c +new file mode 100644 +index 000000000..66faa74d0 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickev.c +@@ -0,0 +1,515 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffff90ffffff81; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffff90ffffff81; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000007f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000007f; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ff90ff81; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000007f; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ff90ff81; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000007f; ++ __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickev_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffe81; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffe81; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000001341c4000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000001000310000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000033e87ef1; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000002e2100; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000000011c00; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000e8f1; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000103100; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000002e00; ++ __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000004290; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000004290; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000004290; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000004290; ++ __m256i_out 
= __lasx_xvpickev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001; 
++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xbfbfbfbfbfbfbfbf; ++ *((unsigned long *)&__m256i_op1[2]) = 0xbfbfbfbfbfbfbfbf; ++ *((unsigned long *)&__m256i_op1[1]) = 0xbfbfbfbfbfbfbfbf; ++ *((unsigned long *)&__m256i_op1[0]) = 0xbfbfbfbfbfbfbfbf; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xbfbfbfbfbfbfbfbf; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xbfbfbfbfbfbfbfbf; ++ __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[0]) = 0x0000000000010000; ++ __m256i_out = __lasx_xvpickev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvpickev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xa020202020202020; ++ *((unsigned long *)&__m256i_op1[2]) = 0xa020202020206431; ++ *((unsigned long *)&__m256i_op1[1]) = 0xa020202020202020; ++ *((unsigned long *)&__m256i_op1[0]) = 0xa020202020206431; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x2020202020202031; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x2020202020202031; ++ __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0004040404000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0004040404000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0004040404000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0004040404000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0404000004040000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0404000004040000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0xf800d0d8ffffeecf; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000383fffffdf0d; ++ *((unsigned long *)&__m256i_op1[1]) = 0xf800d0d8ffffeecf; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000383fffffdf0d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0xd0d8eecf383fdf0d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0xd0d8eecf383fdf0d; ++ __m256i_out = __lasx_xvpickev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 
0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickev_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xdf80ff20df80ff20; ++ *((unsigned long *)&__m256i_op0[2]) = 0xdfc2ff20df80ffa7; ++ *((unsigned long *)&__m256i_op0[1]) = 0xdf80ff20df80ff20; ++ *((unsigned long *)&__m256i_op0[0]) = 0xdfc2ff20df80ffa7; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x80208020c22080a7; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x80208020c22080a7; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvpickev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); 
++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000040000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000040000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000400; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000400; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffe0047d00e00480; ++ *((unsigned long *)&__m256i_op1[2]) = 0x001fc0200060047a; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffe0047d00e00480; ++ *((unsigned long *)&__m256i_op1[0]) = 0x001fc0200060047a; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xe07de0801f20607a; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xe07de0801f20607a; ++ __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000004; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000400000004; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000400000004; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvpickev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000800080010000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000800080010000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000800080010000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000800080010000; ++ __m256i_out = __lasx_xvpickev_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickev_b (__m256i_op0, 
__m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_result[3]) = 0x9ffffd8020010001; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffff9fffffff9; ++ *((unsigned long *)&__m256i_result[1]) = 0x9ffffd8020010001; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffff9fffffff9; ++ __m256i_out = __lasx_xvpickev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000070002000a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000600000006; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x000000070002000a; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000060002000a; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000060002000a; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvpickev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickod.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickod.c +new file mode 100644 +index 000000000..a9778809f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickod.c +@@ -0,0 +1,530 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int 
++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0003f8040002f607; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0002728b00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffff328dfff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x6651bfff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0003f8040002f607; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffff328dfff; ++ __m256i_out = __lasx_xvpickod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0080200000802000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0080200000802000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00200020ffffffff; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x1e0000001e000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00200020ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x1e0000001e000000; ++ __m256i_out = __lasx_xvpickod_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0080200000802000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0080200000802000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00800080ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00800080ffffffff; ++ __m256i_out = __lasx_xvpickod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvpickod_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvpickod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0004000400040004; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffff8c80; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000fffffe40; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000040004; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0400040004000400; ++ __m256i_out 
= __lasx_xvpickod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xb70036db12c4007e; ++ *((unsigned long *)&__m256i_op0[2]) = 0xb7146213fc1e0049; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000fefe02fffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xb71c413b199d04b5; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff017e00ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x017e00ff017e01fe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff017e00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0xb70012c4b714fc1e; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff00ff017e; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000fe02b71c199d; ++ *((unsigned long *)&__m256i_result[0]) = 0x017e017e00ff017e; ++ __m256i_out = __lasx_xvpickod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc080ffff0049ffd2; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0049ffd2; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000fffeffb9ff9d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x01620133004b0032; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0002ff80ffb70000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffb7ff80ffd0ffd8; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00010000002fff9e; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffb5ff80ffd0ffd8; ++ *((unsigned long *)&__m256i_result[3]) = 0xc080ffff0049ffd2; ++ *((unsigned long *)&__m256i_result[2]) = 0x0002ff80ffb70000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000fffeffb9ff9d; ++ *((unsigned long *)&__m256i_result[0]) = 0x00010000002fff9e; ++ __m256i_out = __lasx_xvpickod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xbabababababababa; ++ *((unsigned long *)&__m256i_op1[2]) = 0xbabababababababa; ++ *((unsigned long *)&__m256i_op1[1]) = 0xbabababababababa; ++ *((unsigned long *)&__m256i_op1[0]) = 0xbabababababababa; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xbabababababababa; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xbabababababababa; ++ __m256i_out = __lasx_xvpickod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000003f3f3f3c; ++ *((unsigned long *)&__m256i_op1[2]) = 0xc6c6c6c68787878a; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000003f3f3f3c; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8787878a00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00003f3fc6c68787; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00003f3f87870000; ++ __m256i_out = __lasx_xvpickod_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000007fff003f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op1[0]) = 0x000000007fff003f; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000007fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000007fff; ++ __m256i_out = __lasx_xvpickod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000002467db99; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000003e143852; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000002467db99; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000003e143852; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000044444443; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7bbbbbbbf7777778; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000044444443; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7bbbbbbbf7777778; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000246700003e14; ++ *((unsigned long *)&__m256i_result[2]) = 0x000044447bbbf777; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000246700003e14; ++ *((unsigned 
long *)&__m256i_result[0]) = 0x000044447bbbf777; ++ __m256i_out = __lasx_xvpickod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0002000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0002000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0006000000020000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0006000000020000; ++ __m256i_out = __lasx_xvpickod_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvpickod_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x007f00ff007f00ff; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xbff00000bff00000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xbff00000bff00000; ++ __m256i_out = __lasx_xvpickod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x9ff87f7f7f807f7f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x9ff87f7f7f807f7f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); 
++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffe98; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffe98; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000007f0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000007f0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000007f0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000007f0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000007f00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000007f00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000007f00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000007f00000000; ++ __m256i_out = __lasx_xvpickod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ff80; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvpickod_w (__m256i_op0, 
__m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve.c +new file mode 100644 +index 000000000..a2edbb80a +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve.c +@@ -0,0 +1,130 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x010180068080fff9; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvpickve_d (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickve_w (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00fe01f000010000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000c40086; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00fe01f000010000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000c40086; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000c40086; ++ __m256i_out = __lasx_xvpickve_d (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickve_d (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0002000200020002; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff820002ff820002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0002000200020002; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff820002ff820002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0002000200020002; ++ __m256i_out = __lasx_xvpickve_d (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x4000000000000000; ++ __m256i_out = __lasx_xvpickve_d (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickve_w (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickve_d (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ 
++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvpickve_w (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvpickve_w (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve2gr.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve2gr.c +new file mode 100644 +index 000000000..8bd3a8273 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve2gr.c +@@ -0,0 +1,388 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1,
fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0cc08723ff900001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xcc9b89f2f6cef440; ++ int_result = 0x0000000000000000; ++ int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x7); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ff80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ int_result = 0x000000000000ffff; ++ int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x6); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00000000; ++ unsigned_long_int_result = 0x0000000000000000; ++ unsigned_long_int_out = __lasx_xvpickve2gr_du (__m256i_op0, 0x3); ++ ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ff90ff81; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000007f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ff90ff81; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000007f; ++ int_result = 0x000000000000007f; ++ int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x4); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ int_result = 0x00000000ffffffff; ++ int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x1); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ unsigned_int_result = 0x0000000000000000; ++ unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffefdfffffefd; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ int_result = 0x00000000fffffefd; ++ int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x4); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ long_int_result = 0x0000000000000000; ++ long_int_out = __lasx_xvpickve2gr_d (__m256i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, long_int_result, long_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x555555553f800000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5555555580000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x555555553f800000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5555555580000000; ++ int_result = 0x0000000055555555; ++ int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x5); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ long_int_result = 0x0000000000000000; ++ long_int_out = __lasx_xvpickve2gr_d (__m256i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, long_int_result, long_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0002000400000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0002000200020006; ++ unsigned_int_result = 0x0000000000020006; ++ unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1f0fdf7f3e3b31d4; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1f0fdf7f3e3b31d4; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff8000000000000; ++ long_int_result = 0x1f0fdf7f3e3b31d4; ++ long_int_out = __lasx_xvpickve2gr_d (__m256i_op0, 0x1); ++ ASSERTEQ_int (__LINE__, long_int_result, long_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ unsigned_long_int_result = 0x0000000000000000; ++ unsigned_long_int_out = __lasx_xvpickve2gr_du (__m256i_op0, 0x3); ++ ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00fe01fc01fe01fc; ++ *((unsigned long *)&__m256i_op0[2]) = 0x012c002c001c0006; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00fe01fc01fe0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x012c002c001c000a; ++ long_int_result = 0xfe01fc01fe0000; ++ long_int_out = __lasx_xvpickve2gr_d (__m256i_op0, 0x1); ++ ASSERTEQ_int (__LINE__, long_int_result, long_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ unsigned_long_int_result = 0xffffffffffffffff; ++ unsigned_long_int_out = __lasx_xvpickve2gr_du (__m256i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_result = 0x0000000000000000; ++ int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ unsigned_long_int_result = 0x0000000000000000; ++ unsigned_long_int_out = __lasx_xvpickve2gr_du (__m256i_op0, 0x3); ++ ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_result = 0x0000000000000000; ++ int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x5); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffff0100; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ long_int_result = 0x00000000ffff0100; ++ long_int_out = __lasx_xvpickve2gr_d (__m256i_op0, 0x1); ++ ASSERTEQ_int (__LINE__,
long_int_result, long_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000; ++ int_result = 0x000000007ff00000; ++ int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x1); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ unsigned_long_int_result = 0x00000000ffffffff; ++ unsigned_long_int_out = __lasx_xvpickve2gr_du (__m256i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ unsigned_long_int_result = 0x0000000000000000; ++ unsigned_long_int_out = __lasx_xvpickve2gr_du (__m256i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000ff; ++ int_result = 0x0000000000000000; ++ int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x6); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ unsigned_long_int_result = 0xffffffffffffffff; ++ 
unsigned_long_int_out = __lasx_xvpickve2gr_du (__m256i_op0, 0x1); ++ ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ unsigned_int_result = 0x0000000000000000; ++ unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x5); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffff0100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffff0100000001; ++ unsigned_int_result = 0x0000000000000000; ++ unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x7); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff0008; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffff0008; ++ int_result = 0x0000000000000000; ++ int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x6); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_result = 0x0000000000000000; ++ int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x1); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000000; ++ long_int_result = 0x0000000000000000; ++ long_int_out = __lasx_xvpickve2gr_d (__m256i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, long_int_result, long_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ long_int_result = 0x0000000000000000; ++ long_int_out = __lasx_xvpickve2gr_d (__m256i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, long_int_result, long_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ long_int_result = 0x000000000000ffff; ++ long_int_out = __lasx_xvpickve2gr_d (__m256i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, long_int_result, long_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000100010; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000100010; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0010001000100010; ++ unsigned_int_result = 0x0000000000100010; ++ unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ unsigned_int_result = 0x0000000000000000; ++ unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x3); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ unsigned_long_int_result = 0x0000000000000000; ++ unsigned_long_int_out = __lasx_xvpickve2gr_du (__m256i_op0, 0x1); ++ ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000100040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000100040; ++ unsigned_int_result = 0x0000000000000040; ++ unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x6); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ unsigned_long_int_result = 0x0000000000000000; ++ unsigned_long_int_out = __lasx_xvpickve2gr_du (__m256i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ unsigned_long_int_result = 0x0000000000000000; ++ unsigned_long_int_out = __lasx_xvpickve2gr_du (__m256i_op0, 0x3); ++ ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_result = 0x0000000000000000; ++ int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x6); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ 
*((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ long_int_result = 0xffffffffffffffff; ++ long_int_out = __lasx_xvpickve2gr_d (__m256i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, long_int_result, long_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ unsigned_int_result = 0x00000000ffffffff; ++ unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x5); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ unsigned_int_result = 0x00000000ffffffff; ++ unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x4); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ unsigned_long_int_result = 0x0000000000000000; ++ unsigned_long_int_out = __lasx_xvpickve2gr_du (__m256i_op0, 0x1); ++ ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ unsigned_long_int_result = 0x0000000000000000; ++ 
unsigned_long_int_out = __lasx_xvpickve2gr_du (__m256i_op0, 0x3); ++ ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_result = 0x0000000000000000; ++ int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ int_result = 0x00000000ffffffff; ++ int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffd880; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffd880; ++ int_result = 0x0000000000000000; ++ int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplgr2vr.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplgr2vr.c +new file mode 100644 +index 000000000..81456bc1b +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplgr2vr.c +@@ -0,0 +1,380 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out,
__m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ int_op0 = 0x0000001b3c4c0a5c; ++ *((unsigned long *)&__m256i_result[3]) = 0x3c4c0a5c3c4c0a5c; ++ *((unsigned long *)&__m256i_result[2]) = 0x3c4c0a5c3c4c0a5c; ++ *((unsigned long *)&__m256i_result[1]) = 0x3c4c0a5c3c4c0a5c; ++ *((unsigned long *)&__m256i_result[0]) = 0x3c4c0a5c3c4c0a5c; ++ __m256i_out = __lasx_xvreplgr2vr_w (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000397541c58; ++ *((unsigned long *)&__m256i_result[3]) = 0x97541c5897541c58; ++ *((unsigned long *)&__m256i_result[2]) = 0x97541c5897541c58; ++ *((unsigned long *)&__m256i_result[1]) = 0x97541c5897541c58; ++ *((unsigned long *)&__m256i_result[0]) = 0x97541c5897541c58; ++ __m256i_out = __lasx_xvreplgr2vr_w (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ long_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_d (long_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000400; ++ *((unsigned long *)&__m256i_result[3]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_result[2]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_result[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_result[0]) = 0x0400040004000400; ++ __m256i_out = __lasx_xvreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvreplgr2vr_b (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_b (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000084; ++ *((unsigned long *)&__m256i_result[3]) = 0x0084008400840084; ++ *((unsigned long *)&__m256i_result[2]) = 0x0084008400840084; ++ *((unsigned long *)&__m256i_result[1]) = 0x0084008400840084; ++ *((unsigned long *)&__m256i_result[0]) = 0x0084008400840084; ++ __m256i_out = __lasx_xvreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ long_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_d (long_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_b (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000ff000000ff; ++ __m256i_out = __lasx_xvreplgr2vr_w (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000020202020; ++ *((unsigned long *)&__m256i_result[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[2]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[0]) = 0x2020202020202020; ++ __m256i_out = __lasx_xvreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000020202020; ++ *((unsigned long *)&__m256i_result[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[2]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[0]) = 0x2020202020202020; ++ __m256i_out = __lasx_xvreplgr2vr_w (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_b (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ long_op0 = 0x0000000020202020; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000020202020; ++ __m256i_out = __lasx_xvreplgr2vr_d (long_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ long_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_d (long_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_w (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ long_op0 = 0x0000000000020006; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000020006; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000020006; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000020006; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000020006; ++ __m256i_out = __lasx_xvreplgr2vr_d (long_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_w (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000ff000000ff; ++ __m256i_out = __lasx_xvreplgr2vr_w (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ long_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_d (long_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_w (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_b (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ long_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_d (long_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ long_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_d (long_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000ff000000ff; ++ __m256i_out = __lasx_xvreplgr2vr_w (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ long_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_d (long_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ long_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_d (long_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ long_op0 = 0x0000000000020006; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000020006; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000020006; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000020006; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000020006; ++ __m256i_out = __lasx_xvreplgr2vr_d (long_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_w (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvreplgr2vr_w (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve.c +new file mode 100644 +index 000000000..7aa76c2ba +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve.c +@@ -0,0 +1,536 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000001b3c4c0a5c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_h (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0xfffffffffffffefb; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffefb; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000fe; ++ int_op1 = 0x0000000059815d00; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000000fe; ++ __m256i_out = __lasx_xvreplve_d (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x555555ab555555ab; ++ 
*((unsigned long *)&__m256i_op0[2]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op0[1]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op0[0]) = 0x555555ab555555ab; ++ int_op1 = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_result[2]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_result[1]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_result[0]) = 0x555555ab555555ab; ++ __m256i_out = __lasx_xvreplve_d (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvreplve_h (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000012e2110; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0202020202020202; ++ *((unsigned long *)&__m256i_result[2]) = 0x0202020202020202; ++ *((unsigned long *)&__m256i_result[1]) = 0x1010101010101010; ++ *((unsigned long *)&__m256i_result[0]) = 0x1010101010101010; ++ __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000003f; ++ int_op1 = 0x0000000000000400; ++ *((unsigned long *)&__m256i_result[3]) = 0x003f003f003f003f; ++ *((unsigned long *)&__m256i_result[2]) = 0x003f003f003f003f; ++ *((unsigned long *)&__m256i_result[1]) = 0x003f003f003f003f; ++ *((unsigned long *)&__m256i_result[0]) = 0x003f003f003f003f; ++ __m256i_out = __lasx_xvreplve_h (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x003f003f003f003f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x003f003f003f003f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x003f003f003f003f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x003f003f003f003f; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x003f003f003f003f; ++ *((unsigned long *)&__m256i_result[2]) = 0x003f003f003f003f; ++ *((unsigned long *)&__m256i_result[1]) = 0x003f003f003f003f; ++ *((unsigned long *)&__m256i_result[0]) = 0x003f003f003f003f; ++ __m256i_out = __lasx_xvreplve_w (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000003f0000; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000003f0000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xe161616161616161; ++ *((unsigned long *)&__m256i_op0[2]) = 0xe161616161614e60; ++ *((unsigned long *)&__m256i_op0[1]) = 0xe161616161616161; ++ *((unsigned long *)&__m256i_op0[0]) = 0xe161616161614e60; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xe161616161614e60; ++ *((unsigned long *)&__m256i_result[2]) = 0xe161616161614e60; ++ *((unsigned long *)&__m256i_result[1]) = 0xe161616161614e60; ++ *((unsigned long *)&__m256i_result[0]) = 0xe161616161614e60; ++ __m256i_out = __lasx_xvreplve_d (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000080; ++ int_op1 = 0x00000000000000ac; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000080; ++ __m256i_out = __lasx_xvreplve_d (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000400; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_h (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_h (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff00d5007f00ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00ffffff00ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff00d5007f00ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00ffffff00ffff; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvreplve_h (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000000; ++ int_op1 = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_d (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_w (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000020202020; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[2]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[0]) = 0x2020202020202020; ++ __m256i_out = __lasx_xvreplve_w (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffff7fffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffff7fffff; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc192181230000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc192181230000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_h (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_h (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_h (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000ff00ff; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000ff00ff; ++ 
*((unsigned long *)&__m256i_result[2]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000ff00ff; ++ __m256i_out = __lasx_xvreplve_d (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fef7fef7fef7fef; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fef7fef7fef7fef; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fef7fef7fef7fef; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fef7fef7fef7fef; ++ __m256i_out = __lasx_xvreplve_h (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffff00ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffff00ffffffff; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fe37fe3001d001d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff7fff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fe37fe3001d001d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff7fff0000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_h (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007f010700c70106; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007f010700c70106; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0106010601060106; ++ *((unsigned long *)&__m256i_result[2]) = 0x0106010601060106; ++ *((unsigned long *)&__m256i_result[1]) = 0x0106010601060106; ++ *((unsigned long *)&__m256i_result[0]) = 0x0106010601060106; ++ __m256i_out = __lasx_xvreplve_h (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_h (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvreplve_w (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_d (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000003fff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000003fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000003fff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000003fff; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000040; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[2]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[0]) = 
0x4040404040404040; ++ __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000404; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000404; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0404040404040404; ++ *((unsigned long *)&__m256i_result[2]) = 0x0404040404040404; ++ *((unsigned long *)&__m256i_result[1]) = 0x0404040404040404; ++ *((unsigned long *)&__m256i_result[0]) = 0x0404040404040404; ++ __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000800080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000202; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000800080; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000202; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000202; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000202; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000202; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000202; ++ __m256i_out = __lasx_xvreplve_d (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x000000003ddc5dac; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvreplve_d (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000200000002; ++ int_op1 = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve0.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve0.c +new file mode 100644 +index 000000000..a2bc2da52 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve0.c +@@ -0,0 +1,471 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, 
__m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffeffffff88; ++ *((unsigned long *)&__m256i_op0[2]) = 0x61e0000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffeffffff88; ++ *((unsigned long *)&__m256i_op0[0]) = 0x61e0000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvreplve0_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffff80fe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xd52aaaaa555555ab; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffff80fe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xd52aaaaa555555ab; ++ *((unsigned long *)&__m256i_result[3]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_result[2]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_result[1]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_result[0]) = 0x555555ab555555ab; ++ __m256i_out = __lasx_xvreplve0_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvreplve0_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_result[3]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_result[2]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_result[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_result[0]) = 0x8080808080808080; ++ __m256i_out = __lasx_xvreplve0_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000003fff3fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3fff3fff3fff4000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000403f3fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x3fff3fff3fff3fff; ++ __m256i_out = 
__lasx_xvreplve0_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000080000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000080000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000080000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000080000001; ++ __m256i_out = __lasx_xvreplve0_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_result[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[2]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[0]) = 0x2020202020202020; ++ __m256i_out = __lasx_xvreplve0_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_q (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0007fd00000f02ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000001fffeff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00fe00feff02ff; 
++ *((unsigned long *)&__m256i_result[3]) = 0xff00fe00feff02ff; ++ *((unsigned long *)&__m256i_result[2]) = 0xff00fe00feff02ff; ++ *((unsigned long *)&__m256i_result[1]) = 0xff00fe00feff02ff; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00fe00feff02ff; ++ __m256i_out = __lasx_xvreplve0_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfc00ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000100fe000100fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfc00ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000100fe000100fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x00fe00fe00fe00fe; ++ *((unsigned long *)&__m256i_result[2]) = 0x00fe00fe00fe00fe; ++ *((unsigned long *)&__m256i_result[1]) = 0x00fe00fe00fe00fe; ++ *((unsigned long *)&__m256i_result[0]) = 0x00fe00fe00fe00fe; ++ __m256i_out = __lasx_xvreplve0_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[3]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[2]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[0]) = 0x4040404040404040; ++ __m256i_out = __lasx_xvreplve0_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_q (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000781; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000064; ++ __m256i_out = __lasx_xvreplve0_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_q (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op0[1]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_q (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvreplve0_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvreplve0_q (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffe0047d00e00480; ++ *((unsigned long *)&__m256i_op0[2]) = 0x001fc0200060047a; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffe0047d00e00480; ++ *((unsigned long *)&__m256i_op0[0]) = 0x001fc0200060047a; ++ *((unsigned long *)&__m256i_result[3]) = 
0x047a047a047a047a; ++ *((unsigned long *)&__m256i_result[2]) = 0x047a047a047a047a; ++ *((unsigned long *)&__m256i_result[1]) = 0x047a047a047a047a; ++ *((unsigned long *)&__m256i_result[0]) = 0x047a047a047a047a; ++ __m256i_out = __lasx_xvreplve0_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x037fe01f001fe020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x037fe01f001fe020; ++ *((unsigned long *)&__m256i_result[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[2]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[0]) = 0x2020202020202020; ++ __m256i_out = __lasx_xvreplve0_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_result[2]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_result[0]) = 0x0d0d0d0d0d0d0d0d; ++ __m256i_out = __lasx_xvreplve0_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_result[3]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_result[2]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_result[1]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_result[0]) = 0x0202010202020102; ++ __m256i_out = __lasx_xvreplve0_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned 
long *)&__m256i_op0[3]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff00ff00ff; ++ __m256i_out = __lasx_xvreplve0_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffe00000001; ++ __m256i_out = __lasx_xvreplve0_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x800080ff800080ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x800080ff800080ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x800080ff800080ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x800080ff800080ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x800080ff800080ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x800080ff800080ff; ++ __m256i_out = __lasx_xvreplve0_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_q (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvreplve0_q (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff97a2; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffff97a2; ++ *((unsigned long *)&__m256i_result[3]) = 0x97a297a297a297a2; ++ *((unsigned long *)&__m256i_result[2]) = 0x97a297a297a297a2; ++ *((unsigned long *)&__m256i_result[1]) = 0x97a297a297a297a2; ++ *((unsigned long *)&__m256i_result[0]) = 0x97a297a297a297a2; ++ __m256i_out = __lasx_xvreplve0_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvreplve0_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplvei.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplvei.c +new file mode 100644 +index 000000000..9346f9bfb +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplvei.c +@@ -0,0 +1,20 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, 
__m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf4i_b.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf4i_b.c +new file mode 100644 +index 000000000..c8a00ca89 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf4i_b.c +@@ -0,0 +1,430 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00007ffffffff7ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x49d8080067f4f81f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00007f00fffff7ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xd8490849f467f867; 
++ __m256i_out = __lasx_xvshuf4i_b (__m256i_op0, 0xb7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf4i_b (__m256i_op0, 0xdb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf4i_b (__m256i_op0, 0x95); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffb3b4; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffff5ffff4738; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffb3b4; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffff5ffff4738; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvshuf4i_b (__m256i_op0, 0xee); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf4i_b (__m256i_op0, 0x2f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf4i_b (__m256i_op0, 0x6f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf4i_b (__m256i_op0, 0x23); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00007ffffffff7ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x49d8080067f4f81f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7ffff7ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x080008000800f81f; ++ __m256i_out = __lasx_xvshuf4i_h (__m256i_op0, 0xa8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long *)&__m256i_op0[2]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long *)&__m256i_op0[0]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long *)&__m256i_result[3]) = 0xc5c4c5c5c5c5c5c5; ++ *((unsigned long *)&__m256i_result[2]) = 0xc5c545c545c545c5; ++ *((unsigned long *)&__m256i_result[1]) = 0xc5c4c5c5c5c5c5c5; ++ *((unsigned long *)&__m256i_result[0]) = 0xc5c545c545c545c5; ++ __m256i_out = __lasx_xvshuf4i_h (__m256i_op0, 0x3d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvshuf4i_h (__m256i_op0, 0xf7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf4i_h (__m256i_op0, 0x3a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffff0000; ++ __m256i_out = __lasx_xvshuf4i_h (__m256i_op0, 0xa7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_result[3]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_result[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_result[1]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_result[0]) = 0xff1cff1cff1cff1c; ++ __m256i_out = __lasx_xvshuf4i_h (__m256i_op0, 0xdc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffff0020; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff8001ffff0001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffff0020; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff8001ffff0001; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff8001ffff8001; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0xffff8001ffff8001; ++ __m256i_out = __lasx_xvshuf4i_h (__m256i_op0, 0x6e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvshuf4i_h (__m256i_op0, 0x9f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op0[2]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op0[0]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_result[3]) = 0x0002ffff00020002; ++ *((unsigned long *)&__m256i_result[2]) = 0x04f504f104f504f5; ++ *((unsigned long *)&__m256i_result[1]) = 0x0002ffff00020002; ++ *((unsigned long *)&__m256i_result[0]) = 0x04f504f104f504f5; ++ __m256i_out = __lasx_xvshuf4i_h (__m256i_op0, 0x65); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x1e1800001e180000; ++ *((unsigned long *)&__m256i_result[2]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x1e1800001e180000; ++ *((unsigned long *)&__m256i_result[0]) = 0x1e18000000000000; ++ __m256i_out = __lasx_xvshuf4i_w (__m256i_op0, 0xfe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf4i_w (__m256i_op0, 0x64); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long *)&__m256i_op0[2]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long *)&__m256i_op0[0]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long *)&__m256i_result[3]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long *)&__m256i_result[2]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long *)&__m256i_result[1]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long *)&__m256i_result[0]) = 0x45c5c5c545c5c5c5; ++ __m256i_out = __lasx_xvshuf4i_w (__m256i_op0, 0xb0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000f9f900; ++ *((unsigned long *)&__m256i_op0[2]) = 0x79f9f9f900f9f9e0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000f9f900; ++ *((unsigned long *)&__m256i_op0[0]) = 0x79f9f9f900f9f900; ++ *((unsigned long *)&__m256i_result[3]) = 0x00f9f90079f9f9f9; ++ *((unsigned long *)&__m256i_result[2]) = 0x79f9f9f900000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00f9f90079f9f9f9; ++ *((unsigned long *)&__m256i_result[0]) = 0x79f9f9f900000000; ++ __m256i_out = __lasx_xvshuf4i_w (__m256i_op0, 0x97); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000007aff7c00; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffd017d00; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x000000007aff7c00; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffd017d00; ++ *((unsigned long *)&__m256i_result[3]) = 0x7aff7c0000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfd017d0000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7aff7c0000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfd017d0000000000; ++ __m256i_out = __lasx_xvshuf4i_w (__m256i_op0, 0xb3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc3f0c3f0c3f0c3f0; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc3f0c3f0c3f0c3f0; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc3f0c3f0c3f0c3f0; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc3f0c3f0c3f0c3f0; ++ *((unsigned long *)&__m256i_result[3]) = 0xc3f0c3f0c3f0c3f0; ++ *((unsigned long *)&__m256i_result[2]) = 0xc3f0c3f0c3f0c3f0; ++ *((unsigned long *)&__m256i_result[1]) = 0xc3f0c3f0c3f0c3f0; ++ *((unsigned long *)&__m256i_result[0]) = 0xc3f0c3f0c3f0c3f0; ++ __m256i_out = __lasx_xvshuf4i_w (__m256i_op0, 0x3c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvshuf4i_w (__m256i_op0, 0xf4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff81ff7d; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff81ff7d; ++ *((unsigned long *)&__m256i_result[3]) = 0xff81ff7dffffffff; ++ 
*((unsigned long *)&__m256i_result[2]) = 0xffffffffff81ff7d; ++ *((unsigned long *)&__m256i_result[1]) = 0xff81ff7dffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffff81ff7d; ++ __m256i_out = __lasx_xvshuf4i_w (__m256i_op0, 0x28); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000002000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000020ff790020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000002000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000020ff790020; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000002000000020; ++ __m256i_out = __lasx_xvshuf4i_w (__m256i_op0, 0xa5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0101010183f95466; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x01010101d58efe94; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010183f95466; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x01010101d58efe94; ++ __m256i_out = __lasx_xvshuf4i_d (__m256i_op0, __m256i_op1, 0xa7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf4i_d (__m256i_op0, __m256i_op1, 0xd9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00001fff00001fff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00001fff00001fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00001fff00001fff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00001fff00001fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00001fff00001fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00001fff00001fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00001fff00001fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00001fff00001fff; ++ __m256i_out = __lasx_xvshuf4i_d (__m256i_op0, __m256i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff80be0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000f0f0002; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff80be0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000f1002; ++ *((unsigned long *)&__m256i_op1[3]) = 0x80000000ff800000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x80000000ff800000; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x80000000ff800000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x80000000ff800000; ++ __m256i_out = __lasx_xvshuf4i_d (__m256i_op0, __m256i_op1, 0xdb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000080000009; ++ *((unsigned long *)&__m256i_op1[2]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000080000009; ++ *((unsigned long *)&__m256i_op1[0]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x43ef878780000009; ++ __m256i_out = __lasx_xvshuf4i_d (__m256i_op0, __m256i_op1, 0x36); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvshuf4i_d (__m256i_op0, __m256i_op1, 0x5a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf4i_d (__m256i_op0, __m256i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf4i_d (__m256i_op0, __m256i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fff00017fff0000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x7fff00017fff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fff00017fff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff00017fff0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op1[2]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op1[0]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_result[2]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_result[0]) = 0x04f104f104f504ed; ++ __m256i_out = __lasx_xvshuf4i_d (__m256i_op0, __m256i_op1, 0x7e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf_b.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf_b.c +new file mode 100644 +index 000000000..641ea2315 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf_b.c +@@ -0,0 +1,761 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000007070707; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0102040000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000020100; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x0703020000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfe02fe02fee5fe22; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff49fe4200000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffefefffffcfa; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0xfffffff8fffffff8; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0xfffffff8fc000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfafafafafafafafa; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000fefefe; ++ __m256i_out = __lasx_xvshuf_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x3ff0010000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x3ff0010000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff0000ffff0000; ++ __m256i_out = __lasx_xvshuf_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000003ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000003ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000077fff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffefe; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000000101; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffefe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000101; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x67eee33567eee435; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x67eee33567eee435; ++ *((unsigned long *)&__m256i_op2[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff80000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff80000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xefdfefdf00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xefdfefdfefdfefdf; ++ *((unsigned long *)&__m256i_op1[1]) = 0xefdfefdf00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xefdfefdfefdfefdf; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7575ffff75757595; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7575ffff7575f575; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7575ffff75757595; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7575ffff7575f575; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op2[3]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_op2[2]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_op2[1]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_op2[0]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000fffff800; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000fffff800; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x00000000fffff800; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x00000000fffff800; ++ *((unsigned long *)&__m256i_result[3]) = 0xf800f800f800f800; ++ *((unsigned long *)&__m256i_result[2]) = 0xf800f800f800f800; ++ *((unsigned long *)&__m256i_result[1]) = 0xf800f800f800f800; ++ *((unsigned long *)&__m256i_result[0]) = 0xf800f800f800f800; ++ __m256i_out = __lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000fffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000f0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000fffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000f0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x000000ffff88ff88; ++ *((unsigned long *)&__m256i_op2[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x000000ffff88ff88; ++ *((unsigned long *)&__m256i_result[3]) = 0xff88ff88ff880000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0xff88ff88ff880000; ++ *((unsigned long *)&__m256i_result[1]) = 0xff88ff88ff880000; ++ *((unsigned long *)&__m256i_result[0]) = 0xff88ff88ff880000; ++ __m256i_out = __lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000010000ffe1; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000101001e18; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000010000ffe1; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000101001e18; ++ *((unsigned long *)&__m256i_op1[3]) = 0x98111cca98111cca; ++ *((unsigned long *)&__m256i_op1[2]) = 0x98111cca98111cca; ++ *((unsigned long *)&__m256i_op1[1]) = 0x98111cca98111cca; ++ *((unsigned long *)&__m256i_op1[0]) = 0x98111cca98111cca; ++ *((unsigned long *)&__m256i_op2[3]) = 0x000000010000ffe1; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000101001e18; ++ *((unsigned long *)&__m256i_op2[1]) = 0x000000010000ffe1; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000101001e18; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000101001e18; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000101001e18; ++ __m256i_out = __lasx_xvshuf_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x80008000b3e8fef1; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x80008000802ea100; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000001; ++ *((unsigned long 
*)&__m256i_op2[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op2[0]) = 0x00000000012e2110; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000200000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x012e2110012e2110; ++ __m256i_out = __lasx_xvshuf_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000082a54290; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000028aa700; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000082a54290; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000002a54287; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000002a542a; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000002a542a; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000007fc00000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000007fc00000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000007fc00000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000007fc00000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xdfffffffdfffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0xdfffffffdfffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000; ++ __m256i_out = __lasx_xvshuf_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001000104000200; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000104000200; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0004000400040004; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0004000500040005; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0004000400040004; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0004000500040005; ++ *((unsigned long *)&__m256i_result[3]) = 0x0004000500040005; ++ *((unsigned long *)&__m256i_result[2]) = 0x0004000500040005; ++ *((unsigned long *)&__m256i_result[1]) = 0x0004000500040005; ++ *((unsigned long *)&__m256i_result[0]) = 0x0004000500040005; ++ __m256i_out = __lasx_xvshuf_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op1[2]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op1[1]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op1[0]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000080008000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000080008000; ++ *((unsigned long 
*)&__m256i_op2[0]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007fff7fff; ++ __m256i_out = __lasx_xvshuf_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvshuf_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000fffffe01fe52; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ff01ff02; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000fffffe01fe52; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ff01ff02; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000800000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000080008001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000800000000000; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x0000000080008001; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000080008001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000080008001; ++ __m256i_out = __lasx_xvshuf_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0008000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0008000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x000000ff800000ff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x000000ff800000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000ff800000ff; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x000000ff800000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000ff800000ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000ff800000ff; ++ __m256i_out = __lasx_xvshuf_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000002000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000002000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000080040; ++ __m256i_out = __lasx_xvshuf_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffeb8649d0d6250; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffeb8649d0d6250; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op2[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long 
*)&__m256i_op2[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op2[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op2[0]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +-- +2.33.0 + diff --git a/LoongArch-Add-tests-for-ASX-vector-xvsll-xvsrl-instr.patch b/LoongArch-Add-tests-for-ASX-vector-xvsll-xvsrl-instr.patch new file mode 100644 index 0000000000000000000000000000000000000000..ff6535633a07c9a4e72e340308860886498c89fe --- /dev/null +++ 
b/LoongArch-Add-tests-for-ASX-vector-xvsll-xvsrl-instr.patch @@ -0,0 +1,5611 @@ +From e90910ab68c43259f898fb7b2cba02d4eb457428 Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Tue, 12 Sep 2023 15:44:49 +0800 +Subject: [PATCH 106/124] LoongArch: Add tests for ASX vector xvsll/xvsrl + instructions. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lasx/lasx-xvsll.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvslli.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvsllwil-1.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvsllwil-2.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvsrl.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvsrli.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvsrln.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvsrlni.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvsrlr.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvsrlri.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvsrlrn.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvsrlrni.c: New test. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lasx/lasx-xvsll.c | 425 +++++++++++ + .../loongarch/vector/lasx/lasx-xvslli.c | 416 +++++++++++ + .../loongarch/vector/lasx/lasx-xvsllwil-1.c | 339 +++++++++ + .../loongarch/vector/lasx/lasx-xvsllwil-2.c | 350 +++++++++ + .../loongarch/vector/lasx/lasx-xvsrl.c | 650 +++++++++++++++++ + .../loongarch/vector/lasx/lasx-xvsrli.c | 405 +++++++++++ + .../loongarch/vector/lasx/lasx-xvsrln.c | 425 +++++++++++ + .../loongarch/vector/lasx/lasx-xvsrlni.c | 680 ++++++++++++++++++ + .../loongarch/vector/lasx/lasx-xvsrlr.c | 515 +++++++++++++ + .../loongarch/vector/lasx/lasx-xvsrlri.c | 416 +++++++++++ + .../loongarch/vector/lasx/lasx-xvsrlrn.c | 410 +++++++++++ + .../loongarch/vector/lasx/lasx-xvsrlrni.c | 455 ++++++++++++ + 12 files changed, 5486 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsll.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslli.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrl.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrli.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrln.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlni.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlr.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlri.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrn.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrni.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsll.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsll.c +new file mode 100644 
+index 000000000..7179e715c +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsll.c +@@ -0,0 +1,425 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvsll_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00001f41ffffbf00; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00001f41ffffbf00; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsll_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsll_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffe0000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffe0000000; ++ __m256i_out = __lasx_xvsll_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsll_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7f00000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsll_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000fffefe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000fffefe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000808080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsll_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffefefffffcfa; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffefefffffcfa; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000; ++ __m256i_out = __lasx_xvsll_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007fffffff; ++ 
*((unsigned long *)&__m256i_op1[3]) = 0xfff2f7bcfff2f7bd; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff2f93bfff2fff2; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfff2f7bcfff2f7bd; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfff2f93bfff2fff2; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffcf800fffcfffc; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fffcfffc; ++ __m256i_out = __lasx_xvsll_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff8c80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffffe40; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0004000400040004; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff8c80; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fff0e400; ++ __m256i_out = __lasx_xvsll_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5980000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5980000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x5980000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x5980000000000000; ++ __m256i_out = __lasx_xvsll_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsll_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_result[2]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_result[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_result[0]) = 0x8080808080808080; ++ __m256i_out = __lasx_xvsll_b 
(__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsll_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvsll_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x0000000200000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000200000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000800000000; ++ __m256i_out = __lasx_xvsll_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsll_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000001ffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000001ffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000000001ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffe0000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000000001ff; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0xfffffe0000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000001ff8000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000001ff8000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsll_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001ffff0001ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001ffff0001ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000; ++ __m256i_out = __lasx_xvsll_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsll_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvsll_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvsll_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff800000000000; ++ __m256i_out = __lasx_xvsll_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00010001000100; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00010001000100; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsll_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsll_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[2]) = 0x800000ff800000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[0]) = 0x800000ff800000ff; ++ __m256i_out = __lasx_xvsll_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1400080008000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1400080008000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1400080008000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x1400080008000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsll_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslli.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslli.c +new file mode 100644 +index 000000000..003e29b67 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslli.c +@@ -0,0 +1,416 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x01fa022a01a401e5; ++ *((unsigned long *)&__m256i_op0[2]) = 0x030d03aa0079029b; ++ *((unsigned long *)&__m256i_op0[1]) = 0x024c01f901950261; ++ *((unsigned long *)&__m256i_op0[0]) = 0x008102c2008a029f; ++ *((unsigned long *)&__m256i_result[3]) = 0x54000000ca000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x5400000036000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xf2000000c2000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x840000003e000000; ++ __m256i_out = __lasx_xvslli_w (__m256i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff1001100100000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0010100000000000; ++ *((unsigned long *)&__m256i_op0[1]) 
= 0xfff1001100100000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0010100000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfcc4004400400000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0040400000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfcc4004400400000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0040400000000000; ++ __m256i_out = __lasx_xvslli_b (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslli_d (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslli_b (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffefffffffef; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffef000004ea; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffefffffffef; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffefffffffef; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffefffffffef; ++ *((unsigned 
long *)&__m256i_result[2]) = 0xffffffef000004ea; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffefffffffef; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffefffffffef; ++ __m256i_out = __lasx_xvslli_h (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffbf4; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m256i_result[3]) = 0xf800f800f800c000; ++ *((unsigned long *)&__m256i_result[2]) = 0xf800f800f800a000; ++ *((unsigned long *)&__m256i_result[1]) = 0xf800f800f800e000; ++ *((unsigned long *)&__m256i_result[0]) = 0xf800f800f800e000; ++ __m256i_out = __lasx_xvslli_h (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffefefffffefe; ++ __m256i_out = __lasx_xvslli_h (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0100010001000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0100010001000000; ++ __m256i_out = __lasx_xvslli_d (__m256i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xf000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xf000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xf000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xf000000000000000; ++ __m256i_out = __lasx_xvslli_b (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslli_w (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslli_b (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x1616161616161616; ++ *((unsigned long *)&__m256i_op0[2]) = 0x161616167fffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ffe16167f161616; ++ *((unsigned long *)&__m256i_op0[0]) = 0x161616167fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x2c2c2c2c2c2c2c2c; ++ *((unsigned long *)&__m256i_result[2]) = 0x2c2c2c2cfefefefe; ++ *((unsigned long *)&__m256i_result[1]) = 0xfefc2c2cfe2c2c2c; ++ *((unsigned long *)&__m256i_result[0]) = 0x2c2c2c2cfefefefe; ++ __m256i_out = __lasx_xvslli_b (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xf8f8f8f8f8f8f8f8; ++ *((unsigned long *)&__m256i_result[2]) = 0xf8f8f8f8f8f8f8f8; ++ *((unsigned long *)&__m256i_result[1]) = 0xf8f8f8f8f8f8f8f8; ++ *((unsigned long *)&__m256i_result[0]) = 0xf8f8f8f8f8f8f8f8; ++ __m256i_out = __lasx_xvslli_b (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1f60000000c00000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1f60000000c00000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x60000000c0000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x60000000c0000000; ++ __m256i_out = __lasx_xvslli_h (__m256i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff8fff8fff8fff8; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op0[0]) = 0xfff8fff8fff8fff8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xff80ff80ff80ff80; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xff80ff80ff80ff80; ++ __m256i_out = __lasx_xvslli_h (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000008000000080; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000008000000080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000008000000080; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000008000000080; ++ __m256i_out = __lasx_xvslli_b (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00080008000801ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0008000800080008; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00080008000801ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0008000800080008; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslli_d (__m256i_op0, 0x3f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslli_b (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000f0f0f0f0; ++ *((unsigned long *)&__m256i_result[2]) = 0xf0f0f0f0f0f0f0f0; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000f0f0f0f0; ++ *((unsigned long *)&__m256i_result[0]) = 0xf0f0f0f0f0f0f0f0; ++ __m256i_out = __lasx_xvslli_b (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslli_w (__m256i_op0, 0x12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvslli_w (__m256i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslli_b (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x03f0000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x03f0000000000000; ++ __m256i_out = __lasx_xvslli_d (__m256i_op0, 0x34); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslli_w (__m256i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffff80000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffff80000; ++ __m256i_out = __lasx_xvslli_d (__m256i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_result[2]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_result[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_result[0]) = 0xfefefefefefefefe; ++ __m256i_out = __lasx_xvslli_b (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xf800f800f800f800; ++ *((unsigned long *)&__m256i_result[2]) = 0xf800f800f800f800; ++ *((unsigned long *)&__m256i_result[1]) = 0xf800f800f800f800; ++ *((unsigned long *)&__m256i_result[0]) = 0xf800f800f800f800; ++ __m256i_out = __lasx_xvslli_h (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslli_h (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslli_h (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslli_b (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0404000004040000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0404000004040000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0404000004040000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0404000004040000; ++ __m256i_out = __lasx_xvslli_w (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslli_d (__m256i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslli_b (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000004843ffdff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000004843ffdff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000c040c0; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000c040c0; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvslli_b (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffff000000; ++ __m256i_out = __lasx_xvslli_d (__m256i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslli_h (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-1.c +new file mode 100644 +index 000000000..ef3a47da5 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-1.c +@@ -0,0 +1,339 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d 
__m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffe0ffe0ffe0ffe0; ++ *((unsigned long *)&__m256i_result[2]) = 0xffe0ffe0ffe0ffe0; ++ *((unsigned long *)&__m256i_result[1]) = 0xffe0ffe0ffe0ffe0; ++ *((unsigned long *)&__m256i_result[0]) = 0xffe0ffe0ffe0ffe0; ++ __m256i_out = __lasx_xvsllwil_h_b (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000003f0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000003f0; ++ __m256i_out = __lasx_xvsllwil_h_b (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_h_b (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_h_b (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000080000009; ++ *((unsigned long *)&__m256i_op0[2]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000080000009; ++ *((unsigned long *)&__m256i_op0[0]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_result[3]) = 0x0218ff78fc38fc38; ++ *((unsigned long *)&__m256i_result[2]) = 0xfc00000000000048; ++ *((unsigned long *)&__m256i_result[1]) = 0x0218ff78fc38fc38; ++ *((unsigned long *)&__m256i_result[0]) = 0xfc00000000000048; ++ __m256i_out = __lasx_xvsllwil_h_b (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffc0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffc0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff0fff0fff0fc00; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff0fff0fff0fc00; ++ __m256i_out = __lasx_xvsllwil_h_b 
(__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_h_b (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_w_h (__m256i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfc00000000000000; ++ __m256i_out = __lasx_xvsllwil_w_h (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_w_h (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000200000002; ++ __m256i_out = __lasx_xvsllwil_w_h (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_w_h (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_w_h (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_w_h (__m256i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_w_h (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0xfffffc00fffffc00; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffc00fffffc00; ++ __m256i_out = __lasx_xvsllwil_w_h (__m256i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0040000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0040000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000a000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000a000; ++ __m256i_out = __lasx_xvsllwil_w_h (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000400000004000; ++ __m256i_out = __lasx_xvsllwil_w_h (__m256i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffc00; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffc00; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ 
__m256i_out = __lasx_xvsllwil_d_w (__m256i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffc00; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffc00; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_d_w (__m256i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffefffffefd; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffbf4; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffffc; ++ __m256i_out = __lasx_xvsllwil_d_w (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_d_w (__m256i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_d_w (__m256i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_d_w (__m256i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xdbc8000000003fff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xdbc8000000003fff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_d_w (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffbff1ffffbff1; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0xffffbff1ffffbff1; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffeffc4000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffeffc4000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffeffc4000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffeffc4000000; ++ __m256i_out = __lasx_xvsllwil_d_w (__m256i_op0, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x817f11ed81800ff0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x817f11ed81800ff0; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffe05fc47b400; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffe06003fc000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffe05fc47b400; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffe06003fc000; ++ __m256i_out = __lasx_xvsllwil_d_w (__m256i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000feccfecc; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000feccfecc; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_d_w (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fe36364661af18f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fe363637fe36364; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fe36364661af18f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fe363637fe36364; ++ *((unsigned long *)&__m256i_result[3]) = 0x00001ff8d8d8c000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x00001ff8d8d90000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00001ff8d8d8c000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00001ff8d8d90000; ++ __m256i_out = __lasx_xvsllwil_d_w (__m256i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-2.c +new file mode 100644 +index 000000000..76651af63 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-2.c +@@ -0,0 +1,350 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_hu_bu (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff0000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0xfff0000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff0000000000080; ++ *((unsigned long *)&__m256i_result[3]) = 0x7f80780000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7f80780000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000004000; ++ __m256i_out = __lasx_xvsllwil_hu_bu (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffefe00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x03fc03fc03f803f8; ++ *((unsigned long *)&__m256i_result[2]) = 0x03fc03fc03f803f8; ++ *((unsigned long *)&__m256i_result[1]) = 0x03fc03fc03f803f8; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_hu_bu (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x01fe01fe01fe01fe; ++ __m256i_out = __lasx_xvsllwil_hu_bu (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1f0fdf7f3e3b31d4; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1f0fdf7f3e3b31d4; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x7ff8000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x01fc03e000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x01fc03e000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_hu_bu (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_hu_bu (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_hu_bu (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00fe01e000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00fe01e000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_hu_bu (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_hu_bu (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x07fee332883f86b0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x07fed3c8f7ad28d0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x07fee332883f86b0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x07fed3c8f7ad28d0; ++ *((unsigned long *)&__m256i_result[3]) = 0x01c03f8034c03200; ++ *((unsigned long *)&__m256i_result[2]) = 0x3dc02b400a003400; ++ *((unsigned long *)&__m256i_result[1]) = 0x01c03f8034c03200; ++ *((unsigned long *)&__m256i_result[0]) = 0x3dc02b400a003400; ++ __m256i_out = __lasx_xvsllwil_hu_bu (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_wu_hu (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_wu_hu (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000054; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00aa000000ac00fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000054; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00aa000000ac00fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0002a80000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0002b0000003f800; ++ *((unsigned long *)&__m256i_result[1]) = 0x0002a80000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0002b0000003f800; ++ __m256i_out = __lasx_xvsllwil_wu_hu (__m256i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc1be9e9e9f000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x41d8585858400000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc1be9e9e9f000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x41d8585858400000; ++ *((unsigned long *)&__m256i_result[3]) = 0x1076000016160000; ++ *((unsigned long *)&__m256i_result[2]) = 0x1610000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x1076000016160000; ++ *((unsigned long *)&__m256i_result[0]) = 0x1610000000000000; ++ __m256i_out = __lasx_xvsllwil_wu_hu (__m256i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ 
*((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_wu_hu (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_wu_hu (__m256i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000000d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000000d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_wu_hu (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffe00000001; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000008000; ++ __m256i_out = __lasx_xvsllwil_wu_hu (__m256i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_wu_hu (__m256i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000007f00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x311d73ad3ec2064a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000001fc000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000c475ceb40000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000fb0819280000; ++ __m256i_out = __lasx_xvsllwil_du_wu (__m256i_op0, 0x12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[3]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000ff0000; ++ __m256i_out = __lasx_xvsllwil_du_wu (__m256i_op0, 0x10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0004040404000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0004040404000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0004040404000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0004040404000000; ++ __m256i_out = __lasx_xvsllwil_du_wu (__m256i_op0, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_du_wu (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000007c8; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000007c8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000007c8; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000007c8; ++ __m256i_out = __lasx_xvsllwil_du_wu (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000430207f944; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000430207f944; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000086000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00040ff288000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000086000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00040ff288000000; ++ __m256i_out = __lasx_xvsllwil_du_wu (__m256i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_du_wu (__m256i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_du_wu (__m256i_op0, 0xa); 
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000fff000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000fff000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000001ffe00000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000001ffe00000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_du_wu (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x80000000ffc8ff88; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x80000000ffc8ff88; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001ff91ff100000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001ff91ff100000; ++ __m256i_out = __lasx_xvsllwil_du_wu (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000008c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000008c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001180000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001180000000; ++ __m256i_out = __lasx_xvsllwil_du_wu (__m256i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git 
a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrl.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrl.c +new file mode 100644 +index 000000000..1d591c35c +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrl.c +@@ -0,0 +1,650 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000017ffffffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000017ffffffe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000017ffffffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000017ffffffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvsrl_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x247fe49409620040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x247fe49409620040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x6580668200fe0002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6580668200fe0002; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x247fe49409620040; ++ *((unsigned long *)&__m256i_result[2]) = 0x247fe49409620040; ++ *((unsigned long *)&__m256i_result[1]) = 0x6580668200fe0002; ++ *((unsigned long *)&__m256i_result[0]) = 0x6580668200fe0002; ++ __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000003f7e3f; ++ *((unsigned long *)&__m256i_op0[2]) = 
0xffc6cc05c64d960e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000003f7e3f; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff874dc687870000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000003f7e3f; ++ *((unsigned long *)&__m256i_result[2]) = 0xffc6cc05c64d960e; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000003f7e3f; ++ *((unsigned long *)&__m256i_result[0]) = 0xff874dc687870000; ++ __m256i_out = __lasx_xvsrl_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000483800; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000483800; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffba0c05; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffba0c05; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000483800; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000483800; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00ffffff00ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00ffffff00ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001000000010000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001000000010000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xff00ffffff00ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00ffffff00ffff; ++ __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrl_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000008000000080; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000008000000080; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000008000000080; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000008000000080; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000200; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000200; ++ __m256i_out = __lasx_xvsrl_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000001fffe; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0x000000000001fffe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000001fffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000001fffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002; ++ __m256i_out = __lasx_xvsrl_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000080000000800; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000080000000800; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000080000000800; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000080000000800; ++ __m256i_out = __lasx_xvsrl_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffff70156; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffff70156; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffff70156; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffff70156; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000008; ++ __m256i_out = __lasx_xvsrl_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fe37fe3001d001d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff7fff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fe37fe3001d001d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff7fff0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fe37fe3001d001d; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fe37fe3001d001d; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff7fff0000; ++ __m256i_out = __lasx_xvsrl_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvsrl_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff10000fff10000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000001010800; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000001010800; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_w 
(__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000008e4bfc4eff0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000001ffee10000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000008e4bfc4eff0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000001ffee10000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0d0d0d000000000d; ++ *((unsigned long *)&__m256i_result[2]) = 0x0d0d0d0000060d0d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0d0d0d000000000d; ++ *((unsigned long *)&__m256i_result[0]) = 0x0d0d0d0000060d0d; ++ __m256i_out = __lasx_xvsrl_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000408080c111414; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000408080c111414; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000408080c111414; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000408080c111414; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000010; ++ __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000e0000000d; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000e0000000d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffff03ffffff07; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffff03ffffff07; ++ __m256i_out = __lasx_xvsrl_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000800080008000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x80008000fff98000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000800080008000; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x80008000fff98000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrl_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000040004000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000040004000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000040404000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000040404000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000040004000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000040004000; ++ __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffefefefe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000040404040; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffefefefe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000040404040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffefefefe; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000040404040; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffefefefe; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000040404040; ++ __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfe01fe01fd02fd02; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000003fc03fc0; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfe01fe01fd02fd02; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000003fc03fc0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000001010100; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000405; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000001010100; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000405; ++ *((unsigned long *)&__m256i_result[3]) = 0xfe01fe017e81fd02; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000003fc001fe; ++ *((unsigned long *)&__m256i_result[1]) = 0xfe01fe017e81fd02; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000003fc001fe; ++ __m256i_out = __lasx_xvsrl_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000010000685e; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000020a4ffffbe4f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000010000685e; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000020a4ffffbe4f; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000003ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001ffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000003ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001ffffffffffff; ++ __m256i_out = __lasx_xvsrl_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000001ffff8000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000001ffff8000; ++ __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfd02fd02fd02fd02; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfd02fd02fd02fd02; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfd02fd02fd02fd02; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfd02fd02fd02fd02; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfd02fd02fd02fd02; ++ *((unsigned long *)&__m256i_result[2]) = 0xfd02fd02fd02fd02; ++ *((unsigned long *)&__m256i_result[1]) = 0xfd02fd02fd02fd02; ++ *((unsigned long *)&__m256i_result[0]) = 0xfd02fd02fd02fd02; ++ __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001fffe0005fff9; ++ *((unsigned long *)&__m256i_op0[2]) = 0x04f004f204f204f0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001fffe0005fff9; ++ *((unsigned long *)&__m256i_op0[0]) = 0x04f004f204f204f0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op1[2]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op1[0]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000002780; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000002780; ++ __m256i_out = __lasx_xvsrl_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrli.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrli.c +new file mode 100644 +index 000000000..e8696701f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrli.c +@@ -0,0 +1,405 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, 
__m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_w (__m256i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000050005; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_h (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_h (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1010101110101011; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1111111211111112; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000004040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000004444; ++ __m256i_out = __lasx_xvsrli_d (__m256i_op0, 0x2e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_d (__m256i_op0, 0x3e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_d (__m256i_op0, 0xd); 
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffcc8000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007dfdff4b; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x003ffff300000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000001f7f7f; ++ __m256i_out = __lasx_xvsrli_w (__m256i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x9240f24a84b18025; ++ *((unsigned long *)&__m256i_op0[2]) = 0x9240f24a84b18025; ++ *((unsigned long *)&__m256i_op0[1]) = 0xb2c0b341807f8006; ++ *((unsigned long *)&__m256i_op0[0]) = 0xb2c0b341807f8006; ++ *((unsigned long *)&__m256i_result[3]) = 0x009200f200840080; ++ *((unsigned long *)&__m256i_result[2]) = 0x009200f200840080; ++ *((unsigned long *)&__m256i_result[1]) = 0x00b200b300800080; ++ *((unsigned long *)&__m256i_result[0]) = 0x00b200b300800080; ++ __m256i_out = __lasx_xvsrli_h (__m256i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000010000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000c40086; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000010000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000c40086; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_h (__m256i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001ffff0001ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001ffff0001ffff; ++ __m256i_out = __lasx_xvsrli_w (__m256i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_b (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_b (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_h (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_d (__m256i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffcb423a587053; ++ *((unsigned long *)&__m256i_op0[2]) = 0x6d46f43e71141b81; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffcb423a584528; ++ *((unsigned long *)&__m256i_op0[0]) = 0x9bdf36c8d78158a1; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000007fffe; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000036a37; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000007fffe; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000004def9; ++ __m256i_out = __lasx_xvsrli_d (__m256i_op0, 0x2d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_h (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_d (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0889088908810881; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0081010000810100; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0889088900810088; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0081010000810100; ++ *((unsigned long *)&__m256i_result[3]) = 0x0004448444844084; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000408080004080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0004448444804080; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000408080004080; ++ __m256i_out = __lasx_xvsrli_d (__m256i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000001d001d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000001d001d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000030003; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000030003; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_h (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_d (__m256i_op0, 0x22); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000077fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000307; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_b (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_b (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000014402080144; ++ 
*((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000014402080144; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000a0010400a; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000a0010400a; ++ __m256i_out = __lasx_xvsrli_w (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000598; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000598; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_w (__m256i_op0, 0x10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_d (__m256i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007f807f80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007f807f80; ++ *((unsigned long *)&__m256i_result[3]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff00; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ff00; ++ __m256i_out = __lasx_xvsrli_w (__m256i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_b (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_h (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001ffff0001ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001ffff0001ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001ffff0001ffff; ++ 
*((unsigned long *)&__m256i_result[0]) = 0x0001ffff0001ffff; ++ __m256i_out = __lasx_xvsrli_w (__m256i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_h (__m256i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_d (__m256i_op0, 0x23); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_h (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x3fffffff3fffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x3fffffff3fffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_w (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00003fff00003fff; ++ __m256i_out = __lasx_xvsrli_w (__m256i_op0, 0x12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffff10; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffff10; ++ *((unsigned long *)&__m256i_result[3]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x3fff3fff3fff3fc4; ++ *((unsigned long *)&__m256i_result[1]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x3fff3fff3fff3fc4; ++ __m256i_out = __lasx_xvsrli_h (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrln.c 
b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrln.c +new file mode 100644 +index 000000000..d54991051 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrln.c +@@ -0,0 +1,425 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrln_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000003868686a20; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0045b8ae81bce1d8; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000003868686a20; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0045b8ae81bce1d8; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00386a20b8aee1d8; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00386a20b8aee1d8; ++ __m256i_out = __lasx_xvsrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x2020000020200000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x2020000020200000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0008000001010000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101000001010000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out 
= __lasx_xvsrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff017e01fe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x017e00ff017e00ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff017e01fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrln_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x88888a6d0962002e; ++ *((unsigned long *)&__m256i_op0[2]) = 0xdb8a3109fe0f0020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000007fff01fffb; ++ *((unsigned long *)&__m256i_op0[0]) = 0xdb8e20990cce025a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff01ff3400000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ff83ff01; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0962002efe0f0020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xff01fffb8667012d; ++ __m256i_out = __lasx_xvsrln_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff8fff8fff8fff8; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfff8fff8fff8fff8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrln_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000fffeffeb; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000fb7afb62; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000fffeffeb; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fb7afb62; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffeffebfb7afb62; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffeffebfb7afb62; ++ __m256i_out = __lasx_xvsrln_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000010000000a; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000010000000a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040; ++ __m256i_out = __lasx_xvsrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff010000ff017e; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x01fe01ae00ff00ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff010000ff017e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x01fe01ae00ff00ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000a00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000010000000a; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000a00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000010000000a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff017e6b803fc0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff017e6b803fc0; ++ __m256i_out = __lasx_xvsrln_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000781; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000078100000064; ++ __m256i_out = __lasx_xvsrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xa1a1a1a1a1a1a1a1; ++ *((unsigned long *)&__m256i_op0[2]) = 0xa1a1a1a15e5e5e5e; ++ *((unsigned long *)&__m256i_op0[1]) = 0xa1a1a1a1a1a1a1a1; ++ *((unsigned long *)&__m256i_op0[0]) = 0xa1a1a1a15e5e5e5e; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xa1a1a1a1a1a15e5e; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xa1a1a1a1a1a15e5e; ++ __m256i_out = __lasx_xvsrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0003800400038004; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000a800b000a800b; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0003800400038004; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000a800b000a800b; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvsrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0080000000800000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0080000000800000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0080000000800000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0080000000800000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0404040404040404; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0404040404040404; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0404040404040404; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0404040404040404; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); 
++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000800400010006d; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000800400010006d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000010000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000010000; ++ __m256i_out = __lasx_xvsrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffff97a2; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffff97a2; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000027; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000027; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffff10; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffff10; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1716151417161514; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1716151417161514; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1716151417161514; ++ *((unsigned long *)&__m256i_op1[0]) = 0x1716151417161514; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0fff0fff0fff0fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0fff0fff0fff0fff; ++ __m256i_out = __lasx_xvsrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlni.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlni.c +new file mode 100644 +index 000000000..0fb6483cf +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlni.c +@@ -0,0 +1,680 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_w_d (__m256i_op0, __m256i_op1, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_b_h (__m256i_op0, __m256i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7f00000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_b_h (__m256i_op0, __m256i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_w_d (__m256i_op0, __m256i_op1, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffc500000002d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000200000034; ++ *((unsigned long *)&__m256i_op1[3]) = 0xbfa3e127c147721f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1729c173836edfbe; ++ *((unsigned long *)&__m256i_op1[1]) = 0xdf91f111808007fb; ++ *((unsigned long *)&__m256i_op1[0]) = 0x5ff1f90ffffbf30f; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ff280016; ++ *((unsigned long *)&__m256i_result[2]) = 0xd193a30f94b9b7df; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000001001a; ++ *((unsigned long *)&__m256i_result[0]) = 0xc88840fdf887fd87; ++ __m256i_out = __lasx_xvsrlni_b_h (__m256i_op0, __m256i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffff00; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000000f; ++ __m256i_out = __lasx_xvsrlni_w_d (__m256i_op0, __m256i_op1, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffc5556aaa8; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffc5556aaa8; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x555555553f800000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x555555553f800000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000007070205; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000002020100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000007070205; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000002020100; ++ __m256i_out = __lasx_xvsrlni_b_h (__m256i_op0, __m256i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x5980000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x5980000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_h_w (__m256i_op0, __m256i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffefe00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000003ff000003ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000003ff000003ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_w_d (__m256i_op0, __m256i_op1, 0x36); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_d_q 
(__m256i_op0, __m256i_op1, 0x73); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00ffffff00ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00ffffff00ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff00ffffff00ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff00ffffff00ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffe01fe01f; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffe01fe01f; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffe01fe01f; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffe01fe01f; ++ __m256i_out = __lasx_xvsrlni_h_w (__m256i_op0, __m256i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000fe01020b0001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000fe01020b0001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0fff0fff00000020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0fff0fff00000020; ++ __m256i_out = __lasx_xvsrlni_h_w (__m256i_op0, __m256i_op1, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xd207e90001fb16ef; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc8eab25698f97e90; ++ *((unsigned long *)&__m256i_op0[1]) 
= 0xd207e90001fb16ef; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc8eab25698f97e90; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x01fb16ef98f97e90; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x01fb16ef98f97e90; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_w_d (__m256i_op0, __m256i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x003f003f003f003f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x003f003f003f003f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_h_w (__m256i_op0, __m256i_op1, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffa0078fffa0074; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffa0078fffa0074; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffa2078fffa2074; ++ *((unsigned long *)&__m256i_op1[1]) = 
0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffa2078fffa2074; ++ *((unsigned long *)&__m256i_result[3]) = 0x01ff01ff01ff01ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x01ff01ff01ff01ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x01ff01ff01ff01ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x01ff01ff01ff01ff; ++ __m256i_out = __lasx_xvsrlni_h_w (__m256i_op0, __m256i_op1, 0x17); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000401000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00003e6c0000cb7a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000401000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00003e6c0000cb7a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x40000000b000032d; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x40000000b000032d; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_w_d (__m256i_op0, __m256i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m256i_result[2]) = 0x01fc03fc01fc03fc; ++ *((unsigned long *)&__m256i_result[1]) = 
0xfffffffffffffffc; ++ *((unsigned long *)&__m256i_result[0]) = 0x01fc03fc01fc03fc; ++ __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0x3e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff00ef0120; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00ef0120; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000e9ece9ec; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000e9ece9ec; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000e9ece9ec; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000e9ece9ec; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00ff0120; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000e9ec0000e9ec; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00ff0120; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000e9ec0000e9ec; ++ __m256i_out = __lasx_xvsrlni_h_w (__m256i_op0, __m256i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffdd001dffe00020; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffdd001dffe00031; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffdd001dffe00020; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffdd001dffe00031; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x3ff73ff83ff73ff8; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x3ff73ff83ff73ff8; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_h_w (__m256i_op0, __m256i_op1, 0x12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x00003fff00003fff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0003000300030003; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0003000300030003; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_h_w (__m256i_op0, __m256i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0003000300030003; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0003000300030003; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_result[3]) = 0x0600060000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0600060000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_h_w (__m256i_op0, __m256i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001fffe0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001fffe00010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001fffe0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001fffe00010001; ++ *((unsigned long *)&__m256i_op1[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0007fff8000ffff0; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000007fff8; ++ *((unsigned long *)&__m256i_result[1]) = 0x0007fff8000ffff0; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000007fff8; ++ __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0020000f0000000f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0010000f0000000f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0020000f0000000f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0010000f0000000f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x1e0000001e002000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x1e0000001e002000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0x27); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff3225; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffff3225; ++ *((unsigned long *)&__m256i_op1[3]) = 0x2221201f1e1d1c1b; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1a19181716151413; ++ *((unsigned long *)&__m256i_op1[1]) = 0x2221201f1e1d1c1b; ++ *((unsigned long *)&__m256i_op1[0]) = 0x1a19181716151413; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000004442403; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000004442403; ++ __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0x63); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fef0000ffff0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fef0000ffff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0xde00fe0000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000fe010000fe01; ++ *((unsigned long *)&__m256i_result[1]) = 0xde00fe0000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000fe010000fe01; ++ __m256i_out = __lasx_xvsrlni_h_w (__m256i_op0, __m256i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff00fe00ff00fe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00fe00ff00fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000007070707; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff07070707; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000007070707; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff07070707; ++ __m256i_out = __lasx_xvsrlni_b_h 
(__m256i_op0, __m256i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x03ff000003ff03ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x03ff000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x03ff000003ff03ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x03ff000000000000; ++ __m256i_out = __lasx_xvsrlni_h_w (__m256i_op0, __m256i_op1, 0x16); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_b_h (__m256i_op0, __m256i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000700000007; ++ *((unsigned long *)&__m256i_result[2]) = 0x0007ffff0007ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000700000007; ++ *((unsigned long *)&__m256i_result[0]) = 0x0007ffff0007ffff; ++ __m256i_out = __lasx_xvsrlni_w_d (__m256i_op0, __m256i_op1, 0x2d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0x66); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000e000e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000e000e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000e000e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000e000e; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000e0000000e00; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000e0000000e00; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrlni_h_w (__m256i_op0, __m256i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfc003802fc000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfc003802fc000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x03802fc000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x03802fc000000000; ++ __m256i_out = __lasx_xvsrlni_w_d (__m256i_op0, __m256i_op1, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0x5a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x080808000828082f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0808080008280820; ++ *((unsigned long *)&__m256i_op0[1]) = 0x080808000828082f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0808080008280820; ++ *((unsigned long *)&__m256i_op1[3]) = 0x04e8296f18181818; ++ *((unsigned long *)&__m256i_op1[2]) = 0x132feea900000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x04e8296f18181818; ++ *((unsigned long *)&__m256i_op1[0]) = 0x132feea900000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00828082f0808080; ++ *((unsigned long *)&__m256i_result[2]) = 0xf18181818132feea; ++ *((unsigned long *)&__m256i_result[1]) = 0x00828082f0808080; ++ *((unsigned long *)&__m256i_result[0]) = 0xf18181818132feea; ++ __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0x24); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000200000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000200000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_w_d (__m256i_op0, __m256i_op1, 0x39); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0x43); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfe01fe01fc01fc01; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000003fc03bbc; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfe01fe01fc01fc01; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000003fc03bbc; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfc01000000003fc0; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfc01000000003fc0; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0x10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 
0x000000000001fff0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000feff0001ffb8; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000001fff0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000feff0001ffb8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_h_w (__m256i_op0, __m256i_op1, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000126000200; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2555205ea7bc4020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000126000200; ++ *((unsigned long *)&__m256i_op0[0]) = 0x2555205ea7bc4020; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0fffffff10000006; ++ *((unsigned long *)&__m256i_op1[2]) = 0x10ffffff10000006; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0fffffff10000006; ++ *((unsigned long *)&__m256i_op1[0]) = 0x10ffffff10000006; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000004980008; ++ *((unsigned long *)&__m256i_result[2]) = 0x003ffffffc400000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000004980008; ++ *((unsigned long *)&__m256i_result[0]) = 0x003ffffffc400000; ++ __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0x46); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00f0000000f00010; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff0ff00fff0ff10; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00f0000000f00010; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff0ff00fff0ff10; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0087ff87f807ff87; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0087ff87f807ff87; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0x68); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_b_h 
(__m256i_op0, __m256i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0x50); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000050005; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_op1[3]) = 0xf007fe76f008fe19; ++ *((unsigned long *)&__m256i_op1[2]) = 0xf08aff01f07cc291; ++ *((unsigned long *)&__m256i_op1[1]) = 0xf007fe76f008fe19; ++ *((unsigned long *)&__m256i_op1[0]) = 0xf08aff01f07cc291; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000001400; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000003c01ff9; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000003c01ff9; ++ __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0x66); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlr.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlr.c +new 
file mode 100644 +index 000000000..22e62a3e7 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlr.c +@@ -0,0 +1,515 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x40d74f979f99419f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x40d74f979f99419f; ++ __m256i_out = __lasx_xvsrlr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff8080000004000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000080000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff8080000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 
0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffefffffefc; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000200000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000200000000; ++ __m256i_out = __lasx_xvsrlr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff0000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff0000000000080; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff0000000000080; ++ *((unsigned long *)&__m256i_result[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff0000000000080; ++ __m256i_out = __lasx_xvsrlr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x6300000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x6300000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x6300000000000000; ++ 
*((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x6300000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7ff0000000000000; ++ __m256i_out = __lasx_xvsrlr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvsrlr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000001020202; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000001020202; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000002222; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00003ddd80007bbb; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000002222; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00003ddd80007bbb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff800000000000; ++ __m256i_out = __lasx_xvsrlr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x009f00f8007e00f0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007f007f0081007f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x009f00f8007e00f0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007f007f0081007f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0ea85f60984a8555; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00a21ef3246995f3; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1189ce8000fa14ed; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0e459089665f40f3; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x000100f800000008; ++ *((unsigned long *)&__m256i_result[2]) = 0x0020001000000010; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000f800000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0004000000000010; ++ __m256i_out = __lasx_xvsrlr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00003fc00000428a; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00003fc00000428a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvsrlr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvsrlr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000008000; ++ __m256i_out = __lasx_xvsrlr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7f7f000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7f7f000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100010001; ++ __m256i_out = __lasx_xvsrlr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x80000000ff800000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x80000000ff800000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvsrlr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrlr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000200000002; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7c00000880008000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7c00000880008000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0100000001000100; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0100000001000100; ++ __m256i_out = __lasx_xvsrlr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000064; ++ __m256i_out = __lasx_xvsrlr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000200000008; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000200000008; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000200000008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000200000008; ++ __m256i_out = __lasx_xvsrlr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000430207f944; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000430207f944; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffff80; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffff80; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000430207f944; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000430207f944; ++ __m256i_out = __lasx_xvsrlr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000038000000268; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000038000000268; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ff0100ff0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ff01; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ff0100ff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ff01; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffff010ff0; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffff010ff0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ff0100ff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000201; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ff0100ff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000201; ++ __m256i_out = __lasx_xvsrlr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000007f0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000007f0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000010006d; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000010006d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000800400010006d; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000800400010006d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff01fb0408; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf2b180c9fc1fefdc; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff01fb0408; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf2b180c9fc1fefdc; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000000000b7; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffefff80; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000000000b7; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffefff80; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[2]) = 0xf2b180c9fc1fefdc; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[0]) = 0xf2b180c9fc1fefdc; ++ __m256i_out = __lasx_xvsrlr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_result[3]) = 0xff1cff1cff1c3fc7; ++ *((unsigned long *)&__m256i_result[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_result[1]) = 0xff1cff1cff1c3fc7; ++ *((unsigned long *)&__m256i_result[0]) = 0xff1cff1cff1cff1c; ++ __m256i_out = __lasx_xvsrlr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x6b6b6b6b6b6b6b6b; ++ *((unsigned long *)&__m256i_op0[2]) = 0x6b6b6b6b6b6b6b6b; ++ *((unsigned long *)&__m256i_op0[1]) = 0x6b6b6b6b6b6b6b6b; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6b6b6b6b6b6b6b6b; ++ *((unsigned long *)&__m256i_op1[3]) = 0x6b6b6b6b6b6b6b6b; ++ *((unsigned long *)&__m256i_op1[2]) = 0x6b6b6b6b6b6b6b6b; ++ *((unsigned long *)&__m256i_op1[1]) = 0x6b6b6b6b6b6b6b6b; ++ *((unsigned long *)&__m256i_op1[0]) = 0x6b6b6b6b6b6b6b6b; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000d6d6d; ++ __m256i_out = __lasx_xvsrlr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000f0f0f0f0f0f0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000f0f0f0f0f0f0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff01ff01ff01f010; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0xff01ff01ff01f010; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff01ff01ff01f010; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff01ff01ff01f010; ++ *((unsigned long *)&__m256i_result[3]) = 0x000078780000f0f1; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000078780000f0f1; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffc00040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffc00040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op1[2]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op1[0]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x1080108010060002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x1080108010060002; ++ __m256i_out = __lasx_xvsrlr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlri.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlri.c +new file mode 100644 +index 000000000..71f770aff +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlri.c +@@ -0,0 +1,416 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, 
__m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlri_d (__m256i_op0, 0x33); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000; ++ __m256i_out = __lasx_xvsrlri_w (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000001000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000001000000; ++ __m256i_out = __lasx_xvsrlri_d (__m256i_op0, 0x28); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000505; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlri_b (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff0002fffefffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff0002ff7e8286; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff0002fffefffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff0002ffff0001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0202000002020202; ++ *((unsigned long *)&__m256i_result[2]) = 0x0202000002010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0202000002020202; ++ *((unsigned long *)&__m256i_result[0]) = 0x0202000002020000; ++ __m256i_out = __lasx_xvsrlri_b (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvsrlri_b (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001000000010; ++ __m256i_out = __lasx_xvsrlri_w (__m256i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xe1616161e1614e60; ++ *((unsigned long *)&__m256i_op0[2]) = 0xe1616161e1614e60; ++ *((unsigned long *)&__m256i_op0[1]) = 0xe1616161e1614e60; ++ *((unsigned long *)&__m256i_op0[0]) = 0xe1616161e1614e60; ++ *((unsigned long *)&__m256i_result[3]) = 0x0703030307030203; ++ *((unsigned long *)&__m256i_result[2]) = 0x0703030307030203; ++ *((unsigned long *)&__m256i_result[1]) = 0x0703030307030203; ++ *((unsigned long *)&__m256i_result[0]) = 0x0703030307030203; ++ __m256i_out = __lasx_xvsrlri_b (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00003f3fc6c68787; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00003f3f87870000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00003f3fc6c68787; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00003f3f87870000; ++ __m256i_out = __lasx_xvsrlri_d (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0101010183f95466; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x01010101d58efe94; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000101000083f95; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000101000001010; ++ *((unsigned long *)&__m256i_result[1]) = 0x00001010000d58f0; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000101000001010; ++ __m256i_out = __lasx_xvsrlri_w (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvsrlri_w (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlri_d (__m256i_op0, 0x23); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0010002000100020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0010002000100020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0010002000100020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0010002000100020; ++ __m256i_out = __lasx_xvsrlri_h (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlri_w (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlri_d (__m256i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned 
long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlri_d (__m256i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff0000ffff0001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff0000ffff0001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0020000000200000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0020000000200000; ++ __m256i_out = __lasx_xvsrlri_h (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlri_w (__m256i_op0, 0x12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvsrlri_b (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlri_h (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlri_b (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000040000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000040000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000020000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000020000; ++ __m256i_out = __lasx_xvsrlri_b (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000e000e000e000e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000e000e000e000e; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlri_d (__m256i_op0, 0x39); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000040000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000040000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000040000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000040000000000; ++ __m256i_out = __lasx_xvsrlri_d (__m256i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x04e8296f18181818; ++ *((unsigned long *)&__m256i_op0[2]) = 0x132feea900000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x04e8296f18181818; ++ *((unsigned long *)&__m256i_op0[0]) = 0x132feea900000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x04e8296f18181818; ++ *((unsigned long *)&__m256i_result[2]) = 0x132feea900000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x04e8296f18181818; ++ *((unsigned long *)&__m256i_result[0]) = 0x132feea900000000; ++ __m256i_out = __lasx_xvsrlri_h (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000038000000268; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000038000000268; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000001200000011a; ++ *((unsigned long *)&__m256i_result[2]) = 0x2040204020402040; ++ *((unsigned long *)&__m256i_result[1]) = 0x000001200000011a; ++ *((unsigned long *)&__m256i_result[0]) = 0x2040204020402040; ++ __m256i_out = __lasx_xvsrlri_b (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff81001dff9dff9e; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff81001dff9d003b; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff81001dff9dff9e; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff81001dff9d003b; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001000000010; ++ __m256i_out = __lasx_xvsrlri_w (__m256i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlri_w (__m256i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlri_b (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlri_b (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000fffa003e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000fffb009c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000fffa003e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffb009c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002; ++ __m256i_out = __lasx_xvsrlri_d (__m256i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[0]) = 
0xfffffffe00000001; ++ __m256i_out = __lasx_xvsrlri_b (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0020004000400040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0020004000400040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0020004000400040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0020004000400040; ++ __m256i_out = __lasx_xvsrlri_h (__m256i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000800000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000800000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000800000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000800000; ++ __m256i_out = __lasx_xvsrlri_b (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fffffffffbfffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000800000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffbfffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000800000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0102020202010202; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000010000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0102020202010202; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000010000; ++ __m256i_out = __lasx_xvsrlri_b (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000006; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000006; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0008000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0008000000000000; ++ __m256i_out = __lasx_xvsrlri_h (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrn.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrn.c +new file mode 100644 +index 000000000..cbc1de371 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrn.c +@@ -0,0 +1,410 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffff328dfff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6651bfff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) 
= 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0202020201010000; ++ __m256i_out = __lasx_xvsrlrn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrlrn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000050005; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000505; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x108659e46485f7e1; ++ *((unsigned long *)&__m256i_op1[2]) = 0x4df5b1a3ed5e02c1; ++ *((unsigned long *)&__m256i_op1[1]) = 0x108659e46485f7e1; ++ *((unsigned long *)&__m256i_op1[0]) = 0x4df5b1a3ed5e02c1; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvsrlrn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000001a00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000001a00; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000001a00; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000001a00; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0002000200020002; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff820002ff820002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0002000200020002; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff820002ff820002; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00020002ff820002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00020002ff820002; ++ __m256i_out = __lasx_xvsrlrn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00020421d7d41124; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00020421d7d41124; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000f0f0f0f0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000f0f0f0f0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002; ++ __m256i_out = __lasx_xvsrlrn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000ff000000ff; ++ __m256i_out = __lasx_xvsrlrn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff000200000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff000200000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ff020000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ff020000; ++ __m256i_out = __lasx_xvsrlrn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrlrn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000001fe01fe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000ff0100; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000001fe01fe; ++ *((unsigned long *)&__m256i_op0[0]) 
= 0x0000000000ff0100; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000007c8; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000007c8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x01fe01fe0000ff01; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x01fe01fe0000ff01; ++ __m256i_out = __lasx_xvsrlrn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrlrn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ 
__m256i_out = __lasx_xvsrlrn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xf9f9f9f900000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xf9f9f9f900000002; ++ __m256i_out = __lasx_xvsrlrn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ff0100ff0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ff01; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ff0100ff0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ff01; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000004843ffdff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000004843ffdff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00043fff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00043fff00000000; ++ __m256i_out = __lasx_xvsrlrn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 
0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff1cff1b00e300e4; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff1cff1b00e300e4; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff1cff1b00e300e4; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff1cff1b00e30100; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0020000000200000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x002000000020ffff; ++ __m256i_out = __lasx_xvsrlrn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffdbff980038ffaf; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffafffe80004fff1; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffdbff980038ffaf; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffafffe80004fff1; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000020202020202; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101000000010000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000020202020202; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101000000010000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000e3fec0004fff1; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000e3fec0004fff1; ++ __m256i_out = __lasx_xvsrlrn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrni.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrni.c +new file mode 100644 +index 000000000..8fc7a0029 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrni.c +@@ -0,0 +1,455 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_d_q 
(__m256i_op0, __m256i_op1, 0x7a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0100010001000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0100010001000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_b_h (__m256i_op0, __m256i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000808000008080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000808000008081; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000081; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_d_q (__m256i_op0, __m256i_op1, 0x68); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000002a5429; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000002a5429; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_d_q (__m256i_op0, __m256i_op1, 0x30); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000801380f380fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000801380f300fb; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000007f3a40; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_d_q (__m256i_op0, __m256i_op1, 0x42); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_d_q (__m256i_op0, __m256i_op1, 0x56); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xf0000000f0000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xf0000000f0000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_b_h (__m256i_op0, __m256i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_h_w (__m256i_op0, __m256i_op1, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0404040404040404; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0404040404040404; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_b_h (__m256i_op0, __m256i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x817f11ed81800ff0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x817f11ed81800ff0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x817f11ed81800ff0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x817f11ed81800ff0; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000004fc480040; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000004fc480040; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000004fc480040; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000004fc480040; ++ __m256i_out = __lasx_xvsrlrni_h_w (__m256i_op0, __m256i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned 
long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0004000404040404; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000400000004; ++ *((unsigned long *)&__m256i_result[1]) = 0x0004000400000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000400000004; ++ __m256i_out = __lasx_xvsrlrni_b_h (__m256i_op0, __m256i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_h_w (__m256i_op0, __m256i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_b_h (__m256i_op0, __m256i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x80208020c22080a7; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x80208020c22080a7; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xdf80ff20df80ff20; ++ *((unsigned long *)&__m256i_op1[2]) = 0xdfc2ff20df80ffa7; ++ *((unsigned long *)&__m256i_op1[1]) = 0xdf80ff20df80ff20; ++ *((unsigned long *)&__m256i_op1[0]) = 0xdfc2ff20df80ffa7; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000840100000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xbffebffec0febfff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000840100000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xbffebffec0febfff; ++ __m256i_out = __lasx_xvsrlrni_b_h (__m256i_op0, __m256i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffc0c0ffffbfc0; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffc0c0ffffbfc0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00003f3f0000400d; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00003f3f0000400d; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x1000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x1000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_d_q (__m256i_op0, __m256i_op1, 0x44); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffe00000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffe00000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_d_q (__m256i_op0, __m256i_op1, 0x13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfc00000000000048; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfc00000000000048; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xbfffa004fffd8000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xbfffa004fffd8000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00003f0000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00002fffe8013fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00003f0000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00002fffe8013fff; ++ __m256i_out 
= __lasx_xvsrlrni_d_q (__m256i_op0, __m256i_op1, 0x12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_b_h (__m256i_op0, __m256i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000080800000808; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000080800000808; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000101000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000101000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_d_q (__m256i_op0, __m256i_op1, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00010001000100; ++ *((unsigned 
long *)&__m256i_op0[1]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00010001000100; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000040004000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000004000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000040004000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000004000000000; ++ __m256i_out = __lasx_xvsrlrni_d_q (__m256i_op0, __m256i_op1, 0x5a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_b_h (__m256i_op0, __m256i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00b2fe28e4420609; ++ *((unsigned long *)&__m256i_op0[2]) = 0x028da7fe15020000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00b2fe28e4420609; ++ *((unsigned long *)&__m256i_op0[0]) = 0x028da7fe15020000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000598; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000598; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_d_q (__m256i_op0, __m256i_op1, 0x6d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000800000010; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000800000010; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000002000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000002000000; ++ __m256i_out = __lasx_xvsrlrni_d_q (__m256i_op0, __m256i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000003ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001ffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000003ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001ffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000010000005e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000010000005e; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_w_d (__m256i_op0, __m256i_op1, 0x3c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0040000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0040000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0040000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0040000000000000; ++ __m256i_out = __lasx_xvsrlrni_w_d (__m256i_op0, __m256i_op1, 0x2a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000001200000012; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000001200000012; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000001200000012; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000001200000012; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_b_h (__m256i_op0, __m256i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned 
long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff0fff0fff0fc00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff0fff0fff0fc00; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000f880f87e; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000f880f87e; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000008000; ++ __m256i_out = __lasx_xvsrlrni_h_w (__m256i_op0, __m256i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000081220000812c; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000812000008120; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000081220000812c; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000812000008120; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_result[2]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_result[0]) = 0xfefefefefefefefe; ++ __m256i_out = __lasx_xvsrlrni_b_h (__m256i_op0, __m256i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff0000ffff0000; ++ __m256i_out = __lasx_xvsrlrni_b_h (__m256i_op0, __m256i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +-- +2.33.0 + diff --git a/LoongArch-Add-tests-for-ASX-vector-xvssran-xvssrani-.patch b/LoongArch-Add-tests-for-ASX-vector-xvssran-xvssrani-.patch new file mode 100644 index 0000000000000000000000000000000000000000..534b17d51c718f31cc5b3bb5efc88e73b780b454 --- /dev/null +++ b/LoongArch-Add-tests-for-ASX-vector-xvssran-xvssrani-.patch @@ -0,0 +1,4258 @@ +From 445ae07ab55a647f7aec97c2334fb276a44f2af1 Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Wed, 13 Sep 2023 12:37:41 +0800 +Subject: [PATCH 121/124] LoongArch: Add tests for ASX vector + xvssran/xvssrani/xvssrarn/xvssrarni instructions. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lasx/lasx-xvssran.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvssrani.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvssrarn.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvssrarni.c: New test. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lasx/lasx-xvssran.c | 905 ++++++++++++ + .../loongarch/vector/lasx/lasx-xvssrani.c | 1235 +++++++++++++++++ + .../loongarch/vector/lasx/lasx-xvssrarn.c | 905 ++++++++++++ + .../loongarch/vector/lasx/lasx-xvssrarni.c | 1160 ++++++++++++++++ + 4 files changed, 4205 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssran.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrani.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarn.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarni.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssran.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssran.c +new file mode 100644 +index 000000000..fdb0c25f1 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssran.c +@@ -0,0 +1,905 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00007ffe81fdfe03; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7ffe800000000000; ++ __m256i_out = __lasx_xvssran_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffefffffffef; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffef000004ea; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffefffffffef; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffefffffffef; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1717171717171717; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000607f700000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1717171717171717; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000607f700000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffe81; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007fffffff; ++ __m256i_out = __lasx_xvssran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00f9f90079f9f9f9; ++ *((unsigned long *)&__m256i_op1[2]) = 0x79f9f9f900000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00f9f90079f9f9f9; ++ *((unsigned long *)&__m256i_op1[0]) = 0x79f9f9f900000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000007f7f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000007f7f7f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000007f7f; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x000000007f007f78; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000033007e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000021; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00007f7f00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007f7f00007fff; ++ __m256i_out = __lasx_xvssran_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff0000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff0000000000080; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000080000080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000080000080; ++ __m256i_out = __lasx_xvssran_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000ff00000000; ++ __m256i_out = __lasx_xvssran_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000007fff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000007fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000007fff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000007fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000002aaad555; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000002aaad555; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00007fff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007fff00000000; ++ __m256i_out = __lasx_xvssran_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffc00000ffc0ffc0; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffc00000ffc0ffc0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x108659e46485f7e1; ++ *((unsigned long *)&__m256i_op1[2]) = 0x4df5b1a3ed5e02c1; ++ *((unsigned long *)&__m256i_op1[1]) = 0x108659e46485f7e1; ++ *((unsigned long *)&__m256i_op1[0]) = 0x4df5b1a3ed5e02c1; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffcfee0fe00ffe0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffcfee0fe00ffe0; ++ 
__m256i_out = __lasx_xvssran_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000001fff9fff8; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000001fff9fff8; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000001fff9fff8; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000001fff9fff8; ++ *((unsigned long *)&__m256i_op1[3]) = 0xe1616161e1614e60; ++ *((unsigned long *)&__m256i_op1[2]) = 0xe1616161e1614e60; ++ *((unsigned long *)&__m256i_op1[1]) = 0xe1616161e1614e60; ++ *((unsigned long *)&__m256i_op1[0]) = 0xe1616161e1614e60; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvssran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffff900000003; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffff900000003; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffff0000; ++ __m256i_out = __lasx_xvssran_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000100000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000100000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00000000; ++ __m256i_out = __lasx_xvssran_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff2400000000ff00; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffeffe4fffeff00; ++ *((unsigned long *)&__m256i_op1[1]) 
= 0xff6400000000ff00; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffeff66fffeff00; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssran_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffefffe00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffefffefffefffd; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000100da000100fd; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001ffe20001fefd; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001009a000100fd; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001ff640001fefd; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000fe0100000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000fe0100000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007fffffff; ++ __m256i_out = __lasx_xvssran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00c200c200c200c2; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00c200c200c200bb; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00c200c200c200c2; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00c200c200c200bb; ++ *((unsigned long *)&__m256i_op1[3]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256i_op1[2]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256i_op1[1]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256i_op1[0]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssran_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssran_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010100; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010100; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xdbcbdbcbecececec; ++ *((unsigned long *)&__m256i_op1[2]) = 0xdbcbdbcb0000dbcb; ++ *((unsigned long *)&__m256i_op1[1]) = 0xdbcbdbcbecececec; ++ *((unsigned long *)&__m256i_op1[0]) = 0xdbcbdbcb0000dbcb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000226200005111; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000016000000480d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000226200005111; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000016000000480d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[2]) = 0xd0d8eecf383fdf0d; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[0]) = 0xd0d8eecf383fdf0d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[2]) = 0x1131288800000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x1131288800000002; ++ __m256i_out = __lasx_xvssran_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010200000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010200000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff040000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff040000000000; ++ *((unsigned long *)&__m256i_op0[0]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x007f8080007f007f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007f8080007f007f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x007f8080007f007f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007f8080007f007f; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00007f3f7f007f1f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007f3f7f007f1f; ++ __m256i_out = __lasx_xvssran_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x007f8080007f007f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x007f8080007f007f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x007f8080007f007f; ++ *((unsigned long *)&__m256i_op1[0]) = 
0x007f8080007f007f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x7fffffff7fffffff; ++ __m256i_out = __lasx_xvssran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000ff00000000; ++ __m256i_out = __lasx_xvssran_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000077fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000007ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8d8d72728d8d7272; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8d8d72728d8d8d8d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8d8d72728d8d7272; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8d8d72728d8d8d8d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000; ++ __m256i_out = __lasx_xvssran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000200000008; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000200000008; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000001010800; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000001010800; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000fffff800; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000fffff800; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000001010800; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000001010800; ++ __m256i_out = __lasx_xvssran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0ff0000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0ff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000080; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000430207f944; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000430207f944; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff0008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffff0008; ++ __m256i_out = __lasx_xvssran_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfdfdfdfdfdfdfdfd; ++ *((unsigned long *)&__m256i_op0[2]) = 0xe27fe2821d226278; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfdfdfdfdfdfdfdfd; ++ *((unsigned long *)&__m256i_op0[0]) = 0xe27fe2821d226278; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvssran_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000000e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000000e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000000d; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000000d; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000010001; ++ __m256i_out = __lasx_xvssran_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000400080ffc080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080ff0080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000400080ffc080; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080ff0080; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000200000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000200000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff000000000080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff000000000080; ++ __m256i_out = __lasx_xvssran_bu_h (__m256i_op0, 
__m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007f807f80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007f807f80; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000007f7f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000007f7f; ++ __m256i_out = __lasx_xvssran_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000001fff0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000feff0001ffb8; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000001fff0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000feff0001ffb8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffff1cff1c; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff1cff18; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffff1cff1c; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff1cff18; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000070002000a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000070002000a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x001fffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x001fffffffffffff; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffe7ffffffe7; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffe7ffffffe7; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffe7ffffffe7; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffe7ffffffe7; ++ *((unsigned long *)&__m256i_op1[3]) = 0xbf3ffffffffeffed; ++ *((unsigned long *)&__m256i_op1[2]) = 0xbf3ffffffffeffed; ++ *((unsigned long *)&__m256i_op1[1]) = 0xbf3ffffffffeffed; ++ *((unsigned long *)&__m256i_op1[0]) = 0xbf3ffffffffeffed; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xe013fcf2e015fc38; ++ *((unsigned long *)&__m256i_op0[2]) = 0xe013fd00dff78420; ++ *((unsigned long *)&__m256i_op0[1]) = 0xe013fcf2e015fc38; ++ *((unsigned long *)&__m256i_op0[0]) = 0xe013fd00dff78420; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8282828282828282; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8768876887688769; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8282828282828282; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8768876887688769; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000003fffc0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000003fffc0; ++ __m256i_out = __lasx_xvssran_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrani.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrani.c +new file mode 100644 +index 000000000..dd3c2c6f6 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrani.c +@@ -0,0 +1,1235 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int 
unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7f057f0b7f5b007f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000007f007f5; ++ __m256i_out = __lasx_xvssrani_hu_w (__m256i_op0, __m256i_op1, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000001fc000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000c475ceb40000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000fb0819280000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x074132a240000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000003a0200; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000000c9; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_hu_w (__m256i_op0, __m256i_op1, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_hu_w (__m256i_op0, __m256i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00007fff7fffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00007fff7fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_d_q (__m256i_op0, __m256i_op1, 0x37); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvssrani_b_h (__m256i_op0, __m256i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffff8001; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffff8001; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffff0ffff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffff0ffff0000; ++ __m256i_out = __lasx_xvssrani_b_h (__m256i_op0, __m256i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000080008000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000080008000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x000000007fff7fff; ++ __m256i_out = __lasx_xvssrani_h_w (__m256i_op0, __m256i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000001ffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000001ffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000001ffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000001ffffff; ++ __m256i_out = __lasx_xvssrani_b_h (__m256i_op0, __m256i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_b_h (__m256i_op0, __m256i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_du_q (__m256i_op0, __m256i_op1, 0x73); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_h_w (__m256i_op0, __m256i_op1, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_b_h (__m256i_op0, __m256i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0100010001000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0100010001000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0004000400040004; ++ __m256i_out = __lasx_xvssrani_h_w (__m256i_op0, __m256i_op1, 0x16); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff0000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff0000000000080; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_wu_d (__m256i_op0, __m256i_op1, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7f80780000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7f80780000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000f0000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000f0000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x1fe01e0000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x1fe01e0000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_w_d (__m256i_op0, __m256i_op1, 0x22); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_du_q (__m256i_op0, __m256i_op1, 0x6b); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xce7ffffffffffffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x6300000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xce7ffffffffffffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x6300000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff39ffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff39ffffff; ++ __m256i_out = __lasx_xvssrani_d_q (__m256i_op0, __m256i_op1, 0x5e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_w_d (__m256i_op0, __m256i_op1, 0x3b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0400040004000400; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffc0000fffc0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffc0000fffc0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffc0000fffc0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffc0000fffc0000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_bu_h (__m256i_op0, __m256i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff9fff9fff9fff9; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff90000fff9fff9; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfff9fff9fff9fff9; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff90000fff9fff9; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_bu_h (__m256i_op0, __m256i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x01fe8001b72e0001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xb72e8001b72eaf12; ++ *((unsigned long *)&__m256i_op0[1]) = 0x01fe000247639d9c; ++ *((unsigned long *)&__m256i_op0[0]) = 0xb5308001b72eaf12; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_d_q (__m256i_op0, __m256i_op1, 0x26); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000154dc84; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000089; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000010000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000c40086; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000010000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000c40086; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_w_d (__m256i_op0, __m256i_op1, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_w_d (__m256i_op0, __m256i_op1, 0x3f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fff7fff05407fff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fff7fff05407fff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000007fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000007fff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00001fff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00001fff; ++ __m256i_out = __lasx_xvssrani_hu_w (__m256i_op0, __m256i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x38f7414938f7882f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x38f7414938f78830; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000801380f380fe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000801380f300fb; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000008; ++ __m256i_out = __lasx_xvssrani_du_q (__m256i_op0, __m256i_op1, 0x2c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0303030303020000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0303030303020000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_h_w (__m256i_op0, __m256i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_wu_d (__m256i_op0, __m256i_op1, 0x31); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 
0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000007; ++ __m256i_out = __lasx_xvssrani_du_q (__m256i_op0, __m256i_op1, 0x4d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_du_q (__m256i_op0, __m256i_op1, 0x59); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_bu_h (__m256i_op0, __m256i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_h_w (__m256i_op0, __m256i_op1, 0x13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0002000200020002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvssrani_wu_d 
(__m256i_op0, __m256i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xd04752cdd5543b56; ++ *((unsigned long *)&__m256i_op0[2]) = 0x6906e68064f3d78b; ++ *((unsigned long *)&__m256i_op0[1]) = 0xd04752cdd5543b56; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6906e68064f3d78b; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000ff1100; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000004560420; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000ff1100; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000004560420; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff00ffff00ff00; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000fff00004542; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff00ffff00ff00; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000fff00004542; ++ __m256i_out = __lasx_xvssrani_bu_h (__m256i_op0, __m256i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xdf00000052a00000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5b7f00ff5b7f00ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xdf00000052a00000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5b7f00ff5b7f00ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00c0000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0040000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000c0000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000040000000; ++ __m256i_out = __lasx_xvssrani_hu_w (__m256i_op0, __m256i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000fffffe02; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000300000005fe; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x00000000ffffff02; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000300000005fe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0007fd00000f02ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000001fffeff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff00fe00feff02ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000ffffffff00; ++ __m256i_out = __lasx_xvssrani_b_h (__m256i_op0, __m256i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000007f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000002000000018; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000002000000019; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000200000001e; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000002000000019; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0004000000030000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000400000003c000; ++ __m256i_out = __lasx_xvssrani_d_q (__m256i_op0, __m256i_op1, 0x33); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x009c3e201e39e7e3; ++ *((unsigned long *)&__m256i_op0[2]) = 0x87c1135043408bba; ++ *((unsigned long *)&__m256i_op0[1]) = 0x009c3e201e39e7e3; ++ *((unsigned long *)&__m256i_op0[0]) = 0x87c1135043408bba; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_wu_d (__m256i_op0, __m256i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001b0b1b4b5dd9f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7f7f7f5c8f374980; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001b0b1b4b5dd9f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7f7f7f5c8f374980; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[2]) = 0xd0d8eecf383fdf0d; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[0]) = 0xd0d8eecf383fdf0d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100007f7f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100007f7f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000000; ++ __m256i_out = __lasx_xvssrani_wu_d (__m256i_op0, __m256i_op1, 0x30); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_wu_d (__m256i_op0, __m256i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_w_d (__m256i_op0, __m256i_op1, 0x39); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_hu_w (__m256i_op0, __m256i_op1, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ 
*((unsigned long *)&__m256i_op0[3]) = 0x007c7fff00007fff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00817fff00810000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x007c7fff00007fff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00817fff00810000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_du_q (__m256i_op0, __m256i_op1, 0x7c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_bu_h (__m256i_op0, __m256i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4000000000000000; ++ 
*((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_w_d (__m256i_op0, __m256i_op1, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000457d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000b03f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000457d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000b03f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_wu_d (__m256i_op0, __m256i_op1, 0x3b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_h_w (__m256i_op0, __m256i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x2000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x2000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_d_q (__m256i_op0, __m256i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0f000f000f000f00; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0f000f000f000f00; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; 
++ __m256i_out = __lasx_xvssrani_bu_h (__m256i_op0, __m256i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_hu_w (__m256i_op0, __m256i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x01ff0020ff1f001f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fe1ffe0ffe1ffe0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x01ff0020ff1f001f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fe1ffe0ffe1ffe0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x007fc0083fc7c007; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x007fc0083fc7c007; ++ __m256i_out = __lasx_xvssrani_d_q (__m256i_op0, __m256i_op1, 0x42); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00067fff00047fff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00027fff000080fe; 
++ *((unsigned long *)&__m256i_op0[1]) = 0x00067fff00047fff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00027fff000080fe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x067f047f027f0080; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x067f047f027f0080; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_h_w (__m256i_op0, __m256i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_h_w (__m256i_op0, __m256i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000007f007f007f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000007f007f007f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0af57272788754ab; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000005e80; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0x0af57272788754ab; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000005e80; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000f0f0f; ++ *((unsigned long *)&__m256i_result[2]) = 0x7f7f7f7f0000007f; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000f0f0f; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f7f7f7f0000007f; ++ __m256i_out = __lasx_xvssrani_b_h (__m256i_op0, __m256i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_w_d (__m256i_op0, __m256i_op1, 0x32); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op0[2]) = 0x01ffff4300ffff00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op0[0]) = 0x01ffff4300ffff00; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000040004000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000004000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000040004000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000004000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100000000; ++ 
*((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100000000; ++ __m256i_out = __lasx_xvssrani_d_q (__m256i_op0, __m256i_op1, 0x2e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_hu_w (__m256i_op0, __m256i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_du_q (__m256i_op0, __m256i_op1, 0x4b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_h_w (__m256i_op0, __m256i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0004000f00100003; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000400030010000f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0004000f00100003; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000400030010000f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0400100004001000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0400100004001000; ++ __m256i_out = __lasx_xvssrani_hu_w (__m256i_op0, __m256i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3eab77367fff4848; ++ *((unsigned long *)&__m256i_op0[2]) = 0x408480007fff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3eab77367fff4848; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x408480007fff0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000700000008; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000700000008; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrani_w_d (__m256i_op0, __m256i_op1, 0x3b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_du_q (__m256i_op0, __m256i_op1, 0x55); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc07f8000c07f8000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc07f8000c07f8000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op1[0]) = 
0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000fff01fe0; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000fff01fe0; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrani_w_d (__m256i_op0, __m256i_op1, 0x2a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000fe96fe95; ++ *((unsigned long *)&__m256i_op0[2]) = 0x6afc01000001ff00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000fe96fe95; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6afc01000001ff00; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000010000ff00; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000010000ff00; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_d_q (__m256i_op0, __m256i_op1, 0x7e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1010101010101010; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1010101010101010; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000040404000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000040404000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000404; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000001010101; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000404; ++ __m256i_out = __lasx_xvssrani_h_w (__m256i_op0, __m256i_op1, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0404000004040000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0404000004040000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0404000004040000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0404000004040000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0404000004040000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0404000004040000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0404000004040000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0404000004040000; ++ *((unsigned long *)&__m256i_result[3]) = 0x4000400040004000; ++ *((unsigned long *)&__m256i_result[2]) = 0x4000400040004000; ++ *((unsigned long *)&__m256i_result[1]) = 0x4000400040004000; ++ *((unsigned long *)&__m256i_result[0]) = 0x4000400040004000; ++ __m256i_out = __lasx_xvssrani_bu_h (__m256i_op0, __m256i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000020202000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000020202000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_w_d (__m256i_op0, __m256i_op1, 0x3d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000007f433c78; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000007f433c78; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvssrani_bu_h (__m256i_op0, __m256i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ff88ffc0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ff78ffc0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ff88ffc0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ff78ffc0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000001ff1; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000001ff1; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_d_q (__m256i_op0, __m256i_op1, 0x53); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000010; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000010; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_du_q (__m256i_op0, __m256i_op1, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_w_d (__m256i_op0, __m256i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x03fbfffc03fc07fc; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x03fbfffc03fc07fc; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffff80000000; ++ __m256i_out = __lasx_xvssrani_w_d (__m256i_op0, __m256i_op1, 0x18); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7efefefe80ffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7efefefe80ffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff003fffc0; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000003fffc0; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrani_h_w (__m256i_op0, __m256i_op1, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffc00fffffc00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffc00fffffc00; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff00ff007f007f00; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff00ff007f007f00; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[2]) = 0xc03fc03fc03fc03f; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[0]) = 0xc03fc03fc03fc03f; ++ __m256i_out = __lasx_xvssrani_d_q (__m256i_op0, __m256i_op1, 0x3a); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ff80; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000000ff; ++ __m256i_out = __lasx_xvssrani_b_h (__m256i_op0, __m256i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ff80; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_wu_d (__m256i_op0, __m256i_op1, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_du_q (__m256i_op0, __m256i_op1, 0x6c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrani_h_w (__m256i_op0, __m256i_op1, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000700000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000005ffffffff; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0x0000000700000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000005; ++ __m256i_out = __lasx_xvssrani_du_q (__m256i_op0, __m256i_op1, 0x60); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0004000500040005; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0004000500040005; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0004000500040005; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0004000500040005; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0004000400040004; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0004000500040005; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0004000400040004; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0004000500040005; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrani_du_q (__m256i_op0, __m256i_op1, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_bu_h (__m256i_op0, __m256i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff0000fffd0004; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff0000fffd0004; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff0000fffd0004; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff0000fffd0004; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffff0; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffff0; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000000f; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000000f; ++ __m256i_out = __lasx_xvssrani_d_q (__m256i_op0, __m256i_op1, 0x6c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarn.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarn.c +new file mode 100644 +index 000000000..7848ddd41 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarn.c +@@ -0,0 +1,905 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; 
++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvssrarn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000017ffffffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000017ffffffe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000017ffffffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000017ffffffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff0000ffff0000; ++ __m256i_out = __lasx_xvssrarn_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff80000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff80000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000; ++ __m256i_out = __lasx_xvssrarn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrarn_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000100da000100fd; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0001ffe20001fefd; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001009a000100fd; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001ff640001fefd; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000100da000100fd; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001ffe20001fefd; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001009a000100fd; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001ff640001fefd; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007ff90000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000001ff60000; ++ __m256i_out = __lasx_xvssrarn_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffe00000001; ++ __m256i_out = __lasx_xvssrarn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000001b0000001b; ++ *((unsigned long *)&__m256i_op1[2]) 
= 0x0000001b00fd0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000001b0000001b; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000001b00fd0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff00ffffff00ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00ffffff00ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff00ffffff00ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00ffffff00ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00010002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0080000200000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00010002; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrarn_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00c200c200c200c2; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00c200c200c200bb; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00c200c200c200c2; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00c200c200c200bb; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffc2c2ffffc2c2; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffc2c2ffffc2c2; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffc2c2ffffc2c2; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffc2c2ffffc2c2; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x003100310031002f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x003100310031002f; ++ __m256i_out = __lasx_xvssrarn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffefffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffe0001fffe0003; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffe0001fffe0003; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffe0001fffe0003; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffe0001fffe0003; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffe0001fffe0003; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffe0001fffe0003; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000000010000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000000000002; ++ __m256i_out = __lasx_xvssrarn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffff6f20; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000781e0000f221; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffff6f20; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000781e0000f221; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned 
long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvssrarn_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000fffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000fffe; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x000000000000fffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000fffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xbf00bf00bf00bf00; ++ *((unsigned long *)&__m256i_op0[2]) = 0xbf84bf00bf00bf0e; ++ *((unsigned long *)&__m256i_op0[1]) = 0xbf00bf00bf00bf00; ++ *((unsigned long *)&__m256i_op0[0]) = 0xbf84bf00bf00bf0e; ++ *((unsigned long *)&__m256i_op1[3]) = 0xbf00bf00bf00bf00; ++ *((unsigned long *)&__m256i_op1[2]) = 0xbf84bf00bf00bf0e; ++ *((unsigned long *)&__m256i_op1[1]) 
= 0xbf00bf00bf00bf00; ++ *((unsigned long *)&__m256i_op1[0]) = 0xbf84bf00bf00bf0e; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00007f7f80007fa3; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000007f670000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00007f7f80007fa3; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000007f670000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvssrarn_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000200000008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000200000008; ++ __m256i_out = __lasx_xvssrarn_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 
0xfff1fff1fff1fff1; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff1fff1fff1fff1; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfff1fff1fff1fff1; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfff1fff1fff1fff1; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffff000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffff000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffff000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffff000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffff0001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffff0001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrarn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000408080c111414; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000408080c111414; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000408080c111414; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000408080c111414; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0002000200010002; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0002000200010002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0002000200010002; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0002000200010002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff7fff7fff; ++ __m256i_out = __lasx_xvssrarn_h_w (__m256i_op0, 
__m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ff88ff88; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ff88ff88; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000010006d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000080040; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x000000000010006d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000800400010006d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000800400010006d; ++ __m256i_out = __lasx_xvssrarn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0200000002000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x02000000fdffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0200000002000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x02000000fdffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000000000b7; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffefff80; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000000000b7; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffefff80; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000004ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000004ffffffff; ++ __m256i_out = __lasx_xvssrarn_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3fd1000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3fd1000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3fd1000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3fd1000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x3fd1000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x3fd1000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x3fd1000000000000; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x3fd1000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xff000000ff000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xff000000ff000000; ++ __m256i_out = __lasx_xvssrarn_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvssrarn_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffb6811fffff80; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff97c120000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffb6811fffff80; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff97c120000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xdb410010cbe10010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0xdb410010cbe10010; ++ __m256i_out = __lasx_xvssrarn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000019ffdf403; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000011ffd97c3; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000019ffdf403; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000011ffd97c3; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrarn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0020000000200000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x002000000020ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000004000000040; ++ __m256i_out = __lasx_xvssrarn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) 
= 0x0000004000000040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1a1a1a2c1a1a1a2c; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1a1a1a2c1a1a1a2c; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1a1a1a2c1a1a1a2c; ++ *((unsigned long *)&__m256i_op1[0]) = 0x1a1a1a2c1a1a1a2c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3838383838383838; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffdfffffe00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3838383838383838; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffdfffffe00; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvssrarn_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000020002000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000020002000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffbffffffffffffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffbffffffffffffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000007b007e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000007b007e; 
++ *((unsigned long *)&__m256i_op1[3]) = 0xc03b000200020002; ++ *((unsigned long *)&__m256i_op1[2]) = 0xc03b000200020002; ++ *((unsigned long *)&__m256i_op1[1]) = 0xc03b000200020002; ++ *((unsigned long *)&__m256i_op1[0]) = 0xc03b000200020002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000001ec020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000001ec020; ++ __m256i_out = __lasx_xvssrarn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarni.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarni.c +new file mode 100644 +index 000000000..b1c16baf4 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarni.c +@@ -0,0 +1,1160 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, 
__m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffc00; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffc00; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffffffc00; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffc00; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000020000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000020000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_du_q (__m256i_op0, __m256i_op1, 0x23); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000f20; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000009f0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_w_d (__m256i_op0, __m256i_op1, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00001f41ffffbf00; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000400000000; ++ __m256i_out = __lasx_xvssrarni_w_d (__m256i_op0, __m256i_op1, 0x2b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000010000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000010000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000010000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000010000000; ++ __m256i_out = __lasx_xvssrarni_b_h (__m256i_op0, __m256i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000100; ++ __m256i_out = __lasx_xvssrarni_d_q (__m256i_op0, __m256i_op1, 0x5d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf5f5bfbaf5f5bfbe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf5f0bfb8f5d8bfe8; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf5f5bfbaf5f5bfbe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf5f0bfb8f5d8bfe8; ++ *((unsigned long *)&__m256i_op1[3]) = 0xf5f5bfbaf5f5bfbe; ++ *((unsigned long *)&__m256i_op1[2]) = 0xf5f0bfb8f5d8bfe8; ++ *((unsigned long *)&__m256i_op1[1]) = 0xf5f5bfbaf5f5bfbe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xf5f0bfb8f5d8bfe8; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffff5f5c; ++ __m256i_out = __lasx_xvssrarni_d_q (__m256i_op0, __m256i_op1, 0x6c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x005500550055ffab; ++ *((unsigned long *)&__m256i_op0[2]) = 0x005500550055ffab; ++ *((unsigned long *)&__m256i_op0[1]) = 0x005500550055ffab; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x005500550055ffab; ++ *((unsigned long *)&__m256i_op1[3]) = 0x005500550055ffab; ++ *((unsigned long *)&__m256i_op1[2]) = 0x005500550055ffab; ++ *((unsigned long *)&__m256i_op1[1]) = 0x005500550055ffab; ++ *((unsigned long *)&__m256i_op1[0]) = 0x005500550055ffab; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrarni_wu_d (__m256i_op0, __m256i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000fffff6ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000fffff6ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_w_d (__m256i_op0, __m256i_op1, 0x28); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0a09080706050403; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0a09080706050403; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0003000200000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0003000200000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_h_w (__m256i_op0, __m256i_op1, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000; ++ __m256i_out = __lasx_xvssrarni_h_w (__m256i_op0, __m256i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_w_d (__m256i_op0, __m256i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000010000; ++ __m256i_out = __lasx_xvssrarni_d_q (__m256i_op0, __m256i_op1, 0x30); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_hu_w (__m256i_op0, __m256i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned 
long *)&__m256i_op0[2]) = 0x0001010300010102; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000410041; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_bu_h (__m256i_op0, __m256i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000df93f0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000077843; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000003800000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_w_d (__m256i_op0, __m256i_op1, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_bu_h (__m256i_op0, __m256i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_w_d (__m256i_op0, __m256i_op1, 0x27); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x2000200020002000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_hu_w (__m256i_op0, __m256i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_wu_d (__m256i_op0, __m256i_op1, 0x3b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_b_h (__m256i_op0, __m256i_op1, 0xf); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_du_q (__m256i_op0, __m256i_op1, 0x73); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_d_q (__m256i_op0, __m256i_op1, 0x17); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8001b72e0001b72e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8001b72eaf12d5f0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000247639d9cb530; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x8001b72eaf12d5f0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffe056fd9d; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffceba70; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_hu_w (__m256i_op0, __m256i_op1, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_h_w (__m256i_op0, __m256i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00150015003a402f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x333568ce26dcd055; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00150015003a402f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x333568ce26dcd055; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000007d0d0d0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000007d0d0d0; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvssrarni_d_q (__m256i_op0, __m256i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000800000098; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000040000ffca; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000800000098; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000040000ff79; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff04ff00ff00ff00; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff04ff00ff00ff00; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000008000000a; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000008000000a; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_du_q (__m256i_op0, __m256i_op1, 0x44); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000120e120d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000120e120d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000907; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000907; 
++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_w_d (__m256i_op0, __m256i_op1, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_hu_w (__m256i_op0, __m256i_op1, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_w_d (__m256i_op0, __m256i_op1, 0x32); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_wu_d (__m256i_op0, __m256i_op1, 0x27); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_hu_w (__m256i_op0, __m256i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op1[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_d_q (__m256i_op0, __m256i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0016001600160016; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0016001600160016; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0016001600160016; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0016001600160016; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_bu_h (__m256i_op0, __m256i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_bu_h (__m256i_op0, __m256i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_wu_d (__m256i_op0, __m256i_op1, 0x3b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000000008000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvssrarni_wu_d (__m256i_op0, __m256i_op1, 0x2b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_hu_w (__m256i_op0, __m256i_op1, 0x10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0010002000100020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0010002000100020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0010002000100020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0010002000100020; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fffffffe; ++ __m256i_out = __lasx_xvssrarni_w_d (__m256i_op0, __m256i_op1, 0x3e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_wu_d (__m256i_op0, __m256i_op1, 0x2a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffe00000002; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffe00000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffe00000002; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffe00000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffe000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffe000; ++ __m256i_out = __lasx_xvssrarni_d_q (__m256i_op0, __m256i_op1, 0x54); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00030006fa05f20e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00030081bd80f90e; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000018; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000018; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_w_d (__m256i_op0, __m256i_op1, 0x2d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x02407a3c00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0d0cf2f30d0cf2f3; ++ *((unsigned long *)&__m256i_op0[1]) = 0x02407a3c00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0d0cf2f30d0cf2f3; ++ *((unsigned long *)&__m256i_op1[3]) = 0xefdfefdf00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xefdfefdfefdfefdf; ++ *((unsigned long *)&__m256i_op1[1]) = 0xefdfefdf00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xefdfefdfefdfefdf; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_wu_d (__m256i_op0, __m256i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0020000f0000000f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0010000f0000000f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0020000f0000000f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0010000f0000000f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_b_h (__m256i_op0, __m256i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000f0f0f0f0; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf0f0f0f0f0f0f0f0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000f0f0f0f0; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf0f0f0f0f0f0f0f0; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ff0fff0fff0f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ff0fff0fff0f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_h_w (__m256i_op0, __m256i_op1, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_h_w (__m256i_op0, __m256i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_h_w (__m256i_op0, __m256i_op1, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffff70156; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffff70156; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffff70156; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffff70156; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_du_q (__m256i_op0, __m256i_op1, 0x74); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_wu_d (__m256i_op0, __m256i_op1, 0x2c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xde00fe0000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000fe010000fe01; ++ *((unsigned long *)&__m256i_op0[1]) = 0xde00fe0000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000fe010000fe01; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_b_h (__m256i_op0, __m256i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000100010001ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000100010001ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000100010001ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000100010001ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00007ff000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 
0x00007ff000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_d_q (__m256i_op0, __m256i_op1, 0x79); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_h_w (__m256i_op0, __m256i_op1, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7000700070007000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7000700070007000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000070007000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7000700070007000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[3]) = 0x0e0e0e0e0e0e0e0e; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000e0e0e0e0e0e; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_b_h (__m256i_op0, __m256i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xc848c848c848c848; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8848c848c848c848; ++ *((unsigned long *)&__m256i_op1[1]) = 0xc848c848c848c848; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8848c848c848c848; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_bu_h (__m256i_op0, __m256i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_hu_w (__m256i_op0, __m256i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op0[2]) = 0xa1a1a1a1a1a15e5e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xa1a1a1a1a1a15e5e; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_hu_w (__m256i_op0, __m256i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x01ff0020ff1f001f; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffe1ffe0ffe1ffe0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x01ff0020ff1f001f; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffe1ffe0ffe1ffe0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00003fe000000000; ++ __m256i_out = __lasx_xvssrarni_wu_d (__m256i_op0, __m256i_op1, 0x2b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_du_q (__m256i_op0, __m256i_op1, 0x45); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001fffe0001fffa; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001fffe00018069; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001fffe0001fffa; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001fffe00018069; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000002000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000002000; ++ __m256i_out = __lasx_xvssrarni_d_q (__m256i_op0, __m256i_op1, 0x64); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_h_w (__m256i_op0, __m256i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000004000000020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000004000000020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_bu_h (__m256i_op0, __m256i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_h_w (__m256i_op0, __m256i_op1, 0x9); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00b213171dff0606; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00e9a80014ff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00b213171dff0606; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00e9a80014ff0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff00000000ffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff00000000ffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_wu_d (__m256i_op0, __m256i_op1, 0x3b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000038000000268; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000038000000268; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000001010101; ++ __m256i_out = __lasx_xvssrarni_bu_h (__m256i_op0, __m256i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0400000004000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000400; ++ *((unsigned long *)&__m256i_result[1]) = 0x0400000004000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000400; ++ __m256i_out = __lasx_xvssrarni_wu_d (__m256i_op0, __m256i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_d_q (__m256i_op0, __m256i_op1, 0x5b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0080000000000002; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0080000000000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x08000000000000f8; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x08000000000000f8; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0200000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x2000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0200000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x2000000000000000; ++ __m256i_out = __lasx_xvssrarni_wu_d (__m256i_op0, __m256i_op1, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_d_q (__m256i_op0, __m256i_op1, 0x6a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_d_q (__m256i_op0, __m256i_op1, 0x36); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_h_w (__m256i_op0, __m256i_op1, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x498000804843ffe0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x4980008068400000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x498000804843ffe0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x4980008068400000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrarni_hu_w (__m256i_op0, __m256i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000100000008; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000040000001b; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000008; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000040000001b; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_b_h (__m256i_op0, __m256i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x41dffbffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffff00ff800000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x41dffbffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffff00ff800000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7f80ffffff808000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f80ffffff808000; ++ __m256i_out = __lasx_xvssrarni_b_h (__m256i_op0, __m256i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000001e00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0002000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_h_w (__m256i_op0, __m256i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000500020002; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000700020033; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000500020002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000700020033; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000500020002; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000700020033; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000500020002; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000700020033; ++ *((unsigned long *)&__m256i_result[3]) = 0x1400080008000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x1400080008000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x1400080008000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x1400080008000000; ++ __m256i_out = __lasx_xvssrarni_d_q (__m256i_op0, __m256i_op1, 0x26); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000001c; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000001de; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000001c; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000001de; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000007ffffffce; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000060000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000060000000; ++ __m256i_out = __lasx_xvssrarni_du_q (__m256i_op0, __m256i_op1, 0x44); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00003fea0014734d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00003fe900140d85; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00003fea0014734d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00003fe900140d85; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000ff0000ff00; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000ff0000ff00; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrarni_du_q (__m256i_op0, __m256i_op1, 0x17); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +-- +2.33.0 + diff --git a/LoongArch-Add-tests-for-ASX-vector-xvssrln-xvssrlni-.patch b/LoongArch-Add-tests-for-ASX-vector-xvssrln-xvssrlni-.patch new file mode 100644 index 0000000000000000000000000000000000000000..cd078c59c6b4c9bb4e1e01f63f698a0d360331e7 --- /dev/null +++ b/LoongArch-Add-tests-for-ASX-vector-xvssrln-xvssrlni-.patch @@ -0,0 +1,4123 @@ +From 983fd43b599dd252bc7f869be27bf1677f8eeca7 Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Wed, 13 Sep 2023 12:35:41 +0800 +Subject: [PATCH 120/124] LoongArch: Add tests for ASX vector + xvssrln/xvssrlni/xvssrlrn/xvssrlrni instructions. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lasx/lasx-xvssrln.c: New test. 
+ * gcc.target/loongarch/vector/lasx/lasx-xvssrlni.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvssrlrn.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvssrlrni.c: New test. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lasx/lasx-xvssrln.c | 965 ++++++++++++++ + .../loongarch/vector/lasx/lasx-xvssrlni.c | 1130 ++++++++++++++++ + .../loongarch/vector/lasx/lasx-xvssrlrn.c | 815 ++++++++++++ + .../loongarch/vector/lasx/lasx-xvssrlrni.c | 1160 +++++++++++++++++ + 4 files changed, 4070 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrln.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlni.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrn.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrni.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrln.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrln.c +new file mode 100644 +index 000000000..356eb2182 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrln.c +@@ -0,0 +1,965 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[1]) = 0x44bb2cd3a35c2fd0; ++ *((unsigned long *)&__m256i_op0[0]) = 0xca355ba46a95e31c; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000100ab000500a0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000200b800080124; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001011b000200aa; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00150118008f0091; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f057f0b7f5b007f; ++ __m256i_out = __lasx_xvssrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007fffffff; ++ __m256i_out = __lasx_xvssrln_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000020000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000020000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000007f00; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff7ffe7fffeffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffd84900000849; ++ *((unsigned long *)&__m256i_op0[0]) = 0x07fffc670800f086; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000000; ++ __m256i_out = __lasx_xvssrln_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000017ffffffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000017ffffffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000017ffffffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000017ffffffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffff0ffff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffff0ffff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffff7fffffff; ++ __m256i_out = __lasx_xvssrln_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000017000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000017000000080; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000001700080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000001700080; ++ __m256i_out = __lasx_xvssrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x2000200020002000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2000200020002000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x2000200020002000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x2000200020002000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f7f7f7f7f7f7f7f; ++ __m256i_out = __lasx_xvssrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffbfffc; ++ *((unsigned long *)&__m256i_op1[3]) = 
0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffff8c80; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000fff0e400; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvssrln_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffbfffc; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000005536aaaaac; ++ *((unsigned long *)&__m256i_op0[2]) = 0x55555555aaaaaaac; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000005536aaaaac; ++ *((unsigned long *)&__m256i_op0[0]) = 0x55555555aaaaaaac; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfff9fffffffbffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffdaaaaffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfff9fffffffbffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffdaaaaffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000060102150101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000060102150101; ++ __m256i_out = __lasx_xvssrln_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1cfd000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1cfd000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1cfd000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x1cfd000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00000000000000; ++ __m256i_out = __lasx_xvssrln_bu_h (__m256i_op0, 
__m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000003f00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000003f00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000003f0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000003f0000; ++ __m256i_out = __lasx_xvssrln_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007f7f7f7f0000; ++ __m256i_out = __lasx_xvssrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000154dc84; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000089; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007fff00000089; ++ __m256i_out = __lasx_xvssrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvssrln_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfe7fffecfe7fffec; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfe7fffecfe7fffec; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff80000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvssrln_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrln_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffff600000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff000009ec; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffff600000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff000009ec; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffff7fffffff; ++ __m256i_out = __lasx_xvssrln_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvssrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001fffe0001fffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001fffe0001fffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001fffe0001fffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001fffe0001fffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrln_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8060000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8060000000000000; ++ 
*((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1f001f00000007ef; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00001fff200007ef; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff000000010000; ++ __m256i_out = __lasx_xvssrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000003f; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000ff00000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000ff00000001; ++ __m256i_out = __lasx_xvssrln_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff81ff7dffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff81ff7d; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff81ff7dffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff81ff7d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7f7f7f7f7f017ffd; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7f7f7f7f7f017ffd; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000007; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000007; ++ __m256i_out = __lasx_xvssrln_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = 
__lasx_xvssrln_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvssrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000007ffffffff; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000077fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x01ff0020ff1f001f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fe1ffe0ffe1ffe0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x01ff0020ff1f001f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fe1ffe0ffe1ffe0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00011ffb0000bee1; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00011ffb0000bee1; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffff00ffffff00; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 
0xffffff00ffffff00; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007fff7fff; ++ __m256i_out = __lasx_xvssrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000007f007f007f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000007f007f007f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op0[2]) = 0x01ffff4300ffff00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op0[0]) = 0x01ffff4300ffff00; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff00000000; ++ __m256i_out = __lasx_xvssrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x04e8296f08181818; ++ *((unsigned long *)&__m256i_op0[2]) = 0x032feea900000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x04e8296f08181818; ++ *((unsigned long *)&__m256i_op0[0]) = 0x032feea900000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffff0000; ++ __m256i_out = __lasx_xvssrln_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrln_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrln_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000fffcfffcfffc; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000fffcfffcfffc; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrln_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffc01fc01; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000003fc03bbc; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffc01fc01; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000003fc03bbc; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x41cfe01dde000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x41cfe01dde000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000013fc03bbc; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000013fc03bbc; ++ __m256i_out = __lasx_xvssrln_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000fff8ff40; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ff0100090040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000fff8ff40; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ff0100090040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffff7fffffff; ++ __m256i_out = __lasx_xvssrln_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7efefefe80ffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7efefefe80ffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000017f00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007f7f03030000; ++ __m256i_out = __lasx_xvssrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xdf80df80df80dfff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffdf80dfff; ++ *((unsigned long *)&__m256i_op0[0]) 
= 0x8080808080808080; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffff7fffffff; ++ __m256i_out = __lasx_xvssrln_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvssrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; 
++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000017f7f7f7f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000017f7f7f7f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000017fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000017fff; ++ __m256i_out = __lasx_xvssrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff800000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff800000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff000000017fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff000000017fff; ++ 
__m256i_out = __lasx_xvssrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffff7fffffff; ++ __m256i_out = __lasx_xvssrln_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000003fffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000003fffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff010100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff010100000001; ++ __m256i_out = __lasx_xvssrln_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000000c; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000000c; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlni.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlni.c +new file mode 100644 +index 000000000..116bebbb6 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlni.c +@@ -0,0 +1,1130 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7f7f7f7f00007f7f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3f28306860663e60; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x40d74f979f99419f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fff01fd7fff7fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007fff7fff7fff; ++ __m256i_out = __lasx_xvssrlni_h_w (__m256i_op0, __m256i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_bu_h (__m256i_op0, __m256i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffe0ffe0ffe0ffe0; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffe0ffe0ffe0ffe0; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffe0ffe0ffe0ffe0; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffe0ffe0ffe0ffe0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1e1800001e180000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1e1800001e180000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x1e18000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x000000000000ffe0; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000001e18; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffe0; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000001e18; ++ __m256i_out = __lasx_xvssrlni_du_q (__m256i_op0, __m256i_op1, 0x70); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_bu_h (__m256i_op0, __m256i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out 
= __lasx_xvssrlni_wu_d (__m256i_op0, __m256i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op1[0]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_result[3]) = 0x1fffffff1fffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0383634303836343; ++ *((unsigned long *)&__m256i_result[1]) = 0x1fffffff1fffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0383634303836343; ++ __m256i_out = __lasx_xvssrlni_w_d (__m256i_op0, __m256i_op1, 0x23); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_du_q (__m256i_op0, __m256i_op1, 0x68); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_d_q (__m256i_op0, __m256i_op1, 0x6c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0036003200360032; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0036003200360032; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0036003200360032; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0036003200360032; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_wu_d (__m256i_op0, __m256i_op1, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffefffffefc; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrlni_wu_d (__m256i_op0, __m256i_op1, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000800000004; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000bf6e0000c916; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000030000fff3; ++ *((unsigned long *)&__m256i_op1[3]) = 0x001175f10e4330e8; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff8f0842ff29211e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffff8d9ffa7103d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000e00ff00ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000ff00ff; ++ __m256i_out = __lasx_xvssrlni_hu_w (__m256i_op0, __m256i_op1, 0x18); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7f80780000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7f80780000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00001000; ++ 
*((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00001000; ++ __m256i_out = __lasx_xvssrlni_wu_d (__m256i_op0, __m256i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvssrlni_d_q (__m256i_op0, __m256i_op1, 0x39); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xebebebebebebebeb; ++ *((unsigned long *)&__m256i_op1[2]) = 0x6aeaeaeaeaeaeaea; ++ *((unsigned long *)&__m256i_op1[1]) = 0xebebebebebebebeb; ++ *((unsigned long *)&__m256i_op1[0]) = 0x6aeaeaeaeaeaeaea; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrlni_hu_w (__m256i_op0, __m256i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); 
++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_b_h (__m256i_op0, __m256i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000003f0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000003f0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_w_d (__m256i_op0, __m256i_op1, 0x30); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000fffc0000fee0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000fe000000ffe0; ++ 
*((unsigned long *)&__m256i_op1[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffff900000003; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffff900000003; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7ffe00007f000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007fffffff; ++ __m256i_out = __lasx_xvssrlni_w_d (__m256i_op0, __m256i_op1, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7ffe00007f000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff0000ffff; ++ __m256i_out = __lasx_xvssrlni_hu_w (__m256i_op0, __m256i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xe17cec8fe08008ac; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xe0801f41e0800168; ++ *((unsigned long *)&__m256i_op1[3]) = 0x9240f24a84b18025; ++ *((unsigned long *)&__m256i_op1[2]) = 0x9240f24a84b18025; ++ *((unsigned long *)&__m256i_op1[1]) = 0xb2c0b341807f8006; ++ *((unsigned long *)&__m256i_op1[0]) = 0xb2c0b341807f8006; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000012481e4950; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001658166830; ++ __m256i_out = __lasx_xvssrlni_du_q (__m256i_op0, __m256i_op1, 0x5b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_b_h (__m256i_op0, __m256i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x77777777f7777777; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf777777777777777; ++ *((unsigned long *)&__m256i_op0[1]) = 0x77777777f7777777; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf777777777777777; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_bu_h (__m256i_op0, __m256i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ff24; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ff24; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000003; ++ __m256i_out = __lasx_xvssrlni_wu_d (__m256i_op0, __m256i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000040404040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000040404240; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000040404040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000040404240; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000040404040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000040404240; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000040404040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000040404240; ++ *((unsigned long *)&__m256i_result[3]) = 0x00007f7f00007f7f; ++ *((unsigned long *)&__m256i_result[2]) = 0x00007f7f00007f7f; ++ *((unsigned long *)&__m256i_result[1]) = 0x00007f7f00007f7f; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007f7f00007f7f; ++ __m256i_out = __lasx_xvssrlni_b_h (__m256i_op0, __m256i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_hu_w (__m256i_op0, __m256i_op1, 0x13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_h_w (__m256i_op0, __m256i_op1, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00010001000c4411; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100044411; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 
0x0000002800000010; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000002800000010; ++ *((unsigned long *)&__m256i_result[3]) = 0x0002000200020018; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0002000200020008; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_h_w (__m256i_op0, __m256i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000c0000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000040000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000c0000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000040000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0003030300000300; ++ *((unsigned long *)&__m256i_result[2]) = 0x0003030300000300; ++ *((unsigned long *)&__m256i_result[1]) = 0x0003030300000100; ++ *((unsigned long *)&__m256i_result[0]) = 0x0003030300000100; ++ __m256i_out = __lasx_xvssrlni_bu_h (__m256i_op0, __m256i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000002000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000800000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000002000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000800000; ++ __m256i_out = __lasx_xvssrlni_d_q (__m256i_op0, __m256i_op1, 0x28); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00003fff00003fff; ++ __m256i_out = __lasx_xvssrlni_w_d (__m256i_op0, __m256i_op1, 0x32); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00007f7f00007f7f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00007f7f00007f7f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_b_h (__m256i_op0, __m256i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007f000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007f00ff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0007fff8000ffff0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0007fff8000ffff0; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000003f0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000030007; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000003f0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000030007; ++ __m256i_out = __lasx_xvssrlni_hu_w (__m256i_op0, __m256i_op1, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000007f7f817f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000007f7f817f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x4ffc3f783fc040c0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x3fc03f803fc040c0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x4ffc3f783fc040c0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x3fc03f803fc040c0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0003fbfc0bfbfc03; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0003fbfc0bfbfc03; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrlni_du_q (__m256i_op0, __m256i_op1, 0x2d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff56ff55ff01ff01; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff56ff55ff01ff01; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007f7f7f7f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007f7f7f7f; ++ __m256i_out = __lasx_xvssrlni_b_h (__m256i_op0, __m256i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_b_h (__m256i_op0, __m256i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xa90896a400000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xa90896a400000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000504fffff3271; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff47b4ffff5879; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000504fffff3271; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0xffff47b4ffff5879; ++ *((unsigned long *)&__m256i_result[3]) = 0x7f7f000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x007f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_result[1]) = 0x7f7f000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x007f7f7f7f7f7f7f; ++ __m256i_out = __lasx_xvssrlni_b_h (__m256i_op0, __m256i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff80017fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff80017fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007fffffff; ++ __m256i_out = __lasx_xvssrlni_w_d (__m256i_op0, __m256i_op1, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffe00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[0]) = 0xfffffffe00000000; ++ __m256i_out = __lasx_xvssrlni_du_q (__m256i_op0, __m256i_op1, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x80000000ff810011; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x80000000ff810011; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff8180ffff8181; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff8180ffff8181; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000008000ff00; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ff81ff81; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000008000ff00; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ff81ff81; ++ __m256i_out = __lasx_xvssrlni_bu_h (__m256i_op0, __m256i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffebeeaaefafb; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffebeeaaeeeeb; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffebeeaaefafb; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffebeeaaeeeeb; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffff7fffffff; ++ __m256i_out = __lasx_xvssrlni_w_d (__m256i_op0, __m256i_op1, 0x13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fefffffffffffff; ++ *((unsigned 
long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x01ffbfff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x03ffffff03ffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x01ffbfff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x03ffffff03ffffff; ++ __m256i_out = __lasx_xvssrlni_w_d (__m256i_op0, __m256i_op1, 0x26); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x001f001f001f001f; ++ __m256i_out = __lasx_xvssrlni_hu_w (__m256i_op0, __m256i_op1, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_d_q (__m256i_op0, __m256i_op1, 0x61); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_hu_w (__m256i_op0, __m256i_op1, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0200000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0200000000000000; ++ __m256i_out = __lasx_xvssrlni_du_q (__m256i_op0, __m256i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1f001f00000007ef; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00001fff200007ef; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000001f0000001f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000003030000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000001f0000001f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000030400; ++ __m256i_out = __lasx_xvssrlni_b_h (__m256i_op0, __m256i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007000008e700000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007000008e700000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7171717171010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8e8e8e8e8f00ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7171717171010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8e8e8e8e8f00ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000ff00; ++ *((unsigned long *)&__m256i_result[2]) = 0xe2e2e202ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000ff00; ++ *((unsigned long *)&__m256i_result[0]) = 0xe2e2e202ffffffff; ++ __m256i_out = __lasx_xvssrlni_bu_h (__m256i_op0, __m256i_op1, 0x7); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc800c800c800c800; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8800c800c800c801; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc800c800c800c800; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8800c800c800c801; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_hu_w (__m256i_op0, __m256i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_hu_w (__m256i_op0, __m256i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_b_h (__m256i_op0, __m256i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_bu_h (__m256i_op0, __m256i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0003800400038004; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000a800b000a800b; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0003800400038004; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000a800b000a800b; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000e0010000e; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000e0010000e; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_d_q (__m256i_op0, __m256i_op1, 0x4e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_wu_d (__m256i_op0, __m256i_op1, 0x38); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xe07de0801f20607a; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xe07de0801f20607a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x01ff01ff01c0003e; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_result[0]) = 0x01ff01ff01c0003e; ++ __m256i_out = __lasx_xvssrlni_hu_w (__m256i_op0, __m256i_op1, 0x17); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_bu_h (__m256i_op0, __m256i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0707070707070707; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0707070707070707; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_bu_h (__m256i_op0, __m256i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0018001800180018; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0018001800180018; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0018001800180018; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0018001800180018; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x3000300030003000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x3000300030003000; ++ __m256i_out = __lasx_xvssrlni_h_w (__m256i_op0, __m256i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_w_d (__m256i_op0, __m256i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_wu_d (__m256i_op0, __m256i_op1, 0x35); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000598; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000598; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000002cc0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000002cc0000; ++ __m256i_out = __lasx_xvssrlni_d_q (__m256i_op0, __m256i_op1, 0x31); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff81001dff9dff9e; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff81001dff9d003b; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff81001dff9dff9e; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff81001dff9d003b; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0002000200010002; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0002000200010002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0002000200010002; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0002000200010002; ++ *((unsigned long *)&__m256i_result[3]) = 
0x7f1d7f7f7f1d7f3b; ++ *((unsigned long *)&__m256i_result[2]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_result[1]) = 0x7f1d7f7f7f1d7f3b; ++ *((unsigned long *)&__m256i_result[0]) = 0x0202010202020102; ++ __m256i_out = __lasx_xvssrlni_b_h (__m256i_op0, __m256i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000dfffffff1; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000cfffffff3; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000dfffffff1; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000cfffffff3; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00003f3f00003f3f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00003f3f00003f3f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_b_h (__m256i_op0, __m256i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_hu_w 
(__m256i_op0, __m256i_op1, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4000c08000000080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000080c000c080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4000c08000000080; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000080c000c080; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000200000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000200000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000004000; ++ __m256i_out = __lasx_xvssrlni_w_d (__m256i_op0, __m256i_op1, 0x31); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000010006d; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000010006d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000004000000080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000004000000080; ++ __m256i_out = __lasx_xvssrlni_w_d (__m256i_op0, __m256i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000118; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000118; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_w_d (__m256i_op0, __m256i_op1, 0x2e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_w_d (__m256i_op0, __m256i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x007efffefffefffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff80fffffffffffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x007efffefffefffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff80fffffffffffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_hu_w (__m256i_op0, __m256i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000101; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_bu_h (__m256i_op0, __m256i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000e3ab0001352b; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000e3ab0001352b; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000038ea4d4a; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff00007fff0000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000038ea4d4a; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff00007fff0000; ++ __m256i_out = __lasx_xvssrlni_h_w (__m256i_op0, __m256i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000010000005e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000010000005e; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_bu_h (__m256i_op0, __m256i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000010000005e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000a400ff004f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000010000005e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000a400ff004f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000010000005e; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000a400ff004f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000010000005e; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000a400ff004f; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvssrlni_d_q (__m256i_op0, __m256i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned 
long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_b_h (__m256i_op0, __m256i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x001fffffffe00011; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x001fffffffe00011; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvssrlni_d_q (__m256i_op0, __m256i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_hu_w (__m256i_op0, __m256i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrn.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrn.c +new file mode 100644 +index 000000000..977061097 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrn.c +@@ -0,0 +1,815 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1515151515151515; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1515151515151515; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1515151515151515; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1515151515151515; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff7fff7fff; ++ __m256i_out = __lasx_xvssrlrn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf800f800f800c000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf800f800f800a000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf800f800f800e000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf800f800f800e000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xf800f800f800c000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xf800f800f800a000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xf800f800f800e000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xf800f800f800e000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff7fff7fff; ++ __m256i_out = __lasx_xvssrlrn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffefefffffcfa; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x5555555536aaaaac; ++ *((unsigned long *)&__m256i_op1[2]) = 0x55555555aaaaaaac; ++ *((unsigned long *)&__m256i_op1[1]) = 0x5555555536aaaaac; ++ *((unsigned long *)&__m256i_op1[0]) = 0x55555555aaaaaaac; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffc0000fffc0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffc0000fffc0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffc0000fffc0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffc0000fffc0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0002000200020002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0002000200020002; ++ __m256i_out = __lasx_xvssrlrn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff9fff9fff9fff9; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff90000fff9fff9; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfff9fff9fff9fff9; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff90000fff9fff9; ++ *((unsigned long *)&__m256i_op1[3]) = 0x108659e46485f7e1; ++ *((unsigned long *)&__m256i_op1[2]) = 0x4df5b1a3ed5e02c1; ++ *((unsigned long *)&__m256i_op1[1]) = 0x108659e46485f7e1; ++ *((unsigned long *)&__m256i_op1[0]) = 0x4df5b1a3ed5e02c1; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffff0004ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffff0004ff; ++ __m256i_out = __lasx_xvssrlrn_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000005be55bd2; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xbabababababababa; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xbabababababababa; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_wu_d 
(__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffef; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000010; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffef; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000010; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffefffe00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffefffefffefffd; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0404ffff00000010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0404040800000010; ++ __m256i_out = __lasx_xvssrlrn_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x007f00f8ff7fff80; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000fff6a9d8; ++ *((unsigned long *)&__m256i_op1[1]) = 0x007f00f8ff7fff80; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000fff6a9d8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvssrlrn_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000001900000019; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000001900000019; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000001900000019; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000001900000019; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000001b0000001b; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000001b00fd0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000001b0000001b; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000001b00fd0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000019; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000019; ++ __m256i_out = __lasx_xvssrlrn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000070700000707; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000009091b1b1212; ++ *((unsigned long *)&__m256i_op1[1]) = 
0x0000070700000707; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000009091b1b1212; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000027d00f8; ++ *((unsigned long *)&__m256i_op1[2]) = 0x040204660265fe22; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000027d00f8; ++ *((unsigned long *)&__m256i_op1[0]) = 0x040204660265fe22; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0xe273e273e273e273; ++ *((unsigned long *)&__m256i_op0[2]) = 0xe273e273e273e273; ++ *((unsigned long *)&__m256i_op0[1]) = 0xe273e273e273e273; ++ *((unsigned long *)&__m256i_op0[0]) = 0xe273e273e273e273; ++ *((unsigned long *)&__m256i_op1[3]) = 0xd207e90001fb16ef; ++ *((unsigned long *)&__m256i_op1[2]) = 0xc8eab25698f97e90; ++ *((unsigned long *)&__m256i_op1[1]) = 0xd207e90001fb16ef; ++ *((unsigned long *)&__m256i_op1[0]) = 0xc8eab25698f97e90; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001c4e8ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001c4e8ffffffff; ++ __m256i_out = __lasx_xvssrlrn_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 
0x00000000000000ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffff0000ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffff0000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00007f0200007f02; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007f0200007f02; ++ __m256i_out = __lasx_xvssrlrn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0097011900f4009f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x003200d4010f0144; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0097011900f301cd; ++ *((unsigned long *)&__m256i_op0[0]) = 0x010b008800f80153; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ff810011; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ff810011; ++ *((unsigned long *)&__m256i_op1[3]) = 0x3fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x3fff8000ffa08004; ++ *((unsigned long *)&__m256i_op1[1]) = 0x3fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x3fff8000ffa08004; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff01; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ff01; ++ __m256i_out = __lasx_xvssrlrn_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_hu_w 
(__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000fc38fc38; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000fc38fc38; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff00ff0000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffbfffa0ffffff80; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff00ff0000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffbfffa0ffffff80; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff02000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff02000000; ++ __m256i_out = __lasx_xvssrlrn_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xa1a1a1a1a1a1a1a1; ++ *((unsigned long *)&__m256i_op0[2]) = 0xa1a1a1a15e5e5e5e; ++ *((unsigned long *)&__m256i_op0[1]) = 0xa1a1a1a1a1a1a1a1; ++ *((unsigned long *)&__m256i_op0[0]) = 0xa1a1a1a15e5e5e5e; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f7f7f7f7f7f7f7f; ++ __m256i_out = __lasx_xvssrlrn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000457db03e; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff457db03f; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x00000000457db03e; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff457db03f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00020001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00020001; ++ __m256i_out = __lasx_xvssrlrn_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000007f7f7f80; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000007f7f7f80; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000080; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000007f007f007f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000007f007f007f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002; ++ __m256i_out = __lasx_xvssrlrn_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff0000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff0000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4000c08000000080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000080c000c080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4000c08000000080; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000080c000c080; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op1[2]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffff7fffffff; ++ __m256i_out = __lasx_xvssrlrn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000100010; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000100010; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000101000001010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000101000001010; ++ __m256i_out = __lasx_xvssrlrn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000404; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000404; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrlrn_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ff88ffc0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ff78ffc0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ff88ffc0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ff78ffc0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000002000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000002000000000; ++ __m256i_out = __lasx_xvssrlrn_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7efefefe80ffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7efefefe80ffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001000100800000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007fff7fff; ++ __m256i_out = __lasx_xvssrlrn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000200a000020020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000200a000020020; ++ __m256i_out = __lasx_xvssrlrn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff1cff1cff1c3fc7; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff1cff1cff1c3fc7; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000200000002; ++ __m256i_out = __lasx_xvssrlrn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000100; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000100; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0002000200000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0002000200000000; ++ __m256i_out = __lasx_xvssrlrn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000017f7f7f7f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000017f7f7f7f; ++ __m256i_out = __lasx_xvssrlrn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrlrn_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op1[3]) = 0xf5fffc00fc000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xf5fffc00fc000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xf5fffc00fc000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xf5fffc00fc000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrlrn_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf5fffc00fc000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf5fffc00fc000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001001900010019; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0a02041904010019; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001001900010019; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0a02041904010019; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000007b007e; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000007b007e; ++ __m256i_out = __lasx_xvssrlrn_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrni.c 
b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrni.c +new file mode 100644 +index 000000000..b55e388b1 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrni.c +@@ -0,0 +1,1160 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3133c6409eecf8b0; ++ *((unsigned long *)&__m256i_op0[2]) = 0xddf50db3c617a115; ++ *((unsigned long *)&__m256i_op0[1]) = 0xa432ea5a0913dc8e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x29d403af367b4545; ++ *((unsigned long *)&__m256i_op1[3]) = 0x38a966b31be83ee9; ++ *((unsigned long *)&__m256i_op1[2]) = 0x5f6108dc25b8e028; ++ *((unsigned long *)&__m256i_op1[1]) = 0xf41a56e8a20878d7; ++ *((unsigned long *)&__m256i_op1[0]) = 0x683b8b67e20c8ee5; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ffffffffffff7ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffe06df0d7; ++ *((unsigned long *)&__m256i_op0[1]) = 0x988eb37e000fb33d; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0xffffed95be394b1e; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000ffff8000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x06f880008000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x800080008000b8f1; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0x10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ff00ff00; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ff00ff00; ++ __m256i_out = __lasx_xvssrlrni_bu_h (__m256i_op0, __m256i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000040100000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000040100000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000040100000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000040100000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0080200000802000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0080200000802000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000f18080010000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000f18080010000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_w_d (__m256i_op0, __m256i_op1, 0x3b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned 
long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_d_q (__m256i_op0, __m256i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_w_d (__m256i_op0, __m256i_op1, 0x28); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000808080; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000808; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_du_q (__m256i_op0, __m256i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffefffffefc; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000010; ++ __m256i_out = __lasx_xvssrlrni_d_q (__m256i_op0, __m256i_op1, 0x7c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000020afefb1; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7f350104f7ebffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000003fffc1; ++ *((unsigned long *)&__m256i_op1[0]) = 0x005c0003fff9ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000fe6a021; ++ *((unsigned long *)&__m256i_result[1]) = 0x2000000020000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000b8000; ++ __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0x23); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7f8000007f800000; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff7fff7fff; ++ __m256i_out = __lasx_xvssrlrni_h_w (__m256i_op0, __m256i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000020001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_b_h (__m256i_op0, __m256i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0x2e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff7fff7fff; ++ __m256i_out = __lasx_xvssrlrni_h_w (__m256i_op0, __m256i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0020000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0020000000000000; ++ __m256i_out = __lasx_xvssrlrni_d_q (__m256i_op0, 
__m256i_op1, 0x4b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0x25); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000008000000080; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000008000000080; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0x33); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000100000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_du_q (__m256i_op0, __m256i_op1, 0x2c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000002020000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000201eff0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000002020000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000001fef010; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffefffe00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffefffefffefffd; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0010000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0010001000000000; ++ __m256i_out = __lasx_xvssrlrni_h_w (__m256i_op0, __m256i_op1, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffefffe00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffefffefffefffd; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffefffe00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 
0xfffefffefffefffd; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fff7fff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff00000000; ++ __m256i_out = __lasx_xvssrlrni_h_w (__m256i_op0, __m256i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_du_q (__m256i_op0, __m256i_op1, 0x29); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0e0f1192846ff912; ++ *((unsigned long *)&__m256i_op0[2]) = 0x002a0074666a4db9; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0e0f1192846ff912; ++ *((unsigned long *)&__m256i_op0[0]) = 0x002a0074666a4db9; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000018; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000018; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fff7fff05407fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x7fff7fff05407fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_h_w (__m256i_op0, __m256i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000001900000019; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000001900000019; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000001900000019; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000001900000019; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000007fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000007fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0408040800000004; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0408040800000004; ++ __m256i_out = __lasx_xvssrlrni_bu_h (__m256i_op0, __m256i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ 
*((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x07efefefefefefee; ++ *((unsigned long *)&__m256i_op1[2]) = 0x07efefefefefefee; ++ *((unsigned long *)&__m256i_op1[1]) = 0x07efefefefefefee; ++ *((unsigned long *)&__m256i_op1[0]) = 0x07efefefefefefee; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000001fbfbfc; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000001fbfbfc; ++ __m256i_out = __lasx_xvssrlrni_du_q (__m256i_op0, __m256i_op1, 0x62); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000fe01020b0001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000fe01020b0001; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrlrni_du_q (__m256i_op0, __m256i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_bu_h (__m256i_op0, __m256i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_b_h (__m256i_op0, __m256i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op1[3]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x4040404040404040; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000000202020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000404040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000202020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000404040; ++ __m256i_out = __lasx_xvssrlrni_du_q (__m256i_op0, __m256i_op1, 0x68); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_b_h (__m256i_op0, __m256i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x003f003f003f003f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x003f003f003f003f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_h_w (__m256i_op0, __m256i_op1, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000010486048c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000010486048c; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_d_q (__m256i_op0, __m256i_op1, 0x6f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfe7fffecfe7fffec; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfe7fffecfe7fffec; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff80000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0808080808000800; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0808080808000000; ++ __m256i_out = __lasx_xvssrlrni_bu_h (__m256i_op0, __m256i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff000c0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff00040000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_b_h (__m256i_op0, __m256i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xd010101010101010; ++ *((unsigned long *)&__m256i_op0[2]) = 0xd010101010103218; ++ *((unsigned long *)&__m256i_op0[1]) = 0xd010101010101010; ++ *((unsigned long *)&__m256i_op0[0]) = 0xd010101010103218; ++ *((unsigned long *)&__m256i_op1[3]) = 0xd010101010101010; ++ *((unsigned long *)&__m256i_op1[2]) = 0xd010101010103218; ++ *((unsigned long *)&__m256i_op1[1]) = 0xd010101010101010; ++ *((unsigned long *)&__m256i_op1[0]) = 0xd010101010103218; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffff7fffffff; ++ __m256i_out = __lasx_xvssrlrni_w_d (__m256i_op0, __m256i_op1, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000001ff8000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffe0000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000001ff8000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffe0000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_w_d (__m256i_op0, __m256i_op1, 0x3f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0020000000200000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0020000000200000; ++ __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0x2b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xbc30c40108a45423; ++ *((unsigned long *)&__m256i_op1[2]) = 0xbc263e0e5d00e69f; ++ *((unsigned long *)&__m256i_op1[1]) = 0xbc30c40108a4544b; ++ *((unsigned long *)&__m256i_op1[0]) = 0xbc20e63aa8b9663f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrlrni_hu_w (__m256i_op0, __m256i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0504080804030405; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0504060904040305; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0504080804030405; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0504060904040305; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000141020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000141020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_d_q (__m256i_op0, __m256i_op1, 0x66); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffe00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffe00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000080000000800; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000080000000800; ++ __m256i_out = __lasx_xvssrlrni_w_d (__m256i_op0, __m256i_op1, 0x35); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000010101010; ++ *((unsigned long *)&__m256i_result[2]) = 0x1010101010101010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000010101010; ++ *((unsigned long *)&__m256i_result[0]) = 0x1010101010101010; ++ __m256i_out = __lasx_xvssrlrni_b_h (__m256i_op0, __m256i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_b_h (__m256i_op0, __m256i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0020000000200000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0020002000200020; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x1010101010001000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x1010101000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000ff000000ff; ++ __m256i_out = __lasx_xvssrlrni_bu_h (__m256i_op0, __m256i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fff800000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fff800000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_du_q (__m256i_op0, __m256i_op1, 0x27); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_bu_h (__m256i_op0, __m256i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000465; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000465; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000008d00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000008d00000000; ++ __m256i_out = __lasx_xvssrlrni_bu_h (__m256i_op0, __m256i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0x2d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_b_h (__m256i_op0, __m256i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_du_q (__m256i_op0, __m256i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000ffff8000ffa3; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007fe70000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff8000ffa3; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007fe70000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xc03ae000ffff6000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xc600000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xc03ae000ffff6000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xc600000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000003; ++ __m256i_out = __lasx_xvssrlrni_d_q (__m256i_op0, __m256i_op1, 0x7e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_b_h (__m256i_op0, __m256i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff10000fff10000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff10000fff10000; ++ *((unsigned long *)&__m256i_op1[3]) = 
0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000001ffe2000; ++ *((unsigned long *)&__m256i_result[2]) = 0x001fe020001fe020; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000001ffe2000; ++ *((unsigned long *)&__m256i_result[0]) = 0x001fe020001fe020; ++ __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0x23); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_hu_w (__m256i_op0, __m256i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000002000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000002000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_w_d (__m256i_op0, __m256i_op1, 0x38); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_h_w 
(__m256i_op0, __m256i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000004; ++ __m256i_out = __lasx_xvssrlrni_d_q (__m256i_op0, __m256i_op1, 0x7e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_d_q (__m256i_op0, __m256i_op1, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000e0000000e; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0007000700070007; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0007000700070007; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvssrlrni_bu_h (__m256i_op0, __m256i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_hu_w (__m256i_op0, __m256i_op1, 0x17); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007f010100000101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007f010100000101; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000200000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000004000; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0x0000200000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0008000000000010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0008000000000010; ++ __m256i_out = __lasx_xvssrlrni_b_h (__m256i_op0, __m256i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfefefefe3f800000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfefefefe3f800000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007fff7fff; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007fff7fff; ++ __m256i_out = __lasx_xvssrlrni_h_w (__m256i_op0, __m256i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000007f0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000007f0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_b_h (__m256i_op0, __m256i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000008002d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000008002d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000007f0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000007f0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000010000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000010000000000; ++ __m256i_out = __lasx_xvssrlrni_bu_h (__m256i_op0, __m256i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ 
*((unsigned long *)&__m256i_op0[3]) = 0x7fffffffffffbfff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3f7f7f7f407fffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3f7f7f7f407fffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7efefefe80ffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7efefefe80ffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000fdfdfe; ++ __m256i_out = __lasx_xvssrlrni_d_q (__m256i_op0, __m256i_op1, 0x27); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x07ffffff07ffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x07ffffff08000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x07ffffff08000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x207f207f207f2000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000207f2000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f7f7f7f7f7f7f7f; ++ __m256i_out = __lasx_xvssrlrni_b_h (__m256i_op0, __m256i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffeb68380002001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffe97c08000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffeb68380002001; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffe97c08000ffff; ++ 
*((unsigned long *)&__m256i_op1[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000007fff5b41c0; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000007fff5b41d0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000007fff5b41c0; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000007fff5b41d0; ++ __m256i_out = __lasx_xvssrlrni_d_q (__m256i_op0, __m256i_op1, 0x59); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001000000010; ++ __m256i_out = __lasx_xvssrlrni_w_d (__m256i_op0, __m256i_op1, 0x3c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff97a2; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffff97a2; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000001010000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000001010000; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_d_q (__m256i_op0, __m256i_op1, 0x3f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00c00040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000008000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00c00040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000008000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_bu_h (__m256i_op0, __m256i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0002000200000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0002000200000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000020002000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000020002000; ++ *((unsigned long *)&__m256i_result[0]) = 
0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_b_h (__m256i_op0, __m256i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff010100000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff010100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000008000000080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000008000000080; ++ __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0x39); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_hu_w (__m256i_op0, __m256i_op1, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_result[2]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_result[1]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f7f7f7f7f7f7f7f; ++ __m256i_out = __lasx_xvssrlrni_b_h (__m256i_op0, __m256i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +-- +2.33.0 + diff --git a/LoongArch-Add-tests-for-ASX-xvldrepl-xvstelm-instruc.patch b/LoongArch-Add-tests-for-ASX-xvldrepl-xvstelm-instruc.patch new file mode 100644 index 0000000000000000000000000000000000000000..9f78bc7b50db0a418a00af298410f1681234a834 --- /dev/null +++ 
b/LoongArch-Add-tests-for-ASX-xvldrepl-xvstelm-instruc.patch @@ -0,0 +1,65 @@ +From 2ef90d604d7bae207d5b2067b4ce38d04d4835be Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Tue, 12 Sep 2023 16:00:48 +0800 +Subject: [PATCH 110/124] LoongArch: Add tests for ASX xvldrepl/xvstelm + instruction generation. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lasx/lasx-xvldrepl.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-xvstelm.c: New test. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lasx/lasx-xvldrepl.c | 16 ++++++++++++++++ + .../loongarch/vector/lasx/lasx-xvstelm.c | 14 ++++++++++++++ + 2 files changed, 30 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvldrepl.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvstelm.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvldrepl.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvldrepl.c +new file mode 100644 +index 000000000..105567951 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvldrepl.c +@@ -0,0 +1,16 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O3 -mlasx" } */ ++/* { dg-final { scan-assembler-times "xvldrepl.w" 2} } */ ++ ++#define N 258 ++ ++float a[N], b[N], c[N]; ++ ++void ++test () ++{ ++ for (int i = 0; i < 256; i++) ++ { ++ a[i] = c[0] * b[i] + c[1]; ++ } ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvstelm.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvstelm.c +new file mode 100644 +index 000000000..1a7b0e86f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvstelm.c +@@ -0,0 +1,14 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O3 -mlasx" } */ ++/* { dg-final { scan-assembler-times "xvstelm.w" 8} } */ ++ ++#define LEN 256 ++ ++float a[LEN], b[LEN], c[LEN]; ++ ++void ++test () ++{ ++ for (int i = 0; i < LEN; i += 2) ++ a[i] = b[i] + c[i]; ++} +-- +2.33.0 + diff 
--git a/LoongArch-Add-tests-for-Loongson-SX-builtin-function.patch b/LoongArch-Add-tests-for-Loongson-SX-builtin-function.patch new file mode 100644 index 0000000000000000000000000000000000000000..8502fe719e5c4256c164a3acdb2e0534ee86a35f --- /dev/null +++ b/LoongArch-Add-tests-for-Loongson-SX-builtin-function.patch @@ -0,0 +1,4354 @@ +From 1e9d9ec99e65201d8d926fddc89b6176abe9a4e6 Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Mon, 11 Sep 2023 09:38:42 +0800 +Subject: [PATCH 078/124] LoongArch: Add tests for Loongson SX builtin + functions. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lsx/lsx-builtin.c: New test. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lsx/lsx-builtin.c | 4328 +++++++++++++++++ + 1 file changed, 4328 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-builtin.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-builtin.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-builtin.c +new file mode 100644 +index 000000000..13013114d +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-builtin.c +@@ -0,0 +1,4328 @@ ++/* Test builtins for LOONGARCH LSX ASE instructions */ ++/* { dg-do compile } */ ++/* { dg-options "-mlsx" } */ ++/* { dg-final { scan-assembler-times "lsx_vsll_b:.*vsll\\.b.*lsx_vsll_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsll_h:.*vsll\\.h.*lsx_vsll_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsll_w:.*vsll\\.w.*lsx_vsll_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsll_d:.*vsll\\.d.*lsx_vsll_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslli_b:.*vslli\\.b.*lsx_vslli_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslli_h:.*vslli\\.h.*lsx_vslli_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslli_w:.*vslli\\.w.*lsx_vslli_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslli_d:.*vslli\\.d.*lsx_vslli_d" 1 } } */ ++/* { dg-final { 
scan-assembler-times "lsx_vsra_b:.*vsra\\.b.*lsx_vsra_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsra_h:.*vsra\\.h.*lsx_vsra_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsra_w:.*vsra\\.w.*lsx_vsra_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsra_d:.*vsra\\.d.*lsx_vsra_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrai_b:.*vsrai\\.b.*lsx_vsrai_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrai_h:.*vsrai\\.h.*lsx_vsrai_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrai_w:.*vsrai\\.w.*lsx_vsrai_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrai_d:.*vsrai\\.d.*lsx_vsrai_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrar_b:.*vsrar\\.b.*lsx_vsrar_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrar_h:.*vsrar\\.h.*lsx_vsrar_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrar_w:.*vsrar\\.w.*lsx_vsrar_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrar_d:.*vsrar\\.d.*lsx_vsrar_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrari_b:.*vsrari\\.b.*lsx_vsrari_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrari_h:.*vsrari\\.h.*lsx_vsrari_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrari_w:.*vsrari\\.w.*lsx_vsrari_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrari_d:.*vsrari\\.d.*lsx_vsrari_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrl_b:.*vsrl\\.b.*lsx_vsrl_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrl_h:.*vsrl\\.h.*lsx_vsrl_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrl_w:.*vsrl\\.w.*lsx_vsrl_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrl_d:.*vsrl\\.d.*lsx_vsrl_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrli_b:.*vsrli\\.b.*lsx_vsrli_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrli_h:.*vsrli\\.h.*lsx_vsrli_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrli_w:.*vsrli\\.w.*lsx_vsrli_w" 1 } } */ ++/* { dg-final { 
scan-assembler-times "lsx_vsrli_d:.*vsrli\\.d.*lsx_vsrli_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrlr_b:.*vsrlr\\.b.*lsx_vsrlr_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrlr_h:.*vsrlr\\.h.*lsx_vsrlr_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrlr_w:.*vsrlr\\.w.*lsx_vsrlr_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrlr_d:.*vsrlr\\.d.*lsx_vsrlr_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrlri_b:.*vsrlri\\.b.*lsx_vsrlri_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrlri_h:.*vsrlri\\.h.*lsx_vsrlri_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrlri_w:.*vsrlri\\.w.*lsx_vsrlri_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrlri_d:.*vsrlri\\.d.*lsx_vsrlri_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitclr_b:.*vbitclr\\.b.*lsx_vbitclr_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitclr_h:.*vbitclr\\.h.*lsx_vbitclr_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitclr_w:.*vbitclr\\.w.*lsx_vbitclr_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitclr_d:.*vbitclr\\.d.*lsx_vbitclr_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitclri_b:.*vbitclri\\.b.*lsx_vbitclri_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitclri_h:.*vbitclri\\.h.*lsx_vbitclri_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitclri_w:.*vbitclri\\.w.*lsx_vbitclri_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitclri_d:.*vbitclri\\.d.*lsx_vbitclri_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitset_b:.*vbitset\\.b.*lsx_vbitset_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitset_h:.*vbitset\\.h.*lsx_vbitset_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitset_w:.*vbitset\\.w.*lsx_vbitset_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitset_d:.*vbitset\\.d.*lsx_vbitset_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitseti_b:.*vbitseti\\.b.*lsx_vbitseti_b" 1 } } 
*/ ++/* { dg-final { scan-assembler-times "lsx_vbitseti_h:.*vbitseti\\.h.*lsx_vbitseti_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitseti_w:.*vbitseti\\.w.*lsx_vbitseti_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitseti_d:.*vbitseti\\.d.*lsx_vbitseti_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitrev_b:.*vbitrev\\.b.*lsx_vbitrev_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitrev_h:.*vbitrev\\.h.*lsx_vbitrev_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitrev_w:.*vbitrev\\.w.*lsx_vbitrev_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitrev_d:.*vbitrev\\.d.*lsx_vbitrev_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitrevi_b:.*vbitrevi\\.b.*lsx_vbitrevi_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitrevi_h:.*vbitrevi\\.h.*lsx_vbitrevi_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitrevi_w:.*vbitrevi\\.w.*lsx_vbitrevi_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitrevi_d:.*vbitrevi\\.d.*lsx_vbitrevi_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vadd_b:.*vadd\\.b.*lsx_vadd_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vadd_h:.*vadd\\.h.*lsx_vadd_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vadd_w:.*vadd\\.w.*lsx_vadd_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vadd_d:.*vadd\\.d.*lsx_vadd_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddi_bu:.*vaddi\\.bu.*lsx_vaddi_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddi_hu:.*vaddi\\.hu.*lsx_vaddi_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddi_wu:.*vaddi\\.wu.*lsx_vaddi_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddi_du:.*vaddi\\.du.*lsx_vaddi_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsub_b:.*vsub\\.b.*lsx_vsub_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsub_h:.*vsub\\.h.*lsx_vsub_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsub_w:.*vsub\\.w.*lsx_vsub_w" 1 } } */ ++/* 
{ dg-final { scan-assembler-times "lsx_vsub_d:.*vsub\\.d.*lsx_vsub_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsubi_bu:.*vsubi\\.bu.*lsx_vsubi_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsubi_hu:.*vsubi\\.hu.*lsx_vsubi_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsubi_wu:.*vsubi\\.wu.*lsx_vsubi_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsubi_du:.*vsubi\\.du.*lsx_vsubi_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmax_b:.*vmax\\.b.*lsx_vmax_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmax_h:.*vmax\\.h.*lsx_vmax_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmax_w:.*vmax\\.w.*lsx_vmax_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmax_d:.*vmax\\.d.*lsx_vmax_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaxi_b:.*vmaxi\\.b.*lsx_vmaxi_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaxi_h:.*vmaxi\\.h.*lsx_vmaxi_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaxi_w:.*vmaxi\\.w.*lsx_vmaxi_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaxi_d:.*vmaxi\\.d.*lsx_vmaxi_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmax_bu:.*vmax\\.bu.*lsx_vmax_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmax_hu:.*vmax\\.hu.*lsx_vmax_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmax_wu:.*vmax\\.wu.*lsx_vmax_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmax_du:.*vmax\\.du.*lsx_vmax_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaxi_bu:.*vmaxi\\.bu.*lsx_vmaxi_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaxi_hu:.*vmaxi\\.hu.*lsx_vmaxi_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaxi_wu:.*vmaxi\\.wu.*lsx_vmaxi_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaxi_du:.*vmaxi\\.du.*lsx_vmaxi_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmin_b:.*vmin\\.b.*lsx_vmin_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmin_h:.*vmin\\.h.*lsx_vmin_h" 1 } } */ 
++/* { dg-final { scan-assembler-times "lsx_vmin_w:.*vmin\\.w.*lsx_vmin_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmin_d:.*vmin\\.d.*lsx_vmin_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmini_b:.*vmini\\.b.*lsx_vmini_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmini_h:.*vmini\\.h.*lsx_vmini_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmini_w:.*vmini\\.w.*lsx_vmini_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmini_d:.*vmini\\.d.*lsx_vmini_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmin_bu:.*vmin\\.bu.*lsx_vmin_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmin_hu:.*vmin\\.hu.*lsx_vmin_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmin_wu:.*vmin\\.wu.*lsx_vmin_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmin_du:.*vmin\\.du.*lsx_vmin_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmini_bu:.*vmini\\.bu.*lsx_vmini_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmini_hu:.*vmini\\.hu.*lsx_vmini_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmini_wu:.*vmini\\.wu.*lsx_vmini_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmini_du:.*vmini\\.du.*lsx_vmini_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vseq_b:.*vseq\\.b.*lsx_vseq_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vseq_h:.*vseq\\.h.*lsx_vseq_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vseq_w:.*vseq\\.w.*lsx_vseq_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vseq_d:.*vseq\\.d.*lsx_vseq_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vseqi_b:.*vseqi\\.b.*lsx_vseqi_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vseqi_h:.*vseqi\\.h.*lsx_vseqi_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vseqi_w:.*vseqi\\.w.*lsx_vseqi_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vseqi_d:.*vseqi\\.d.*lsx_vseqi_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslti_b:.*vslti\\.b.*lsx_vslti_b" 1 } } */ ++/* 
{ dg-final { scan-assembler-times "lsx_vslt_b:.*vslt\\.b.*lsx_vslt_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslt_h:.*vslt\\.h.*lsx_vslt_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslt_w:.*vslt\\.w.*lsx_vslt_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslt_d:.*vslt\\.d.*lsx_vslt_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslti_h:.*vslti\\.h.*lsx_vslti_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslti_w:.*vslti\\.w.*lsx_vslti_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslti_d:.*vslti\\.d.*lsx_vslti_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslt_bu:.*vslt\\.bu.*lsx_vslt_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslt_hu:.*vslt\\.hu.*lsx_vslt_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslt_wu:.*vslt\\.wu.*lsx_vslt_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslt_du:.*vslt\\.du.*lsx_vslt_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslti_bu:.*vslti\\.bu.*lsx_vslti_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslti_hu:.*vslti\\.hu.*lsx_vslti_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslti_wu:.*vslti\\.wu.*lsx_vslti_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslti_du:.*vslti\\.du.*lsx_vslti_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsle_b:.*vsle\\.b.*lsx_vsle_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsle_h:.*vsle\\.h.*lsx_vsle_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsle_w:.*vsle\\.w.*lsx_vsle_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsle_d:.*vsle\\.d.*lsx_vsle_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslei_b:.*vslei\\.b.*lsx_vslei_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslei_h:.*vslei\\.h.*lsx_vslei_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslei_w:.*vslei\\.w.*lsx_vslei_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslei_d:.*vslei\\.d.*lsx_vslei_d" 1 } } */ ++/* { dg-final 
{ scan-assembler-times "lsx_vsle_bu:.*vsle\\.bu.*lsx_vsle_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsle_hu:.*vsle\\.hu.*lsx_vsle_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsle_wu:.*vsle\\.wu.*lsx_vsle_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsle_du:.*vsle\\.du.*lsx_vsle_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslei_bu:.*vslei\\.bu.*lsx_vslei_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslei_hu:.*vslei\\.hu.*lsx_vslei_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslei_wu:.*vslei\\.wu.*lsx_vslei_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslei_du:.*vslei\\.du.*lsx_vslei_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsat_b:.*vsat\\.b.*lsx_vsat_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsat_h:.*vsat\\.h.*lsx_vsat_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsat_w:.*vsat\\.w.*lsx_vsat_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsat_d:.*vsat\\.d.*lsx_vsat_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsat_bu:.*vsat\\.bu.*lsx_vsat_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsat_hu:.*vsat\\.hu.*lsx_vsat_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsat_wu:.*vsat\\.wu.*lsx_vsat_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsat_du:.*vsat\\.du.*lsx_vsat_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vadda_b:.*vadda\\.b.*lsx_vadda_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vadda_h:.*vadda\\.h.*lsx_vadda_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vadda_w:.*vadda\\.w.*lsx_vadda_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vadda_d:.*vadda\\.d.*lsx_vadda_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsadd_b:.*vsadd\\.b.*lsx_vsadd_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsadd_h:.*vsadd\\.h.*lsx_vsadd_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsadd_w:.*vsadd\\.w.*lsx_vsadd_w" 1 } } */ ++/* { 
dg-final { scan-assembler-times "lsx_vsadd_d:.*vsadd\\.d.*lsx_vsadd_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsadd_bu:.*vsadd\\.bu.*lsx_vsadd_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsadd_hu:.*vsadd\\.hu.*lsx_vsadd_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsadd_wu:.*vsadd\\.wu.*lsx_vsadd_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsadd_du:.*vsadd\\.du.*lsx_vsadd_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vavg_b:.*vavg\\.b.*lsx_vavg_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vavg_h:.*vavg\\.h.*lsx_vavg_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vavg_w:.*vavg\\.w.*lsx_vavg_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vavg_d:.*vavg\\.d.*lsx_vavg_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vavg_bu:.*vavg\\.bu.*lsx_vavg_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vavg_hu:.*vavg\\.hu.*lsx_vavg_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vavg_wu:.*vavg\\.wu.*lsx_vavg_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vavg_du:.*vavg\\.du.*lsx_vavg_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vavgr_b:.*vavgr\\.b.*lsx_vavgr_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vavgr_h:.*vavgr\\.h.*lsx_vavgr_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vavgr_w:.*vavgr\\.w.*lsx_vavgr_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vavgr_d:.*vavgr\\.d.*lsx_vavgr_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vavgr_bu:.*vavgr\\.bu.*lsx_vavgr_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vavgr_hu:.*vavgr\\.hu.*lsx_vavgr_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vavgr_wu:.*vavgr\\.wu.*lsx_vavgr_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vavgr_du:.*vavgr\\.du.*lsx_vavgr_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssub_b:.*vssub\\.b.*lsx_vssub_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssub_h:.*vssub\\.h.*lsx_vssub_h" 1 
} } */ ++/* { dg-final { scan-assembler-times "lsx_vssub_w:.*vssub\\.w.*lsx_vssub_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssub_d:.*vssub\\.d.*lsx_vssub_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssub_bu:.*vssub\\.bu.*lsx_vssub_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssub_hu:.*vssub\\.hu.*lsx_vssub_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssub_wu:.*vssub\\.wu.*lsx_vssub_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssub_du:.*vssub\\.du.*lsx_vssub_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vabsd_b:.*vabsd\\.b.*lsx_vabsd_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vabsd_h:.*vabsd\\.h.*lsx_vabsd_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vabsd_w:.*vabsd\\.w.*lsx_vabsd_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vabsd_d:.*vabsd\\.d.*lsx_vabsd_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vabsd_bu:.*vabsd\\.bu.*lsx_vabsd_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vabsd_hu:.*vabsd\\.hu.*lsx_vabsd_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vabsd_wu:.*vabsd\\.wu.*lsx_vabsd_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vabsd_du:.*vabsd\\.du.*lsx_vabsd_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmul_b:.*vmul\\.b.*lsx_vmul_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmul_h:.*vmul\\.h.*lsx_vmul_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmul_w:.*vmul\\.w.*lsx_vmul_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmul_d:.*vmul\\.d.*lsx_vmul_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmadd_b:.*vmadd\\.b.*lsx_vmadd_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmadd_h:.*vmadd\\.h.*lsx_vmadd_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmadd_w:.*vmadd\\.w.*lsx_vmadd_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmadd_d:.*vmadd\\.d.*lsx_vmadd_d" 1 } } */ ++/* { dg-final { scan-assembler-times 
"lsx_vmsub_b:.*vmsub\\.b.*lsx_vmsub_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmsub_h:.*vmsub\\.h.*lsx_vmsub_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmsub_w:.*vmsub\\.w.*lsx_vmsub_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmsub_d:.*vmsub\\.d.*lsx_vmsub_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vdiv_b:.*vdiv\\.b.*lsx_vdiv_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vdiv_h:.*vdiv\\.h.*lsx_vdiv_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vdiv_w:.*vdiv\\.w.*lsx_vdiv_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vdiv_d:.*vdiv\\.d.*lsx_vdiv_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vdiv_bu:.*vdiv\\.bu.*lsx_vdiv_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vdiv_hu:.*vdiv\\.hu.*lsx_vdiv_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vdiv_wu:.*vdiv\\.wu.*lsx_vdiv_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vdiv_du:.*vdiv\\.du.*lsx_vdiv_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vhaddw_h_b:.*vhaddw\\.h\\.b.*lsx_vhaddw_h_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vhaddw_w_h:.*vhaddw\\.w\\.h.*lsx_vhaddw_w_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vhaddw_d_w:.*vhaddw\\.d\\.w.*lsx_vhaddw_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vhaddw_hu_bu:.*vhaddw\\.hu\\.bu.*lsx_vhaddw_hu_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vhaddw_wu_hu:.*vhaddw\\.wu\\.hu.*lsx_vhaddw_wu_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vhaddw_du_wu:.*vhaddw\\.du\\.wu.*lsx_vhaddw_du_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vhsubw_h_b:.*vhsubw\\.h\\.b.*lsx_vhsubw_h_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vhsubw_w_h:.*vhsubw\\.w\\.h.*lsx_vhsubw_w_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vhsubw_d_w:.*vhsubw\\.d\\.w.*lsx_vhsubw_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vhsubw_hu_bu:.*vhsubw\\.hu\\.bu.*lsx_vhsubw_hu_bu" 1 } } */ 
++/* { dg-final { scan-assembler-times "lsx_vhsubw_wu_hu:.*vhsubw\\.wu\\.hu.*lsx_vhsubw_wu_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vhsubw_du_wu:.*vhsubw\\.du\\.wu.*lsx_vhsubw_du_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmod_b:.*vmod\\.b.*lsx_vmod_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmod_h:.*vmod\\.h.*lsx_vmod_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmod_w:.*vmod\\.w.*lsx_vmod_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmod_d:.*vmod\\.d.*lsx_vmod_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmod_bu:.*vmod\\.bu.*lsx_vmod_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmod_hu:.*vmod\\.hu.*lsx_vmod_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmod_wu:.*vmod\\.wu.*lsx_vmod_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmod_du:.*vmod\\.du.*lsx_vmod_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vreplve_b:.*vreplve\\.b.*lsx_vreplve_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vreplve_h:.*vreplve\\.h.*lsx_vreplve_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vreplve_w:.*vreplve\\.w.*lsx_vreplve_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vreplve_d:.*vreplve\\.d.*lsx_vreplve_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vreplvei_b:.*vreplvei\\.b.*lsx_vreplvei_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vreplvei_h:.*vreplvei\\.h.*lsx_vreplvei_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vreplvei_w:.*vreplvei\\.w.*lsx_vreplvei_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vreplvei_d:.*vreplvei\\.d.*lsx_vreplvei_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpickev_b:.*vpickev\\.b.*lsx_vpickev_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpickev_h:.*vpickev\\.h.*lsx_vpickev_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpickev_w:.*vpickev\\.w.*lsx_vpickev_w" 1 } } */ ++/* { dg-final { scan-assembler-times 
"lsx_vpickev_d:.*vilvl\\.d.*lsx_vpickev_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpickod_b:.*vpickod\\.b.*lsx_vpickod_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpickod_h:.*vpickod\\.h.*lsx_vpickod_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpickod_w:.*vpickod\\.w.*lsx_vpickod_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpickod_d:.*vilvh\\.d.*lsx_vpickod_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vilvh_b:.*vilvh\\.b.*lsx_vilvh_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vilvh_h:.*vilvh\\.h.*lsx_vilvh_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vilvh_w:.*vilvh\\.w.*lsx_vilvh_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vilvh_d:.*vilvh\\.d.*lsx_vilvh_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vilvl_b:.*vilvl\\.b.*lsx_vilvl_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vilvl_h:.*vilvl\\.h.*lsx_vilvl_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vilvl_w:.*vilvl\\.w.*lsx_vilvl_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vilvl_d:.*vilvl\\.d.*lsx_vilvl_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpackev_b:.*vpackev\\.b.*lsx_vpackev_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpackev_h:.*vpackev\\.h.*lsx_vpackev_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpackev_w:.*vpackev\\.w.*lsx_vpackev_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpackev_d:.*vilvl\\.d.*lsx_vpackev_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpackod_b:.*vpackod\\.b.*lsx_vpackod_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpackod_h:.*vpackod\\.h.*lsx_vpackod_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpackod_w:.*vpackod\\.w.*lsx_vpackod_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpackod_d:.*vilvh\\.d.*lsx_vpackod_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vshuf_h:.*vshuf\\.h.*lsx_vshuf_h" 1 } } */ ++/* { dg-final { scan-assembler-times 
"lsx_vshuf_w:.*vshuf\\.w.*lsx_vshuf_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vshuf_d:.*vshuf\\.d.*lsx_vshuf_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vand_v:.*vand\\.v.*lsx_vand_v" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vandi_b:.*vandi\\.b.*lsx_vandi_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vor_v:.*vor\\.v.*lsx_vor_v" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vori_b:.*vbitseti\\.b.*lsx_vori_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vnor_v:.*vnor\\.v.*lsx_vnor_v" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vnori_b:.*vnori\\.b.*lsx_vnori_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vxor_v:.*vxor\\.v.*lsx_vxor_v" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vxori_b:.*vbitrevi\\.b.*lsx_vxori_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitsel_v:.*vbitsel\\.v.*lsx_vbitsel_v" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitseli_b:.*vbitseli\\.b.*lsx_vbitseli_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vshuf4i_b:.*vshuf4i\\.b.*lsx_vshuf4i_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vshuf4i_h:.*vshuf4i\\.h.*lsx_vshuf4i_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vshuf4i_w:.*vshuf4i\\.w.*lsx_vshuf4i_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vreplgr2vr_b:.*vreplgr2vr\\.b.*lsx_vreplgr2vr_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vreplgr2vr_h:.*vreplgr2vr\\.h.*lsx_vreplgr2vr_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vreplgr2vr_w:.*vreplgr2vr\\.w.*lsx_vreplgr2vr_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vreplgr2vr_d:.*vreplgr2vr\\.d.*lsx_vreplgr2vr_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpcnt_b:.*vpcnt\\.b.*lsx_vpcnt_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpcnt_h:.*vpcnt\\.h.*lsx_vpcnt_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpcnt_w:.*vpcnt\\.w.*lsx_vpcnt_w" 1 } } */ ++/* { dg-final { scan-assembler-times 
"lsx_vpcnt_d:.*vpcnt\\.d.*lsx_vpcnt_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vclo_b:.*vclo\\.b.*lsx_vclo_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vclo_h:.*vclo\\.h.*lsx_vclo_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vclo_w:.*vclo\\.w.*lsx_vclo_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vclo_d:.*vclo\\.d.*lsx_vclo_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vclz_b:.*vclz\\.b.*lsx_vclz_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vclz_h:.*vclz\\.h.*lsx_vclz_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vclz_w:.*vclz\\.w.*lsx_vclz_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vclz_d:.*vclz\\.d.*lsx_vclz_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpickve2gr_b:.*vpickve2gr\\.b.*lsx_vpickve2gr_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpickve2gr_h:.*vpickve2gr\\.h.*lsx_vpickve2gr_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpickve2gr_w:.*vpickve2gr\\.w.*lsx_vpickve2gr_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpickve2gr_d:.*vpickve2gr\\.d.*lsx_vpickve2gr_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpickve2gr_bu:.*vpickve2gr\\.bu.*lsx_vpickve2gr_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpickve2gr_hu:.*vpickve2gr\\.hu.*lsx_vpickve2gr_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpickve2gr_wu:.*vpickve2gr\\.wu.*lsx_vpickve2gr_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpickve2gr_du:.*vpickve2gr\\.du.*lsx_vpickve2gr_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vinsgr2vr_b:.*vinsgr2vr\\.b.*lsx_vinsgr2vr_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vinsgr2vr_h:.*vinsgr2vr\\.h.*lsx_vinsgr2vr_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vinsgr2vr_w:.*vinsgr2vr\\.w.*lsx_vinsgr2vr_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vinsgr2vr_d:.*vinsgr2vr\\.d.*lsx_vinsgr2vr_d" 1 } } */ ++/* { dg-final { scan-assembler-times 
"lsx_vfadd_s:.*vfadd\\.s.*lsx_vfadd_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfadd_d:.*vfadd\\.d.*lsx_vfadd_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfsub_s:.*vfsub\\.s.*lsx_vfsub_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfsub_d:.*vfsub\\.d.*lsx_vfsub_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfmul_s:.*vfmul\\.s.*lsx_vfmul_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfmul_d:.*vfmul\\.d.*lsx_vfmul_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfdiv_s:.*vfdiv\\.s.*lsx_vfdiv_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfdiv_d:.*vfdiv\\.d.*lsx_vfdiv_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcvt_h_s:.*vfcvt\\.h\\.s.*lsx_vfcvt_h_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcvt_s_d:.*vfcvt\\.s\\.d.*lsx_vfcvt_s_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfmin_s:.*vfmin\\.s.*lsx_vfmin_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfmin_d:.*vfmin\\.d.*lsx_vfmin_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfmina_s:.*vfmina\\.s.*lsx_vfmina_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfmina_d:.*vfmina\\.d.*lsx_vfmina_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfmax_s:.*vfmax\\.s.*lsx_vfmax_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfmax_d:.*vfmax\\.d.*lsx_vfmax_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfmaxa_s:.*vfmaxa\\.s.*lsx_vfmaxa_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfmaxa_d:.*vfmaxa\\.d.*lsx_vfmaxa_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfclass_s:.*vfclass\\.s.*lsx_vfclass_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfclass_d:.*vfclass\\.d.*lsx_vfclass_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfsqrt_s:.*vfsqrt\\.s.*lsx_vfsqrt_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfsqrt_d:.*vfsqrt\\.d.*lsx_vfsqrt_d" 1 } } */ ++/* { dg-final { scan-assembler-times 
"lsx_vfrecip_s:.*vfrecip\\.s.*lsx_vfrecip_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfrecip_d:.*vfrecip\\.d.*lsx_vfrecip_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfrint_s:.*vfrint\\.s.*lsx_vfrint_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfrint_d:.*vfrint\\.d.*lsx_vfrint_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfrsqrt_s:.*vfrsqrt\\.s.*lsx_vfrsqrt_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfrsqrt_d:.*vfrsqrt\\.d.*lsx_vfrsqrt_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vflogb_s:.*vflogb\\.s.*lsx_vflogb_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vflogb_d:.*vflogb\\.d.*lsx_vflogb_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcvth_s_h:.*vfcvth\\.s\\.h.*lsx_vfcvth_s_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcvth_d_s:.*vfcvth\\.d\\.s.*lsx_vfcvth_d_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcvtl_s_h:.*vfcvtl\\.s\\.h.*lsx_vfcvtl_s_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcvtl_d_s:.*vfcvtl\\.d\\.s.*lsx_vfcvtl_d_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftint_w_s:.*vftint\\.w\\.s.*lsx_vftint_w_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftint_l_d:.*vftint\\.l\\.d.*lsx_vftint_l_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftint_wu_s:.*vftint\\.wu\\.s.*lsx_vftint_wu_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftint_lu_d:.*vftint\\.lu\\.d.*lsx_vftint_lu_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftintrz_w_s:.*vftintrz\\.w\\.s.*lsx_vftintrz_w_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftintrz_l_d:.*vftintrz\\.l\\.d.*lsx_vftintrz_l_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftintrz_wu_s:.*vftintrz\\.wu\\.s.*lsx_vftintrz_wu_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftintrz_lu_d:.*vftintrz\\.lu\\.d.*lsx_vftintrz_lu_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vffint_s_w:.*vffint\\.s\\.w.*lsx_vffint_s_w" 1 } } 
*/ ++/* { dg-final { scan-assembler-times "lsx_vffint_d_l:.*vffint\\.d\\.l.*lsx_vffint_d_l" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vffint_s_wu:.*vffint\\.s\\.wu.*lsx_vffint_s_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vffint_d_lu:.*vffint\\.d\\.lu.*lsx_vffint_d_lu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vandn_v:.*vandn\\.v.*lsx_vandn_v" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vneg_b:.*vneg\\.b.*lsx_vneg_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vneg_h:.*vneg\\.h.*lsx_vneg_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vneg_w:.*vneg\\.w.*lsx_vneg_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vneg_d:.*vneg\\.d.*lsx_vneg_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmuh_b:.*vmuh\\.b.*lsx_vmuh_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmuh_h:.*vmuh\\.h.*lsx_vmuh_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmuh_w:.*vmuh\\.w.*lsx_vmuh_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmuh_d:.*vmuh\\.d.*lsx_vmuh_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmuh_bu:.*vmuh\\.bu.*lsx_vmuh_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmuh_hu:.*vmuh\\.hu.*lsx_vmuh_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmuh_wu:.*vmuh\\.wu.*lsx_vmuh_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmuh_du:.*vmuh\\.du.*lsx_vmuh_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsllwil_h_b:.*vsllwil\\.h\\.b.*lsx_vsllwil_h_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsllwil_w_h:.*vsllwil\\.w\\.h.*lsx_vsllwil_w_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsllwil_d_w:.*vsllwil\\.d\\.w.*lsx_vsllwil_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsllwil_hu_bu:.*vsllwil\\.hu\\.bu.*lsx_vsllwil_hu_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsllwil_wu_hu:.*vsllwil\\.wu\\.hu.*lsx_vsllwil_wu_hu" 1 } } */ ++/* { dg-final { scan-assembler-times 
"lsx_vsllwil_du_wu:.*vsllwil\\.du\\.wu.*lsx_vsllwil_du_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsran_b_h:.*vsran\\.b\\.h.*lsx_vsran_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsran_h_w:.*vsran\\.h\\.w.*lsx_vsran_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsran_w_d:.*vsran\\.w\\.d.*lsx_vsran_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssran_b_h:.*vssran\\.b\\.h.*lsx_vssran_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssran_h_w:.*vssran\\.h\\.w.*lsx_vssran_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssran_w_d:.*vssran\\.w\\.d.*lsx_vssran_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssran_bu_h:.*vssran\\.bu\\.h.*lsx_vssran_bu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssran_hu_w:.*vssran\\.hu\\.w.*lsx_vssran_hu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssran_wu_d:.*vssran\\.wu\\.d.*lsx_vssran_wu_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrarn_b_h:.*vsrarn\\.b\\.h.*lsx_vsrarn_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrarn_h_w:.*vsrarn\\.h\\.w.*lsx_vsrarn_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrarn_w_d:.*vsrarn\\.w\\.d.*lsx_vsrarn_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrarn_b_h:.*vssrarn\\.b\\.h.*lsx_vssrarn_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrarn_h_w:.*vssrarn\\.h\\.w.*lsx_vssrarn_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrarn_w_d:.*vssrarn\\.w\\.d.*lsx_vssrarn_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrarn_bu_h:.*vssrarn\\.bu\\.h.*lsx_vssrarn_bu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrarn_hu_w:.*vssrarn\\.hu\\.w.*lsx_vssrarn_hu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrarn_wu_d:.*vssrarn\\.wu\\.d.*lsx_vssrarn_wu_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrln_b_h:.*vsrln\\.b\\.h.*lsx_vsrln_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times 
"lsx_vsrln_h_w:.*vsrln\\.h\\.w.*lsx_vsrln_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrln_w_d:.*vsrln\\.w\\.d.*lsx_vsrln_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrln_bu_h:.*vssrln\\.bu\\.h.*lsx_vssrln_bu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrln_hu_w:.*vssrln\\.hu\\.w.*lsx_vssrln_hu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrln_wu_d:.*vssrln\\.wu\\.d.*lsx_vssrln_wu_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrlrn_b_h:.*vsrlrn\\.b\\.h.*lsx_vsrlrn_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrlrn_h_w:.*vsrlrn\\.h\\.w.*lsx_vsrlrn_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrlrn_w_d:.*vsrlrn\\.w\\.d.*lsx_vsrlrn_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlrn_bu_h:.*vssrlrn\\.bu\\.h.*lsx_vssrlrn_bu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlrn_hu_w:.*vssrlrn\\.hu\\.w.*lsx_vssrlrn_hu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlrn_wu_d:.*vssrlrn\\.wu\\.d.*lsx_vssrlrn_wu_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfrstpi_b:.*vfrstpi\\.b.*lsx_vfrstpi_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfrstpi_h:.*vfrstpi\\.h.*lsx_vfrstpi_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfrstp_b:.*vfrstp\\.b.*lsx_vfrstp_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfrstp_h:.*vfrstp\\.h.*lsx_vfrstp_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vshuf4i_d:.*vshuf4i\\.d.*lsx_vshuf4i_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbsrl_v:.*vbsrl\\.v.*lsx_vbsrl_v" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbsll_v:.*vbsll\\.v.*lsx_vbsll_v" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vextrins_b:.*vextrins\\.b.*lsx_vextrins_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vextrins_h:.*vextrins\\.h.*lsx_vextrins_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vextrins_w:.*vextrins\\.w.*lsx_vextrins_w" 1 } } */ ++/* { dg-final { 
scan-assembler-times "lsx_vextrins_d:.*vextrins\\.d.*lsx_vextrins_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmskltz_b:.*vmskltz\\.b.*lsx_vmskltz_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmskltz_h:.*vmskltz\\.h.*lsx_vmskltz_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmskltz_w:.*vmskltz\\.w.*lsx_vmskltz_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmskltz_d:.*vmskltz\\.d.*lsx_vmskltz_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsigncov_b:.*vsigncov\\.b.*lsx_vsigncov_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsigncov_h:.*vsigncov\\.h.*lsx_vsigncov_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsigncov_w:.*vsigncov\\.w.*lsx_vsigncov_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsigncov_d:.*vsigncov\\.d.*lsx_vsigncov_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfmadd_s:.*vfmadd\\.s.*lsx_vfmadd_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfmadd_d:.*vfmadd\\.d.*lsx_vfmadd_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfmsub_s:.*vfmsub\\.s.*lsx_vfmsub_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfmsub_d:.*vfmsub\\.d.*lsx_vfmsub_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfnmadd_s:.*vfnmadd\\.s.*lsx_vfnmadd_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfnmadd_d:.*vfnmadd\\.d.*lsx_vfnmadd_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfnmsub_s:.*vfnmsub\\.s.*lsx_vfnmsub_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfnmsub_d:.*vfnmsub\\.d.*lsx_vfnmsub_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftintrne_w_s:.*vftintrne\\.w\\.s.*lsx_vftintrne_w_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftintrne_l_d:.*vftintrne\\.l\\.d.*lsx_vftintrne_l_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftintrp_w_s:.*vftintrp\\.w\\.s.*lsx_vftintrp_w_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftintrp_l_d:.*vftintrp\\.l\\.d.*lsx_vftintrp_l_d" 1 } } */ ++/* { 
dg-final { scan-assembler-times "lsx_vftintrm_w_s:.*vftintrm\\.w\\.s.*lsx_vftintrm_w_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftintrm_l_d:.*vftintrm\\.l\\.d.*lsx_vftintrm_l_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftint_w_d:.*vftint\\.w\\.d.*lsx_vftint_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vffint_s_l:.*vffint\\.s\\.l.*lsx_vffint_s_l" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftintrz_w_d:.*vftintrz\\.w\\.d.*lsx_vftintrz_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftintrp_w_d:.*vftintrp\\.w\\.d.*lsx_vftintrp_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftintrm_w_d:.*vftintrm\\.w\\.d.*lsx_vftintrm_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftintrne_w_d:.*vftintrne\\.w\\.d.*lsx_vftintrne_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftintl_l_s:.*vftintl\\.l\\.s.*lsx_vftintl_l_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftinth_l_s:.*vftinth\\.l\\.s.*lsx_vftinth_l_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vffinth_d_w:.*vffinth\\.d\\.w.*lsx_vffinth_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vffintl_d_w:.*vffintl\\.d\\.w.*lsx_vffintl_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftintrzl_l_s:.*vftintrzl\\.l\\.s.*lsx_vftintrzl_l_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftintrzh_l_s:.*vftintrzh\\.l\\.s.*lsx_vftintrzh_l_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftintrpl_l_s:.*vftintrpl\\.l\\.s.*lsx_vftintrpl_l_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftintrph_l_s:.*vftintrph\\.l\\.s.*lsx_vftintrph_l_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftintrml_l_s:.*vftintrml\\.l\\.s.*lsx_vftintrml_l_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftintrmh_l_s:.*vftintrmh\\.l\\.s.*lsx_vftintrmh_l_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftintrnel_l_s:.*vftintrnel\\.l\\.s.*lsx_vftintrnel_l_s" 1 } } */ ++/* { dg-final { 
scan-assembler-times "lsx_vftintrneh_l_s:.*vftintrneh\\.l\\.s.*lsx_vftintrneh_l_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfrintrne_s:.*vfrintrne\\.s.*lsx_vfrintrne_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfrintrne_d:.*vfrintrne\\.d.*lsx_vfrintrne_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfrintrz_s:.*vfrintrz\\.s.*lsx_vfrintrz_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfrintrz_d:.*vfrintrz\\.d.*lsx_vfrintrz_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfrintrp_s:.*vfrintrp\\.s.*lsx_vfrintrp_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfrintrp_d:.*vfrintrp\\.d.*lsx_vfrintrp_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfrintrm_s:.*vfrintrm\\.s.*lsx_vfrintrm_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfrintrm_d:.*vfrintrm\\.d.*lsx_vfrintrm_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vstelm_b:.*vstelm\\.b.*lsx_vstelm_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vstelm_h:.*vstelm\\.h.*lsx_vstelm_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vstelm_w:.*vstelm\\.w.*lsx_vstelm_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vstelm_d:.*vstelm\\.d.*lsx_vstelm_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwev_d_w:.*vaddwev\\.d\\.w.*lsx_vaddwev_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwev_w_h:.*vaddwev\\.w\\.h.*lsx_vaddwev_w_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwev_h_b:.*vaddwev\\.h\\.b.*lsx_vaddwev_h_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwod_d_w:.*vaddwod\\.d\\.w.*lsx_vaddwod_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwod_w_h:.*vaddwod\\.w\\.h.*lsx_vaddwod_w_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwod_h_b:.*vaddwod\\.h\\.b.*lsx_vaddwod_h_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwev_d_wu:.*vaddwev\\.d\\.wu.*lsx_vaddwev_d_wu" 1 } } */ ++/* { dg-final { scan-assembler-times 
"lsx_vaddwev_w_hu:.*vaddwev\\.w\\.hu.*lsx_vaddwev_w_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwev_h_bu:.*vaddwev\\.h\\.bu.*lsx_vaddwev_h_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwod_d_wu:.*vaddwod\\.d\\.wu.*lsx_vaddwod_d_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwod_w_hu:.*vaddwod\\.w\\.hu.*lsx_vaddwod_w_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwod_h_bu:.*vaddwod\\.h\\.bu.*lsx_vaddwod_h_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwev_d_wu_w:.*vaddwev\\.d\\.wu\\.w.*lsx_vaddwev_d_wu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwev_w_hu_h:.*vaddwev\\.w\\.hu\\.h.*lsx_vaddwev_w_hu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwev_h_bu_b:.*vaddwev\\.h\\.bu\\.b.*lsx_vaddwev_h_bu_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwod_d_wu_w:.*vaddwod\\.d\\.wu\\.w.*lsx_vaddwod_d_wu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwod_w_hu_h:.*vaddwod\\.w\\.hu\\.h.*lsx_vaddwod_w_hu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwod_h_bu_b:.*vaddwod\\.h\\.bu\\.b.*lsx_vaddwod_h_bu_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsubwev_d_w:.*vsubwev\\.d\\.w.*lsx_vsubwev_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsubwev_w_h:.*vsubwev\\.w\\.h.*lsx_vsubwev_w_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsubwev_h_b:.*vsubwev\\.h\\.b.*lsx_vsubwev_h_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsubwod_d_w:.*vsubwod\\.d\\.w.*lsx_vsubwod_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsubwod_w_h:.*vsubwod\\.w\\.h.*lsx_vsubwod_w_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsubwod_h_b:.*vsubwod\\.h\\.b.*lsx_vsubwod_h_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsubwev_d_wu:.*vsubwev\\.d\\.wu.*lsx_vsubwev_d_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsubwev_w_hu:.*vsubwev\\.w\\.hu.*lsx_vsubwev_w_hu" 1 } } */ ++/* { dg-final { 
scan-assembler-times "lsx_vsubwev_h_bu:.*vsubwev\\.h\\.bu.*lsx_vsubwev_h_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsubwod_d_wu:.*vsubwod\\.d\\.wu.*lsx_vsubwod_d_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsubwod_w_hu:.*vsubwod\\.w\\.hu.*lsx_vsubwod_w_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsubwod_h_bu:.*vsubwod\\.h\\.bu.*lsx_vsubwod_h_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwev_q_d:.*vaddwev\\.q\\.d.*lsx_vaddwev_q_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwod_q_d:.*vaddwod\\.q\\.d.*lsx_vaddwod_q_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwev_q_du:.*vaddwev\\.q\\.du.*lsx_vaddwev_q_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwod_q_du:.*vaddwod\\.q\\.du.*lsx_vaddwod_q_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsubwev_q_d:.*vsubwev\\.q\\.d.*lsx_vsubwev_q_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsubwod_q_d:.*vsubwod\\.q\\.d.*lsx_vsubwod_q_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsubwev_q_du:.*vsubwev\\.q\\.du.*lsx_vsubwev_q_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsubwod_q_du:.*vsubwod\\.q\\.du.*lsx_vsubwod_q_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwev_q_du_d:.*vaddwev\\.q\\.du\\.d.*lsx_vaddwev_q_du_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwod_q_du_d:.*vaddwod\\.q\\.du\\.d.*lsx_vaddwod_q_du_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwev_d_w:.*vmulwev\\.d\\.w.*lsx_vmulwev_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwev_w_h:.*vmulwev\\.w\\.h.*lsx_vmulwev_w_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwev_h_b:.*vmulwev\\.h\\.b.*lsx_vmulwev_h_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwod_d_w:.*vmulwod\\.d\\.w.*lsx_vmulwod_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwod_w_h:.*vmulwod\\.w\\.h.*lsx_vmulwod_w_h" 1 } } */ ++/* { dg-final { scan-assembler-times 
"lsx_vmulwod_h_b:.*vmulwod\\.h\\.b.*lsx_vmulwod_h_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwev_d_wu:.*vmulwev\\.d\\.wu.*lsx_vmulwev_d_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwev_w_hu:.*vmulwev\\.w\\.hu.*lsx_vmulwev_w_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwev_h_bu:.*vmulwev\\.h\\.bu.*lsx_vmulwev_h_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwod_d_wu:.*vmulwod\\.d\\.wu.*lsx_vmulwod_d_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwod_w_hu:.*vmulwod\\.w\\.hu.*lsx_vmulwod_w_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwod_h_bu:.*vmulwod\\.h\\.bu.*lsx_vmulwod_h_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwev_d_wu_w:.*vmulwev\\.d\\.wu\\.w.*lsx_vmulwev_d_wu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwev_w_hu_h:.*vmulwev\\.w\\.hu\\.h.*lsx_vmulwev_w_hu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwev_h_bu_b:.*vmulwev\\.h\\.bu\\.b.*lsx_vmulwev_h_bu_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwod_d_wu_w:.*vmulwod\\.d\\.wu\\.w.*lsx_vmulwod_d_wu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwod_w_hu_h:.*vmulwod\\.w\\.hu\\.h.*lsx_vmulwod_w_hu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwod_h_bu_b:.*vmulwod\\.h\\.bu\\.b.*lsx_vmulwod_h_bu_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwev_q_d:.*vmulwev\\.q\\.d.*lsx_vmulwev_q_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwod_q_d:.*vmulwod\\.q\\.d.*lsx_vmulwod_q_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwev_q_du:.*vmulwev\\.q\\.du.*lsx_vmulwev_q_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwod_q_du:.*vmulwod\\.q\\.du.*lsx_vmulwod_q_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwev_q_du_d:.*vmulwev\\.q\\.du\\.d.*lsx_vmulwev_q_du_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwod_q_du_d:.*vmulwod\\.q\\.du\\.d.*lsx_vmulwod_q_du_d" 1 } } */ ++/* { 
dg-final { scan-assembler-times "lsx_vhaddw_q_d:.*vhaddw\\.q\\.d.*lsx_vhaddw_q_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vhaddw_qu_du:.*vhaddw\\.qu\\.du.*lsx_vhaddw_qu_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vhsubw_q_d:.*vhsubw\\.q\\.d.*lsx_vhsubw_q_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vhsubw_qu_du:.*vhsubw\\.qu\\.du.*lsx_vhsubw_qu_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwev_d_w:.*vmaddwev\\.d\\.w.*lsx_vmaddwev_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwev_w_h:.*vmaddwev\\.w\\.h.*lsx_vmaddwev_w_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwev_h_b:.*vmaddwev\\.h\\.b.*lsx_vmaddwev_h_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwev_d_wu:.*vmaddwev\\.d\\.wu.*lsx_vmaddwev_d_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwev_w_hu:.*vmaddwev\\.w\\.hu.*lsx_vmaddwev_w_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwev_h_bu:.*vmaddwev\\.h\\.bu.*lsx_vmaddwev_h_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwod_d_w:.*vmaddwod\\.d\\.w.*lsx_vmaddwod_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwod_w_h:.*vmaddwod\\.w\\.h.*lsx_vmaddwod_w_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwod_h_b:.*vmaddwod\\.h\\.b.*lsx_vmaddwod_h_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwod_d_wu:.*vmaddwod\\.d\\.wu.*lsx_vmaddwod_d_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwod_w_hu:.*vmaddwod\\.w\\.hu.*lsx_vmaddwod_w_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwod_h_bu:.*vmaddwod\\.h\\.bu.*lsx_vmaddwod_h_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwev_d_wu_w:.*vmaddwev\\.d\\.wu\\.w.*lsx_vmaddwev_d_wu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwev_w_hu_h:.*vmaddwev\\.w\\.hu\\.h.*lsx_vmaddwev_w_hu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwev_h_bu_b:.*vmaddwev\\.h\\.bu\\.b.*lsx_vmaddwev_h_bu_b" 
1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwod_d_wu_w:.*vmaddwod\\.d\\.wu\\.w.*lsx_vmaddwod_d_wu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwod_w_hu_h:.*vmaddwod\\.w\\.hu\\.h.*lsx_vmaddwod_w_hu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwod_h_bu_b:.*vmaddwod\\.h\\.bu\\.b.*lsx_vmaddwod_h_bu_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwev_q_d:.*vmaddwev\\.q\\.d.*lsx_vmaddwev_q_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwod_q_d:.*vmaddwod\\.q\\.d.*lsx_vmaddwod_q_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwev_q_du:.*vmaddwev\\.q\\.du.*lsx_vmaddwev_q_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwod_q_du:.*vmaddwod\\.q\\.du.*lsx_vmaddwod_q_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwev_q_du_d:.*vmaddwev\\.q\\.du\\.d.*lsx_vmaddwev_q_du_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwod_q_du_d:.*vmaddwod\\.q\\.du\\.d.*lsx_vmaddwod_q_du_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vrotr_b:.*vrotr\\.b.*lsx_vrotr_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vrotr_h:.*vrotr\\.h.*lsx_vrotr_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vrotr_w:.*vrotr\\.w.*lsx_vrotr_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vrotr_d:.*vrotr\\.d.*lsx_vrotr_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vadd_q:.*vadd\\.q.*lsx_vadd_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsub_q:.*vsub\\.q.*lsx_vsub_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vldrepl_b:.*vldrepl\\.b.*lsx_vldrepl_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vldrepl_h:.*vldrepl\\.h.*lsx_vldrepl_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vldrepl_w:.*vldrepl\\.w.*lsx_vldrepl_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vldrepl_d:.*vldrepl\\.d.*lsx_vldrepl_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmskgez_b:.*vmskgez\\.b.*lsx_vmskgez_b" 1 } } */ ++/* { 
dg-final { scan-assembler-times "lsx_vmsknz_b:.*vmsknz\\.b.*lsx_vmsknz_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vexth_h_b:.*vexth\\.h\\.b.*lsx_vexth_h_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vexth_w_h:.*vexth\\.w\\.h.*lsx_vexth_w_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vexth_d_w:.*vexth\\.d\\.w.*lsx_vexth_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vexth_q_d:.*vexth\\.q\\.d.*lsx_vexth_q_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vexth_hu_bu:.*vexth\\.hu\\.bu.*lsx_vexth_hu_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vexth_wu_hu:.*vexth\\.wu\\.hu.*lsx_vexth_wu_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vexth_du_wu:.*vexth\\.du\\.wu.*lsx_vexth_du_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vexth_qu_du:.*vexth\\.qu\\.du.*lsx_vexth_qu_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vrotri_b:.*vrotri\\.b.*lsx_vrotri_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vrotri_h:.*vrotri\\.h.*lsx_vrotri_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vrotri_w:.*vrotri\\.w.*lsx_vrotri_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vrotri_d:.*vrotri\\.d.*lsx_vrotri_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vextl_q_d:.*vextl\\.q\\.d.*lsx_vextl_q_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrlni_b_h:.*vsrlni\\.b\\.h.*lsx_vsrlni_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrlni_h_w:.*vsrlni\\.h\\.w.*lsx_vsrlni_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrlni_w_d:.*vsrlni\\.w\\.d.*lsx_vsrlni_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrlni_d_q:.*vsrlni\\.d\\.q.*lsx_vsrlni_d_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrlrni_b_h:.*vsrlrni\\.b\\.h.*lsx_vsrlrni_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrlrni_h_w:.*vsrlrni\\.h\\.w.*lsx_vsrlrni_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times 
"lsx_vsrlrni_w_d:.*vsrlrni\\.w\\.d.*lsx_vsrlrni_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrlrni_d_q:.*vsrlrni\\.d\\.q.*lsx_vsrlrni_d_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlni_b_h:.*vssrlni\\.b\\.h.*lsx_vssrlni_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlni_h_w:.*vssrlni\\.h\\.w.*lsx_vssrlni_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlni_w_d:.*vssrlni\\.w\\.d.*lsx_vssrlni_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlni_d_q:.*vssrlni\\.d\\.q.*lsx_vssrlni_d_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlni_bu_h:.*vssrlni\\.bu\\.h.*lsx_vssrlni_bu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlni_hu_w:.*vssrlni\\.hu\\.w.*lsx_vssrlni_hu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlni_wu_d:.*vssrlni\\.wu\\.d.*lsx_vssrlni_wu_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlni_du_q:.*vssrlni\\.du\\.q.*lsx_vssrlni_du_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlrni_b_h:.*vssrlrni\\.b\\.h.*lsx_vssrlrni_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlrni_h_w:.*vssrlrni\\.h\\.w.*lsx_vssrlrni_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlrni_w_d:.*vssrlrni\\.w\\.d.*lsx_vssrlrni_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlrni_d_q:.*vssrlrni\\.d\\.q.*lsx_vssrlrni_d_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlrni_bu_h:.*vssrlrni\\.bu\\.h.*lsx_vssrlrni_bu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlrni_hu_w:.*vssrlrni\\.hu\\.w.*lsx_vssrlrni_hu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlrni_wu_d:.*vssrlrni\\.wu\\.d.*lsx_vssrlrni_wu_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlrni_du_q:.*vssrlrni\\.du\\.q.*lsx_vssrlrni_du_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrani_b_h:.*vsrani\\.b\\.h.*lsx_vsrani_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times 
"lsx_vsrani_h_w:.*vsrani\\.h\\.w.*lsx_vsrani_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrani_w_d:.*vsrani\\.w\\.d.*lsx_vsrani_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrani_d_q:.*vsrani\\.d\\.q.*lsx_vsrani_d_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrarni_b_h:.*vsrarni\\.b\\.h.*lsx_vsrarni_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrarni_h_w:.*vsrarni\\.h\\.w.*lsx_vsrarni_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrarni_w_d:.*vsrarni\\.w\\.d.*lsx_vsrarni_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrarni_d_q:.*vsrarni\\.d\\.q.*lsx_vsrarni_d_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrani_b_h:.*vssrani\\.b\\.h.*lsx_vssrani_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrani_h_w:.*vssrani\\.h\\.w.*lsx_vssrani_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrani_w_d:.*vssrani\\.w\\.d.*lsx_vssrani_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrani_d_q:.*vssrani\\.d\\.q.*lsx_vssrani_d_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrani_bu_h:.*vssrani\\.bu\\.h.*lsx_vssrani_bu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrani_hu_w:.*vssrani\\.hu\\.w.*lsx_vssrani_hu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrani_wu_d:.*vssrani\\.wu\\.d.*lsx_vssrani_wu_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrani_du_q:.*vssrani\\.du\\.q.*lsx_vssrani_du_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrarni_b_h:.*vssrarni\\.b\\.h.*lsx_vssrarni_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrarni_h_w:.*vssrarni\\.h\\.w.*lsx_vssrarni_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrarni_w_d:.*vssrarni\\.w\\.d.*lsx_vssrarni_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrarni_d_q:.*vssrarni\\.d\\.q.*lsx_vssrarni_d_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrarni_bu_h:.*vssrarni\\.bu\\.h.*lsx_vssrarni_bu_h" 1 } } 
*/ ++/* { dg-final { scan-assembler-times "lsx_vssrarni_hu_w:.*vssrarni\\.hu\\.w.*lsx_vssrarni_hu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrarni_wu_d:.*vssrarni\\.wu\\.d.*lsx_vssrarni_wu_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrarni_du_q:.*vssrarni\\.du\\.q.*lsx_vssrarni_du_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpermi_w:.*vpermi\\.w.*lsx_vpermi_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vld:.*vld.*lsx_vld" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vst:.*vst.*lsx_vst" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlrn_b_h:.*vssrlrn\\.b\\.h.*lsx_vssrlrn_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlrn_h_w:.*vssrlrn\\.h\\.w.*lsx_vssrlrn_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlrn_w_d:.*vssrlrn\\.w\\.d.*lsx_vssrlrn_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrln_b_h:.*vssrln\\.b\\.h.*lsx_vssrln_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrln_h_w:.*vssrln\\.h\\.w.*lsx_vssrln_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrln_w_d:.*vssrln\\.w\\.d.*lsx_vssrln_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vorn_v:.*vorn\\.v.*lsx_vorn_v" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vldi:.*vldi.*lsx_vldi" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vshuf_b:.*vshuf\\.b.*lsx_vshuf_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vldx:.*vldx.*lsx_vldx" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vstx:.*vstx.*lsx_vstx" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vextl_qu_du:.*vextl\\.qu\\.du.*lsx_vextl_qu_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_bnz_b:.*vsetanyeqz\\.b.*lsx_bnz_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_bnz_d:.*vsetanyeqz\\.d.*lsx_bnz_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_bnz_h:.*vsetanyeqz\\.h.*lsx_bnz_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_bnz_v:.*vseteqz\\.v.*lsx_bnz_v" 1 
} } */ ++/* { dg-final { scan-assembler-times "lsx_bnz_w:.*vsetanyeqz\\.w.*lsx_bnz_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_bz_b:.*vsetallnez\\.b.*lsx_bz_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_bz_d:.*vsetallnez\\.d.*lsx_bz_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_bz_h:.*vsetallnez\\.h.*lsx_bz_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_bz_v:.*vsetnez\\.v.*lsx_bz_v" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_bz_w:.*vsetallnez\\.w.*lsx_bz_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_caf_d:.*vfcmp\\.caf\\.d.*lsx_vfcmp_caf_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_caf_s:.*vfcmp\\.caf\\.s.*lsx_vfcmp_caf_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_ceq_d:.*vfcmp\\.ceq\\.d.*lsx_vfcmp_ceq_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_ceq_s:.*vfcmp\\.ceq\\.s.*lsx_vfcmp_ceq_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_cle_d:.*vfcmp\\.cle\\.d.*lsx_vfcmp_cle_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_cle_s:.*vfcmp\\.cle\\.s.*lsx_vfcmp_cle_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_clt_d:.*vfcmp\\.clt\\.d.*lsx_vfcmp_clt_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_clt_s:.*vfcmp\\.clt\\.s.*lsx_vfcmp_clt_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_cne_d:.*vfcmp\\.cne\\.d.*lsx_vfcmp_cne_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_cne_s:.*vfcmp\\.cne\\.s.*lsx_vfcmp_cne_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_cor_d:.*vfcmp\\.cor\\.d.*lsx_vfcmp_cor_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_cor_s:.*vfcmp\\.cor\\.s.*lsx_vfcmp_cor_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_cueq_d:.*vfcmp\\.cueq\\.d.*lsx_vfcmp_cueq_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_cueq_s:.*vfcmp\\.cueq\\.s.*lsx_vfcmp_cueq_s" 1 } } */ ++/* { dg-final { scan-assembler-times 
"lsx_vfcmp_cule_d:.*vfcmp\\.cule\\.d.*lsx_vfcmp_cule_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_cule_s:.*vfcmp\\.cule\\.s.*lsx_vfcmp_cule_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_cult_d:.*vfcmp\\.cult\\.d.*lsx_vfcmp_cult_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_cult_s:.*vfcmp\\.cult\\.s.*lsx_vfcmp_cult_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_cun_d:.*vfcmp\\.cun\\.d.*lsx_vfcmp_cun_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_cune_d:.*vfcmp\\.cune\\.d.*lsx_vfcmp_cune_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_cune_s:.*vfcmp\\.cune\\.s.*lsx_vfcmp_cune_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_cun_s:.*vfcmp\\.cun\\.s.*lsx_vfcmp_cun_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_saf_d:.*vfcmp\\.saf\\.d.*lsx_vfcmp_saf_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_saf_s:.*vfcmp\\.saf\\.s.*lsx_vfcmp_saf_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_seq_d:.*vfcmp\\.seq\\.d.*lsx_vfcmp_seq_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_seq_s:.*vfcmp\\.seq\\.s.*lsx_vfcmp_seq_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_sle_d:.*vfcmp\\.sle\\.d.*lsx_vfcmp_sle_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_sle_s:.*vfcmp\\.sle\\.s.*lsx_vfcmp_sle_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_slt_d:.*vfcmp\\.slt\\.d.*lsx_vfcmp_slt_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_slt_s:.*vfcmp\\.slt\\.s.*lsx_vfcmp_slt_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_sne_d:.*vfcmp\\.sne\\.d.*lsx_vfcmp_sne_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_sne_s:.*vfcmp\\.sne\\.s.*lsx_vfcmp_sne_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_sor_d:.*vfcmp\\.sor\\.d.*lsx_vfcmp_sor_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_sor_s:.*vfcmp\\.sor\\.s.*lsx_vfcmp_sor_s" 1 } } */ 
++/* { dg-final { scan-assembler-times "lsx_vfcmp_sueq_d:.*vfcmp\\.sueq\\.d.*lsx_vfcmp_sueq_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_sueq_s:.*vfcmp\\.sueq\\.s.*lsx_vfcmp_sueq_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_sule_d:.*vfcmp\\.sule\\.d.*lsx_vfcmp_sule_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_sule_s:.*vfcmp\\.sule\\.s.*lsx_vfcmp_sule_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_sult_d:.*vfcmp\\.sult\\.d.*lsx_vfcmp_sult_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_sult_s:.*vfcmp\\.sult\\.s.*lsx_vfcmp_sult_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_sun_d:.*vfcmp\\.sun\\.d.*lsx_vfcmp_sun_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_sune_d:.*vfcmp\\.sune\\.d.*lsx_vfcmp_sune_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_sune_s:.*vfcmp\\.sune\\.s.*lsx_vfcmp_sune_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_sun_s:.*vfcmp\\.sun\\.s.*lsx_vfcmp_sun_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vrepli_b:.*vrepli\\.b.*lsx_vrepli_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vrepli_d:.*vrepli\\.d.*lsx_vrepli_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vrepli_h:.*vrepli\\.h.*lsx_vrepli_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vrepli_w:.*vrepli\\.w.*lsx_vrepli_w" 1 } } */ ++ ++typedef signed char v16i8 __attribute__ ((vector_size (16), aligned (16))); ++typedef signed char v16i8_b __attribute__ ((vector_size (16), aligned (1))); ++typedef unsigned char v16u8 __attribute__ ((vector_size (16), aligned (16))); ++typedef unsigned char v16u8_b __attribute__ ((vector_size (16), aligned (1))); ++typedef short v8i16 __attribute__ ((vector_size (16), aligned (16))); ++typedef short v8i16_h __attribute__ ((vector_size (16), aligned (2))); ++typedef unsigned short v8u16 __attribute__ ((vector_size (16), aligned (16))); ++typedef unsigned short v8u16_h __attribute__ 
((vector_size (16), aligned (2))); ++typedef int v4i32 __attribute__ ((vector_size (16), aligned (16))); ++typedef int v4i32_w __attribute__ ((vector_size (16), aligned (4))); ++typedef unsigned int v4u32 __attribute__ ((vector_size (16), aligned (16))); ++typedef unsigned int v4u32_w __attribute__ ((vector_size (16), aligned (4))); ++typedef long long v2i64 __attribute__ ((vector_size (16), aligned (16))); ++typedef long long v2i64_d __attribute__ ((vector_size (16), aligned (8))); ++typedef unsigned long long v2u64 ++ __attribute__ ((vector_size (16), aligned (16))); ++typedef unsigned long long v2u64_d ++ __attribute__ ((vector_size (16), aligned (8))); ++typedef float v4f32 __attribute__ ((vector_size (16), aligned (16))); ++typedef float v4f32_w __attribute__ ((vector_size (16), aligned (4))); ++typedef double v2f64 __attribute__ ((vector_size (16), aligned (16))); ++typedef double v2f64_d __attribute__ ((vector_size (16), aligned (8))); ++ ++typedef long long __m128i ++ __attribute__ ((__vector_size__ (16), __may_alias__)); ++typedef float __m128 __attribute__ ((__vector_size__ (16), __may_alias__)); ++typedef double __m128d __attribute__ ((__vector_size__ (16), __may_alias__)); ++ ++v16i8 ++__lsx_vsll_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vsll_b (_1, _2); ++} ++v8i16 ++__lsx_vsll_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vsll_h (_1, _2); ++} ++v4i32 ++__lsx_vsll_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vsll_w (_1, _2); ++} ++v2i64 ++__lsx_vsll_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vsll_d (_1, _2); ++} ++v16i8 ++__lsx_vslli_b (v16i8 _1) ++{ ++ return __builtin_lsx_vslli_b (_1, 1); ++} ++v8i16 ++__lsx_vslli_h (v8i16 _1) ++{ ++ return __builtin_lsx_vslli_h (_1, 1); ++} ++v4i32 ++__lsx_vslli_w (v4i32 _1) ++{ ++ return __builtin_lsx_vslli_w (_1, 1); ++} ++v2i64 ++__lsx_vslli_d (v2i64 _1) ++{ ++ return __builtin_lsx_vslli_d (_1, 1); ++} ++v16i8 ++__lsx_vsra_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vsra_b (_1, 
_2); ++} ++v8i16 ++__lsx_vsra_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vsra_h (_1, _2); ++} ++v4i32 ++__lsx_vsra_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vsra_w (_1, _2); ++} ++v2i64 ++__lsx_vsra_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vsra_d (_1, _2); ++} ++v16i8 ++__lsx_vsrai_b (v16i8 _1) ++{ ++ return __builtin_lsx_vsrai_b (_1, 1); ++} ++v8i16 ++__lsx_vsrai_h (v8i16 _1) ++{ ++ return __builtin_lsx_vsrai_h (_1, 1); ++} ++v4i32 ++__lsx_vsrai_w (v4i32 _1) ++{ ++ return __builtin_lsx_vsrai_w (_1, 1); ++} ++v2i64 ++__lsx_vsrai_d (v2i64 _1) ++{ ++ return __builtin_lsx_vsrai_d (_1, 1); ++} ++v16i8 ++__lsx_vsrar_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vsrar_b (_1, _2); ++} ++v8i16 ++__lsx_vsrar_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vsrar_h (_1, _2); ++} ++v4i32 ++__lsx_vsrar_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vsrar_w (_1, _2); ++} ++v2i64 ++__lsx_vsrar_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vsrar_d (_1, _2); ++} ++v16i8 ++__lsx_vsrari_b (v16i8 _1) ++{ ++ return __builtin_lsx_vsrari_b (_1, 1); ++} ++v8i16 ++__lsx_vsrari_h (v8i16 _1) ++{ ++ return __builtin_lsx_vsrari_h (_1, 1); ++} ++v4i32 ++__lsx_vsrari_w (v4i32 _1) ++{ ++ return __builtin_lsx_vsrari_w (_1, 1); ++} ++v2i64 ++__lsx_vsrari_d (v2i64 _1) ++{ ++ return __builtin_lsx_vsrari_d (_1, 1); ++} ++v16i8 ++__lsx_vsrl_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vsrl_b (_1, _2); ++} ++v8i16 ++__lsx_vsrl_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vsrl_h (_1, _2); ++} ++v4i32 ++__lsx_vsrl_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vsrl_w (_1, _2); ++} ++v2i64 ++__lsx_vsrl_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vsrl_d (_1, _2); ++} ++v16i8 ++__lsx_vsrli_b (v16i8 _1) ++{ ++ return __builtin_lsx_vsrli_b (_1, 1); ++} ++v8i16 ++__lsx_vsrli_h (v8i16 _1) ++{ ++ return __builtin_lsx_vsrli_h (_1, 1); ++} ++v4i32 ++__lsx_vsrli_w (v4i32 _1) ++{ ++ return __builtin_lsx_vsrli_w (_1, 1); ++} ++v2i64 ++__lsx_vsrli_d 
(v2i64 _1) ++{ ++ return __builtin_lsx_vsrli_d (_1, 1); ++} ++v16i8 ++__lsx_vsrlr_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vsrlr_b (_1, _2); ++} ++v8i16 ++__lsx_vsrlr_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vsrlr_h (_1, _2); ++} ++v4i32 ++__lsx_vsrlr_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vsrlr_w (_1, _2); ++} ++v2i64 ++__lsx_vsrlr_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vsrlr_d (_1, _2); ++} ++v16i8 ++__lsx_vsrlri_b (v16i8 _1) ++{ ++ return __builtin_lsx_vsrlri_b (_1, 1); ++} ++v8i16 ++__lsx_vsrlri_h (v8i16 _1) ++{ ++ return __builtin_lsx_vsrlri_h (_1, 1); ++} ++v4i32 ++__lsx_vsrlri_w (v4i32 _1) ++{ ++ return __builtin_lsx_vsrlri_w (_1, 1); ++} ++v2i64 ++__lsx_vsrlri_d (v2i64 _1) ++{ ++ return __builtin_lsx_vsrlri_d (_1, 1); ++} ++v16u8 ++__lsx_vbitclr_b (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vbitclr_b (_1, _2); ++} ++v8u16 ++__lsx_vbitclr_h (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vbitclr_h (_1, _2); ++} ++v4u32 ++__lsx_vbitclr_w (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vbitclr_w (_1, _2); ++} ++v2u64 ++__lsx_vbitclr_d (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vbitclr_d (_1, _2); ++} ++v16u8 ++__lsx_vbitclri_b (v16u8 _1) ++{ ++ return __builtin_lsx_vbitclri_b (_1, 1); ++} ++v8u16 ++__lsx_vbitclri_h (v8u16 _1) ++{ ++ return __builtin_lsx_vbitclri_h (_1, 1); ++} ++v4u32 ++__lsx_vbitclri_w (v4u32 _1) ++{ ++ return __builtin_lsx_vbitclri_w (_1, 1); ++} ++v2u64 ++__lsx_vbitclri_d (v2u64 _1) ++{ ++ return __builtin_lsx_vbitclri_d (_1, 1); ++} ++v16u8 ++__lsx_vbitset_b (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vbitset_b (_1, _2); ++} ++v8u16 ++__lsx_vbitset_h (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vbitset_h (_1, _2); ++} ++v4u32 ++__lsx_vbitset_w (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vbitset_w (_1, _2); ++} ++v2u64 ++__lsx_vbitset_d (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vbitset_d (_1, _2); ++} ++v16u8 ++__lsx_vbitseti_b (v16u8 _1) ++{ ++ return 
__builtin_lsx_vbitseti_b (_1, 1); ++} ++v8u16 ++__lsx_vbitseti_h (v8u16 _1) ++{ ++ return __builtin_lsx_vbitseti_h (_1, 1); ++} ++v4u32 ++__lsx_vbitseti_w (v4u32 _1) ++{ ++ return __builtin_lsx_vbitseti_w (_1, 1); ++} ++v2u64 ++__lsx_vbitseti_d (v2u64 _1) ++{ ++ return __builtin_lsx_vbitseti_d (_1, 1); ++} ++v16u8 ++__lsx_vbitrev_b (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vbitrev_b (_1, _2); ++} ++v8u16 ++__lsx_vbitrev_h (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vbitrev_h (_1, _2); ++} ++v4u32 ++__lsx_vbitrev_w (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vbitrev_w (_1, _2); ++} ++v2u64 ++__lsx_vbitrev_d (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vbitrev_d (_1, _2); ++} ++v16u8 ++__lsx_vbitrevi_b (v16u8 _1) ++{ ++ return __builtin_lsx_vbitrevi_b (_1, 1); ++} ++v8u16 ++__lsx_vbitrevi_h (v8u16 _1) ++{ ++ return __builtin_lsx_vbitrevi_h (_1, 1); ++} ++v4u32 ++__lsx_vbitrevi_w (v4u32 _1) ++{ ++ return __builtin_lsx_vbitrevi_w (_1, 1); ++} ++v2u64 ++__lsx_vbitrevi_d (v2u64 _1) ++{ ++ return __builtin_lsx_vbitrevi_d (_1, 1); ++} ++v16i8 ++__lsx_vadd_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vadd_b (_1, _2); ++} ++v8i16 ++__lsx_vadd_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vadd_h (_1, _2); ++} ++v4i32 ++__lsx_vadd_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vadd_w (_1, _2); ++} ++v2i64 ++__lsx_vadd_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vadd_d (_1, _2); ++} ++v16i8 ++__lsx_vaddi_bu (v16i8 _1) ++{ ++ return __builtin_lsx_vaddi_bu (_1, 1); ++} ++v8i16 ++__lsx_vaddi_hu (v8i16 _1) ++{ ++ return __builtin_lsx_vaddi_hu (_1, 1); ++} ++v4i32 ++__lsx_vaddi_wu (v4i32 _1) ++{ ++ return __builtin_lsx_vaddi_wu (_1, 1); ++} ++v2i64 ++__lsx_vaddi_du (v2i64 _1) ++{ ++ return __builtin_lsx_vaddi_du (_1, 1); ++} ++v16i8 ++__lsx_vsub_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vsub_b (_1, _2); ++} ++v8i16 ++__lsx_vsub_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vsub_h (_1, _2); ++} ++v4i32 ++__lsx_vsub_w (v4i32 _1, 
v4i32 _2) ++{ ++ return __builtin_lsx_vsub_w (_1, _2); ++} ++v2i64 ++__lsx_vsub_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vsub_d (_1, _2); ++} ++v16i8 ++__lsx_vsubi_bu (v16i8 _1) ++{ ++ return __builtin_lsx_vsubi_bu (_1, 1); ++} ++v8i16 ++__lsx_vsubi_hu (v8i16 _1) ++{ ++ return __builtin_lsx_vsubi_hu (_1, 1); ++} ++v4i32 ++__lsx_vsubi_wu (v4i32 _1) ++{ ++ return __builtin_lsx_vsubi_wu (_1, 1); ++} ++v2i64 ++__lsx_vsubi_du (v2i64 _1) ++{ ++ return __builtin_lsx_vsubi_du (_1, 1); ++} ++v16i8 ++__lsx_vmax_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vmax_b (_1, _2); ++} ++v8i16 ++__lsx_vmax_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vmax_h (_1, _2); ++} ++v4i32 ++__lsx_vmax_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vmax_w (_1, _2); ++} ++v2i64 ++__lsx_vmax_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vmax_d (_1, _2); ++} ++v16i8 ++__lsx_vmaxi_b (v16i8 _1) ++{ ++ return __builtin_lsx_vmaxi_b (_1, 1); ++} ++v8i16 ++__lsx_vmaxi_h (v8i16 _1) ++{ ++ return __builtin_lsx_vmaxi_h (_1, 1); ++} ++v4i32 ++__lsx_vmaxi_w (v4i32 _1) ++{ ++ return __builtin_lsx_vmaxi_w (_1, 1); ++} ++v2i64 ++__lsx_vmaxi_d (v2i64 _1) ++{ ++ return __builtin_lsx_vmaxi_d (_1, 1); ++} ++v16u8 ++__lsx_vmax_bu (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vmax_bu (_1, _2); ++} ++v8u16 ++__lsx_vmax_hu (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vmax_hu (_1, _2); ++} ++v4u32 ++__lsx_vmax_wu (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vmax_wu (_1, _2); ++} ++v2u64 ++__lsx_vmax_du (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vmax_du (_1, _2); ++} ++v16u8 ++__lsx_vmaxi_bu (v16u8 _1) ++{ ++ return __builtin_lsx_vmaxi_bu (_1, 1); ++} ++v8u16 ++__lsx_vmaxi_hu (v8u16 _1) ++{ ++ return __builtin_lsx_vmaxi_hu (_1, 1); ++} ++v4u32 ++__lsx_vmaxi_wu (v4u32 _1) ++{ ++ return __builtin_lsx_vmaxi_wu (_1, 1); ++} ++v2u64 ++__lsx_vmaxi_du (v2u64 _1) ++{ ++ return __builtin_lsx_vmaxi_du (_1, 1); ++} ++v16i8 ++__lsx_vmin_b (v16i8 _1, v16i8 _2) ++{ ++ return 
__builtin_lsx_vmin_b (_1, _2); ++} ++v8i16 ++__lsx_vmin_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vmin_h (_1, _2); ++} ++v4i32 ++__lsx_vmin_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vmin_w (_1, _2); ++} ++v2i64 ++__lsx_vmin_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vmin_d (_1, _2); ++} ++v16i8 ++__lsx_vmini_b (v16i8 _1) ++{ ++ return __builtin_lsx_vmini_b (_1, 1); ++} ++v8i16 ++__lsx_vmini_h (v8i16 _1) ++{ ++ return __builtin_lsx_vmini_h (_1, 1); ++} ++v4i32 ++__lsx_vmini_w (v4i32 _1) ++{ ++ return __builtin_lsx_vmini_w (_1, 1); ++} ++v2i64 ++__lsx_vmini_d (v2i64 _1) ++{ ++ return __builtin_lsx_vmini_d (_1, 1); ++} ++v16u8 ++__lsx_vmin_bu (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vmin_bu (_1, _2); ++} ++v8u16 ++__lsx_vmin_hu (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vmin_hu (_1, _2); ++} ++v4u32 ++__lsx_vmin_wu (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vmin_wu (_1, _2); ++} ++v2u64 ++__lsx_vmin_du (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vmin_du (_1, _2); ++} ++v16u8 ++__lsx_vmini_bu (v16u8 _1) ++{ ++ return __builtin_lsx_vmini_bu (_1, 1); ++} ++v8u16 ++__lsx_vmini_hu (v8u16 _1) ++{ ++ return __builtin_lsx_vmini_hu (_1, 1); ++} ++v4u32 ++__lsx_vmini_wu (v4u32 _1) ++{ ++ return __builtin_lsx_vmini_wu (_1, 1); ++} ++v2u64 ++__lsx_vmini_du (v2u64 _1) ++{ ++ return __builtin_lsx_vmini_du (_1, 1); ++} ++v16i8 ++__lsx_vseq_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vseq_b (_1, _2); ++} ++v8i16 ++__lsx_vseq_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vseq_h (_1, _2); ++} ++v4i32 ++__lsx_vseq_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vseq_w (_1, _2); ++} ++v2i64 ++__lsx_vseq_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vseq_d (_1, _2); ++} ++v16i8 ++__lsx_vseqi_b (v16i8 _1) ++{ ++ return __builtin_lsx_vseqi_b (_1, 1); ++} ++v8i16 ++__lsx_vseqi_h (v8i16 _1) ++{ ++ return __builtin_lsx_vseqi_h (_1, 1); ++} ++v4i32 ++__lsx_vseqi_w (v4i32 _1) ++{ ++ return __builtin_lsx_vseqi_w (_1, 1); ++} 
++v2i64 ++__lsx_vseqi_d (v2i64 _1) ++{ ++ return __builtin_lsx_vseqi_d (_1, 1); ++} ++v16i8 ++__lsx_vslti_b (v16i8 _1) ++{ ++ return __builtin_lsx_vslti_b (_1, 1); ++} ++v16i8 ++__lsx_vslt_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vslt_b (_1, _2); ++} ++v8i16 ++__lsx_vslt_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vslt_h (_1, _2); ++} ++v4i32 ++__lsx_vslt_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vslt_w (_1, _2); ++} ++v2i64 ++__lsx_vslt_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vslt_d (_1, _2); ++} ++v8i16 ++__lsx_vslti_h (v8i16 _1) ++{ ++ return __builtin_lsx_vslti_h (_1, 1); ++} ++v4i32 ++__lsx_vslti_w (v4i32 _1) ++{ ++ return __builtin_lsx_vslti_w (_1, 1); ++} ++v2i64 ++__lsx_vslti_d (v2i64 _1) ++{ ++ return __builtin_lsx_vslti_d (_1, 1); ++} ++v16i8 ++__lsx_vslt_bu (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vslt_bu (_1, _2); ++} ++v8i16 ++__lsx_vslt_hu (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vslt_hu (_1, _2); ++} ++v4i32 ++__lsx_vslt_wu (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vslt_wu (_1, _2); ++} ++v2i64 ++__lsx_vslt_du (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vslt_du (_1, _2); ++} ++v16i8 ++__lsx_vslti_bu (v16u8 _1) ++{ ++ return __builtin_lsx_vslti_bu (_1, 1); ++} ++v8i16 ++__lsx_vslti_hu (v8u16 _1) ++{ ++ return __builtin_lsx_vslti_hu (_1, 1); ++} ++v4i32 ++__lsx_vslti_wu (v4u32 _1) ++{ ++ return __builtin_lsx_vslti_wu (_1, 1); ++} ++v2i64 ++__lsx_vslti_du (v2u64 _1) ++{ ++ return __builtin_lsx_vslti_du (_1, 1); ++} ++v16i8 ++__lsx_vsle_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vsle_b (_1, _2); ++} ++v8i16 ++__lsx_vsle_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vsle_h (_1, _2); ++} ++v4i32 ++__lsx_vsle_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vsle_w (_1, _2); ++} ++v2i64 ++__lsx_vsle_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vsle_d (_1, _2); ++} ++v16i8 ++__lsx_vslei_b (v16i8 _1) ++{ ++ return __builtin_lsx_vslei_b (_1, 1); ++} ++v8i16 ++__lsx_vslei_h 
(v8i16 _1) ++{ ++ return __builtin_lsx_vslei_h (_1, 1); ++} ++v4i32 ++__lsx_vslei_w (v4i32 _1) ++{ ++ return __builtin_lsx_vslei_w (_1, 1); ++} ++v2i64 ++__lsx_vslei_d (v2i64 _1) ++{ ++ return __builtin_lsx_vslei_d (_1, 1); ++} ++v16i8 ++__lsx_vsle_bu (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vsle_bu (_1, _2); ++} ++v8i16 ++__lsx_vsle_hu (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vsle_hu (_1, _2); ++} ++v4i32 ++__lsx_vsle_wu (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vsle_wu (_1, _2); ++} ++v2i64 ++__lsx_vsle_du (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vsle_du (_1, _2); ++} ++v16i8 ++__lsx_vslei_bu (v16u8 _1) ++{ ++ return __builtin_lsx_vslei_bu (_1, 1); ++} ++v8i16 ++__lsx_vslei_hu (v8u16 _1) ++{ ++ return __builtin_lsx_vslei_hu (_1, 1); ++} ++v4i32 ++__lsx_vslei_wu (v4u32 _1) ++{ ++ return __builtin_lsx_vslei_wu (_1, 1); ++} ++v2i64 ++__lsx_vslei_du (v2u64 _1) ++{ ++ return __builtin_lsx_vslei_du (_1, 1); ++} ++v16i8 ++__lsx_vsat_b (v16i8 _1) ++{ ++ return __builtin_lsx_vsat_b (_1, 1); ++} ++v8i16 ++__lsx_vsat_h (v8i16 _1) ++{ ++ return __builtin_lsx_vsat_h (_1, 1); ++} ++v4i32 ++__lsx_vsat_w (v4i32 _1) ++{ ++ return __builtin_lsx_vsat_w (_1, 1); ++} ++v2i64 ++__lsx_vsat_d (v2i64 _1) ++{ ++ return __builtin_lsx_vsat_d (_1, 1); ++} ++v16u8 ++__lsx_vsat_bu (v16u8 _1) ++{ ++ return __builtin_lsx_vsat_bu (_1, 1); ++} ++v8u16 ++__lsx_vsat_hu (v8u16 _1) ++{ ++ return __builtin_lsx_vsat_hu (_1, 1); ++} ++v4u32 ++__lsx_vsat_wu (v4u32 _1) ++{ ++ return __builtin_lsx_vsat_wu (_1, 1); ++} ++v2u64 ++__lsx_vsat_du (v2u64 _1) ++{ ++ return __builtin_lsx_vsat_du (_1, 1); ++} ++v16i8 ++__lsx_vadda_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vadda_b (_1, _2); ++} ++v8i16 ++__lsx_vadda_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vadda_h (_1, _2); ++} ++v4i32 ++__lsx_vadda_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vadda_w (_1, _2); ++} ++v2i64 ++__lsx_vadda_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vadda_d (_1, _2); ++} 
++v16i8 ++__lsx_vsadd_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vsadd_b (_1, _2); ++} ++v8i16 ++__lsx_vsadd_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vsadd_h (_1, _2); ++} ++v4i32 ++__lsx_vsadd_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vsadd_w (_1, _2); ++} ++v2i64 ++__lsx_vsadd_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vsadd_d (_1, _2); ++} ++v16u8 ++__lsx_vsadd_bu (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vsadd_bu (_1, _2); ++} ++v8u16 ++__lsx_vsadd_hu (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vsadd_hu (_1, _2); ++} ++v4u32 ++__lsx_vsadd_wu (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vsadd_wu (_1, _2); ++} ++v2u64 ++__lsx_vsadd_du (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vsadd_du (_1, _2); ++} ++v16i8 ++__lsx_vavg_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vavg_b (_1, _2); ++} ++v8i16 ++__lsx_vavg_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vavg_h (_1, _2); ++} ++v4i32 ++__lsx_vavg_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vavg_w (_1, _2); ++} ++v2i64 ++__lsx_vavg_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vavg_d (_1, _2); ++} ++v16u8 ++__lsx_vavg_bu (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vavg_bu (_1, _2); ++} ++v8u16 ++__lsx_vavg_hu (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vavg_hu (_1, _2); ++} ++v4u32 ++__lsx_vavg_wu (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vavg_wu (_1, _2); ++} ++v2u64 ++__lsx_vavg_du (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vavg_du (_1, _2); ++} ++v16i8 ++__lsx_vavgr_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vavgr_b (_1, _2); ++} ++v8i16 ++__lsx_vavgr_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vavgr_h (_1, _2); ++} ++v4i32 ++__lsx_vavgr_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vavgr_w (_1, _2); ++} ++v2i64 ++__lsx_vavgr_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vavgr_d (_1, _2); ++} ++v16u8 ++__lsx_vavgr_bu (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vavgr_bu (_1, _2); ++} 
++v8u16 ++__lsx_vavgr_hu (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vavgr_hu (_1, _2); ++} ++v4u32 ++__lsx_vavgr_wu (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vavgr_wu (_1, _2); ++} ++v2u64 ++__lsx_vavgr_du (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vavgr_du (_1, _2); ++} ++v16i8 ++__lsx_vssub_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vssub_b (_1, _2); ++} ++v8i16 ++__lsx_vssub_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vssub_h (_1, _2); ++} ++v4i32 ++__lsx_vssub_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vssub_w (_1, _2); ++} ++v2i64 ++__lsx_vssub_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vssub_d (_1, _2); ++} ++v16u8 ++__lsx_vssub_bu (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vssub_bu (_1, _2); ++} ++v8u16 ++__lsx_vssub_hu (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vssub_hu (_1, _2); ++} ++v4u32 ++__lsx_vssub_wu (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vssub_wu (_1, _2); ++} ++v2u64 ++__lsx_vssub_du (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vssub_du (_1, _2); ++} ++v16i8 ++__lsx_vabsd_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vabsd_b (_1, _2); ++} ++v8i16 ++__lsx_vabsd_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vabsd_h (_1, _2); ++} ++v4i32 ++__lsx_vabsd_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vabsd_w (_1, _2); ++} ++v2i64 ++__lsx_vabsd_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vabsd_d (_1, _2); ++} ++v16u8 ++__lsx_vabsd_bu (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vabsd_bu (_1, _2); ++} ++v8u16 ++__lsx_vabsd_hu (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vabsd_hu (_1, _2); ++} ++v4u32 ++__lsx_vabsd_wu (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vabsd_wu (_1, _2); ++} ++v2u64 ++__lsx_vabsd_du (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vabsd_du (_1, _2); ++} ++v16i8 ++__lsx_vmul_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vmul_b (_1, _2); ++} ++v8i16 ++__lsx_vmul_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vmul_h 
(_1, _2); ++} ++v4i32 ++__lsx_vmul_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vmul_w (_1, _2); ++} ++v2i64 ++__lsx_vmul_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vmul_d (_1, _2); ++} ++v16i8 ++__lsx_vmadd_b (v16i8 _1, v16i8 _2, v16i8 _3) ++{ ++ return __builtin_lsx_vmadd_b (_1, _2, _3); ++} ++v8i16 ++__lsx_vmadd_h (v8i16 _1, v8i16 _2, v8i16 _3) ++{ ++ return __builtin_lsx_vmadd_h (_1, _2, _3); ++} ++v4i32 ++__lsx_vmadd_w (v4i32 _1, v4i32 _2, v4i32 _3) ++{ ++ return __builtin_lsx_vmadd_w (_1, _2, _3); ++} ++v2i64 ++__lsx_vmadd_d (v2i64 _1, v2i64 _2, v2i64 _3) ++{ ++ return __builtin_lsx_vmadd_d (_1, _2, _3); ++} ++v16i8 ++__lsx_vmsub_b (v16i8 _1, v16i8 _2, v16i8 _3) ++{ ++ return __builtin_lsx_vmsub_b (_1, _2, _3); ++} ++v8i16 ++__lsx_vmsub_h (v8i16 _1, v8i16 _2, v8i16 _3) ++{ ++ return __builtin_lsx_vmsub_h (_1, _2, _3); ++} ++v4i32 ++__lsx_vmsub_w (v4i32 _1, v4i32 _2, v4i32 _3) ++{ ++ return __builtin_lsx_vmsub_w (_1, _2, _3); ++} ++v2i64 ++__lsx_vmsub_d (v2i64 _1, v2i64 _2, v2i64 _3) ++{ ++ return __builtin_lsx_vmsub_d (_1, _2, _3); ++} ++v16i8 ++__lsx_vdiv_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vdiv_b (_1, _2); ++} ++v8i16 ++__lsx_vdiv_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vdiv_h (_1, _2); ++} ++v4i32 ++__lsx_vdiv_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vdiv_w (_1, _2); ++} ++v2i64 ++__lsx_vdiv_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vdiv_d (_1, _2); ++} ++v16u8 ++__lsx_vdiv_bu (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vdiv_bu (_1, _2); ++} ++v8u16 ++__lsx_vdiv_hu (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vdiv_hu (_1, _2); ++} ++v4u32 ++__lsx_vdiv_wu (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vdiv_wu (_1, _2); ++} ++v2u64 ++__lsx_vdiv_du (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vdiv_du (_1, _2); ++} ++v8i16 ++__lsx_vhaddw_h_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vhaddw_h_b (_1, _2); ++} ++v4i32 ++__lsx_vhaddw_w_h (v8i16 _1, v8i16 _2) ++{ ++ return 
__builtin_lsx_vhaddw_w_h (_1, _2); ++} ++v2i64 ++__lsx_vhaddw_d_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vhaddw_d_w (_1, _2); ++} ++v8u16 ++__lsx_vhaddw_hu_bu (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vhaddw_hu_bu (_1, _2); ++} ++v4u32 ++__lsx_vhaddw_wu_hu (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vhaddw_wu_hu (_1, _2); ++} ++v2u64 ++__lsx_vhaddw_du_wu (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vhaddw_du_wu (_1, _2); ++} ++v8i16 ++__lsx_vhsubw_h_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vhsubw_h_b (_1, _2); ++} ++v4i32 ++__lsx_vhsubw_w_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vhsubw_w_h (_1, _2); ++} ++v2i64 ++__lsx_vhsubw_d_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vhsubw_d_w (_1, _2); ++} ++v8i16 ++__lsx_vhsubw_hu_bu (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vhsubw_hu_bu (_1, _2); ++} ++v4i32 ++__lsx_vhsubw_wu_hu (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vhsubw_wu_hu (_1, _2); ++} ++v2i64 ++__lsx_vhsubw_du_wu (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vhsubw_du_wu (_1, _2); ++} ++v16i8 ++__lsx_vmod_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vmod_b (_1, _2); ++} ++v8i16 ++__lsx_vmod_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vmod_h (_1, _2); ++} ++v4i32 ++__lsx_vmod_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vmod_w (_1, _2); ++} ++v2i64 ++__lsx_vmod_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vmod_d (_1, _2); ++} ++v16u8 ++__lsx_vmod_bu (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vmod_bu (_1, _2); ++} ++v8u16 ++__lsx_vmod_hu (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vmod_hu (_1, _2); ++} ++v4u32 ++__lsx_vmod_wu (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vmod_wu (_1, _2); ++} ++v2u64 ++__lsx_vmod_du (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vmod_du (_1, _2); ++} ++v16i8 ++__lsx_vreplve_b (v16i8 _1, int _2) ++{ ++ return __builtin_lsx_vreplve_b (_1, _2); ++} ++v8i16 ++__lsx_vreplve_h (v8i16 _1, int _2) ++{ ++ return 
__builtin_lsx_vreplve_h (_1, _2); ++} ++v4i32 ++__lsx_vreplve_w (v4i32 _1, int _2) ++{ ++ return __builtin_lsx_vreplve_w (_1, _2); ++} ++v2i64 ++__lsx_vreplve_d (v2i64 _1, int _2) ++{ ++ return __builtin_lsx_vreplve_d (_1, _2); ++} ++v16i8 ++__lsx_vreplvei_b (v16i8 _1) ++{ ++ return __builtin_lsx_vreplvei_b (_1, 1); ++} ++v8i16 ++__lsx_vreplvei_h (v8i16 _1) ++{ ++ return __builtin_lsx_vreplvei_h (_1, 1); ++} ++v4i32 ++__lsx_vreplvei_w (v4i32 _1) ++{ ++ return __builtin_lsx_vreplvei_w (_1, 1); ++} ++v2i64 ++__lsx_vreplvei_d (v2i64 _1) ++{ ++ return __builtin_lsx_vreplvei_d (_1, 1); ++} ++v16i8 ++__lsx_vpickev_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vpickev_b (_1, _2); ++} ++v8i16 ++__lsx_vpickev_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vpickev_h (_1, _2); ++} ++v4i32 ++__lsx_vpickev_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vpickev_w (_1, _2); ++} ++v2i64 ++__lsx_vpickev_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vpickev_d (_1, _2); ++} ++v16i8 ++__lsx_vpickod_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vpickod_b (_1, _2); ++} ++v8i16 ++__lsx_vpickod_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vpickod_h (_1, _2); ++} ++v4i32 ++__lsx_vpickod_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vpickod_w (_1, _2); ++} ++v2i64 ++__lsx_vpickod_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vpickod_d (_1, _2); ++} ++v16i8 ++__lsx_vilvh_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vilvh_b (_1, _2); ++} ++v8i16 ++__lsx_vilvh_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vilvh_h (_1, _2); ++} ++v4i32 ++__lsx_vilvh_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vilvh_w (_1, _2); ++} ++v2i64 ++__lsx_vilvh_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vilvh_d (_1, _2); ++} ++v16i8 ++__lsx_vilvl_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vilvl_b (_1, _2); ++} ++v8i16 ++__lsx_vilvl_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vilvl_h (_1, _2); ++} ++v4i32 ++__lsx_vilvl_w (v4i32 _1, v4i32 _2) 
++{ ++ return __builtin_lsx_vilvl_w (_1, _2); ++} ++v2i64 ++__lsx_vilvl_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vilvl_d (_1, _2); ++} ++v16i8 ++__lsx_vpackev_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vpackev_b (_1, _2); ++} ++v8i16 ++__lsx_vpackev_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vpackev_h (_1, _2); ++} ++v4i32 ++__lsx_vpackev_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vpackev_w (_1, _2); ++} ++v2i64 ++__lsx_vpackev_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vpackev_d (_1, _2); ++} ++v16i8 ++__lsx_vpackod_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vpackod_b (_1, _2); ++} ++v8i16 ++__lsx_vpackod_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vpackod_h (_1, _2); ++} ++v4i32 ++__lsx_vpackod_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vpackod_w (_1, _2); ++} ++v2i64 ++__lsx_vpackod_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vpackod_d (_1, _2); ++} ++v8i16 ++__lsx_vshuf_h (v8i16 _1, v8i16 _2, v8i16 _3) ++{ ++ return __builtin_lsx_vshuf_h (_1, _2, _3); ++} ++v4i32 ++__lsx_vshuf_w (v4i32 _1, v4i32 _2, v4i32 _3) ++{ ++ return __builtin_lsx_vshuf_w (_1, _2, _3); ++} ++v2i64 ++__lsx_vshuf_d (v2i64 _1, v2i64 _2, v2i64 _3) ++{ ++ return __builtin_lsx_vshuf_d (_1, _2, _3); ++} ++v16u8 ++__lsx_vand_v (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vand_v (_1, _2); ++} ++v16u8 ++__lsx_vandi_b (v16u8 _1) ++{ ++ return __builtin_lsx_vandi_b (_1, 1); ++} ++v16u8 ++__lsx_vor_v (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vor_v (_1, _2); ++} ++v16u8 ++__lsx_vori_b (v16u8 _1) ++{ ++ return __builtin_lsx_vori_b (_1, 1); ++} ++v16u8 ++__lsx_vnor_v (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vnor_v (_1, _2); ++} ++v16u8 ++__lsx_vnori_b (v16u8 _1) ++{ ++ return __builtin_lsx_vnori_b (_1, 1); ++} ++v16u8 ++__lsx_vxor_v (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vxor_v (_1, _2); ++} ++v16u8 ++__lsx_vxori_b (v16u8 _1) ++{ ++ return __builtin_lsx_vxori_b (_1, 1); ++} ++v16u8 ++__lsx_vbitsel_v 
(v16u8 _1, v16u8 _2, v16u8 _3) ++{ ++ return __builtin_lsx_vbitsel_v (_1, _2, _3); ++} ++v16u8 ++__lsx_vbitseli_b (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vbitseli_b (_1, _2, 1); ++} ++v16i8 ++__lsx_vshuf4i_b (v16i8 _1) ++{ ++ return __builtin_lsx_vshuf4i_b (_1, 1); ++} ++v8i16 ++__lsx_vshuf4i_h (v8i16 _1) ++{ ++ return __builtin_lsx_vshuf4i_h (_1, 1); ++} ++v4i32 ++__lsx_vshuf4i_w (v4i32 _1) ++{ ++ return __builtin_lsx_vshuf4i_w (_1, 1); ++} ++v16i8 ++__lsx_vreplgr2vr_b (int _1) ++{ ++ return __builtin_lsx_vreplgr2vr_b (_1); ++} ++v8i16 ++__lsx_vreplgr2vr_h (int _1) ++{ ++ return __builtin_lsx_vreplgr2vr_h (_1); ++} ++v4i32 ++__lsx_vreplgr2vr_w (int _1) ++{ ++ return __builtin_lsx_vreplgr2vr_w (_1); ++} ++v2i64 ++__lsx_vreplgr2vr_d (long _1) ++{ ++ return __builtin_lsx_vreplgr2vr_d (_1); ++} ++v16i8 ++__lsx_vpcnt_b (v16i8 _1) ++{ ++ return __builtin_lsx_vpcnt_b (_1); ++} ++v8i16 ++__lsx_vpcnt_h (v8i16 _1) ++{ ++ return __builtin_lsx_vpcnt_h (_1); ++} ++v4i32 ++__lsx_vpcnt_w (v4i32 _1) ++{ ++ return __builtin_lsx_vpcnt_w (_1); ++} ++v2i64 ++__lsx_vpcnt_d (v2i64 _1) ++{ ++ return __builtin_lsx_vpcnt_d (_1); ++} ++v16i8 ++__lsx_vclo_b (v16i8 _1) ++{ ++ return __builtin_lsx_vclo_b (_1); ++} ++v8i16 ++__lsx_vclo_h (v8i16 _1) ++{ ++ return __builtin_lsx_vclo_h (_1); ++} ++v4i32 ++__lsx_vclo_w (v4i32 _1) ++{ ++ return __builtin_lsx_vclo_w (_1); ++} ++v2i64 ++__lsx_vclo_d (v2i64 _1) ++{ ++ return __builtin_lsx_vclo_d (_1); ++} ++v16i8 ++__lsx_vclz_b (v16i8 _1) ++{ ++ return __builtin_lsx_vclz_b (_1); ++} ++v8i16 ++__lsx_vclz_h (v8i16 _1) ++{ ++ return __builtin_lsx_vclz_h (_1); ++} ++v4i32 ++__lsx_vclz_w (v4i32 _1) ++{ ++ return __builtin_lsx_vclz_w (_1); ++} ++v2i64 ++__lsx_vclz_d (v2i64 _1) ++{ ++ return __builtin_lsx_vclz_d (_1); ++} ++int ++__lsx_vpickve2gr_b (v16i8 _1) ++{ ++ return __builtin_lsx_vpickve2gr_b (_1, 1); ++} ++int ++__lsx_vpickve2gr_h (v8i16 _1) ++{ ++ return __builtin_lsx_vpickve2gr_h (_1, 1); ++} ++int ++__lsx_vpickve2gr_w (v4i32 _1) ++{ ++ 
return __builtin_lsx_vpickve2gr_w (_1, 1); ++} ++long ++__lsx_vpickve2gr_d (v2i64 _1) ++{ ++ return __builtin_lsx_vpickve2gr_d (_1, 1); ++} ++unsigned int ++__lsx_vpickve2gr_bu (v16i8 _1) ++{ ++ return __builtin_lsx_vpickve2gr_bu (_1, 1); ++} ++unsigned int ++__lsx_vpickve2gr_hu (v8i16 _1) ++{ ++ return __builtin_lsx_vpickve2gr_hu (_1, 1); ++} ++unsigned int ++__lsx_vpickve2gr_wu (v4i32 _1) ++{ ++ return __builtin_lsx_vpickve2gr_wu (_1, 1); ++} ++unsigned long int ++__lsx_vpickve2gr_du (v2i64 _1) ++{ ++ return __builtin_lsx_vpickve2gr_du (_1, 1); ++} ++v16i8 ++__lsx_vinsgr2vr_b (v16i8 _1) ++{ ++ return __builtin_lsx_vinsgr2vr_b (_1, 1, 1); ++} ++v8i16 ++__lsx_vinsgr2vr_h (v8i16 _1) ++{ ++ return __builtin_lsx_vinsgr2vr_h (_1, 1, 1); ++} ++v4i32 ++__lsx_vinsgr2vr_w (v4i32 _1) ++{ ++ return __builtin_lsx_vinsgr2vr_w (_1, 1, 1); ++} ++v2i64 ++__lsx_vinsgr2vr_d (v2i64 _1) ++{ ++ return __builtin_lsx_vinsgr2vr_d (_1, 1, 1); ++} ++v4f32 ++__lsx_vfadd_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfadd_s (_1, _2); ++} ++v2f64 ++__lsx_vfadd_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfadd_d (_1, _2); ++} ++v4f32 ++__lsx_vfsub_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfsub_s (_1, _2); ++} ++v2f64 ++__lsx_vfsub_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfsub_d (_1, _2); ++} ++v4f32 ++__lsx_vfmul_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfmul_s (_1, _2); ++} ++v2f64 ++__lsx_vfmul_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfmul_d (_1, _2); ++} ++v4f32 ++__lsx_vfdiv_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfdiv_s (_1, _2); ++} ++v2f64 ++__lsx_vfdiv_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfdiv_d (_1, _2); ++} ++v8i16 ++__lsx_vfcvt_h_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcvt_h_s (_1, _2); ++} ++v4f32 ++__lsx_vfcvt_s_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfcvt_s_d (_1, _2); ++} ++v4f32 ++__lsx_vfmin_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfmin_s (_1, _2); ++} ++v2f64 
++__lsx_vfmin_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfmin_d (_1, _2); ++} ++v4f32 ++__lsx_vfmina_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfmina_s (_1, _2); ++} ++v2f64 ++__lsx_vfmina_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfmina_d (_1, _2); ++} ++v4f32 ++__lsx_vfmax_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfmax_s (_1, _2); ++} ++v2f64 ++__lsx_vfmax_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfmax_d (_1, _2); ++} ++v4f32 ++__lsx_vfmaxa_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfmaxa_s (_1, _2); ++} ++v2f64 ++__lsx_vfmaxa_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfmaxa_d (_1, _2); ++} ++v4i32 ++__lsx_vfclass_s (v4f32 _1) ++{ ++ return __builtin_lsx_vfclass_s (_1); ++} ++v2i64 ++__lsx_vfclass_d (v2f64 _1) ++{ ++ return __builtin_lsx_vfclass_d (_1); ++} ++v4f32 ++__lsx_vfsqrt_s (v4f32 _1) ++{ ++ return __builtin_lsx_vfsqrt_s (_1); ++} ++v2f64 ++__lsx_vfsqrt_d (v2f64 _1) ++{ ++ return __builtin_lsx_vfsqrt_d (_1); ++} ++v4f32 ++__lsx_vfrecip_s (v4f32 _1) ++{ ++ return __builtin_lsx_vfrecip_s (_1); ++} ++v2f64 ++__lsx_vfrecip_d (v2f64 _1) ++{ ++ return __builtin_lsx_vfrecip_d (_1); ++} ++v4f32 ++__lsx_vfrint_s (v4f32 _1) ++{ ++ return __builtin_lsx_vfrint_s (_1); ++} ++v2f64 ++__lsx_vfrint_d (v2f64 _1) ++{ ++ return __builtin_lsx_vfrint_d (_1); ++} ++v4f32 ++__lsx_vfrsqrt_s (v4f32 _1) ++{ ++ return __builtin_lsx_vfrsqrt_s (_1); ++} ++v2f64 ++__lsx_vfrsqrt_d (v2f64 _1) ++{ ++ return __builtin_lsx_vfrsqrt_d (_1); ++} ++v4f32 ++__lsx_vflogb_s (v4f32 _1) ++{ ++ return __builtin_lsx_vflogb_s (_1); ++} ++v2f64 ++__lsx_vflogb_d (v2f64 _1) ++{ ++ return __builtin_lsx_vflogb_d (_1); ++} ++v4f32 ++__lsx_vfcvth_s_h (v8i16 _1) ++{ ++ return __builtin_lsx_vfcvth_s_h (_1); ++} ++v2f64 ++__lsx_vfcvth_d_s (v4f32 _1) ++{ ++ return __builtin_lsx_vfcvth_d_s (_1); ++} ++v4f32 ++__lsx_vfcvtl_s_h (v8i16 _1) ++{ ++ return __builtin_lsx_vfcvtl_s_h (_1); ++} ++v2f64 ++__lsx_vfcvtl_d_s (v4f32 _1) ++{ ++ return 
__builtin_lsx_vfcvtl_d_s (_1); ++} ++v4i32 ++__lsx_vftint_w_s (v4f32 _1) ++{ ++ return __builtin_lsx_vftint_w_s (_1); ++} ++v2i64 ++__lsx_vftint_l_d (v2f64 _1) ++{ ++ return __builtin_lsx_vftint_l_d (_1); ++} ++v4u32 ++__lsx_vftint_wu_s (v4f32 _1) ++{ ++ return __builtin_lsx_vftint_wu_s (_1); ++} ++v2u64 ++__lsx_vftint_lu_d (v2f64 _1) ++{ ++ return __builtin_lsx_vftint_lu_d (_1); ++} ++v4i32 ++__lsx_vftintrz_w_s (v4f32 _1) ++{ ++ return __builtin_lsx_vftintrz_w_s (_1); ++} ++v2i64 ++__lsx_vftintrz_l_d (v2f64 _1) ++{ ++ return __builtin_lsx_vftintrz_l_d (_1); ++} ++v4u32 ++__lsx_vftintrz_wu_s (v4f32 _1) ++{ ++ return __builtin_lsx_vftintrz_wu_s (_1); ++} ++v2u64 ++__lsx_vftintrz_lu_d (v2f64 _1) ++{ ++ return __builtin_lsx_vftintrz_lu_d (_1); ++} ++v4f32 ++__lsx_vffint_s_w (v4i32 _1) ++{ ++ return __builtin_lsx_vffint_s_w (_1); ++} ++v2f64 ++__lsx_vffint_d_l (v2i64 _1) ++{ ++ return __builtin_lsx_vffint_d_l (_1); ++} ++v4f32 ++__lsx_vffint_s_wu (v4u32 _1) ++{ ++ return __builtin_lsx_vffint_s_wu (_1); ++} ++v2f64 ++__lsx_vffint_d_lu (v2u64 _1) ++{ ++ return __builtin_lsx_vffint_d_lu (_1); ++} ++v16u8 ++__lsx_vandn_v (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vandn_v (_1, _2); ++} ++v16i8 ++__lsx_vneg_b (v16i8 _1) ++{ ++ return __builtin_lsx_vneg_b (_1); ++} ++v8i16 ++__lsx_vneg_h (v8i16 _1) ++{ ++ return __builtin_lsx_vneg_h (_1); ++} ++v4i32 ++__lsx_vneg_w (v4i32 _1) ++{ ++ return __builtin_lsx_vneg_w (_1); ++} ++v2i64 ++__lsx_vneg_d (v2i64 _1) ++{ ++ return __builtin_lsx_vneg_d (_1); ++} ++v16i8 ++__lsx_vmuh_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vmuh_b (_1, _2); ++} ++v8i16 ++__lsx_vmuh_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vmuh_h (_1, _2); ++} ++v4i32 ++__lsx_vmuh_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vmuh_w (_1, _2); ++} ++v2i64 ++__lsx_vmuh_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vmuh_d (_1, _2); ++} ++v16u8 ++__lsx_vmuh_bu (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vmuh_bu (_1, _2); ++} ++v8u16 
++__lsx_vmuh_hu (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vmuh_hu (_1, _2); ++} ++v4u32 ++__lsx_vmuh_wu (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vmuh_wu (_1, _2); ++} ++v2u64 ++__lsx_vmuh_du (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vmuh_du (_1, _2); ++} ++v8i16 ++__lsx_vsllwil_h_b (v16i8 _1) ++{ ++ return __builtin_lsx_vsllwil_h_b (_1, 1); ++} ++v4i32 ++__lsx_vsllwil_w_h (v8i16 _1) ++{ ++ return __builtin_lsx_vsllwil_w_h (_1, 1); ++} ++v2i64 ++__lsx_vsllwil_d_w (v4i32 _1) ++{ ++ return __builtin_lsx_vsllwil_d_w (_1, 1); ++} ++v8u16 ++__lsx_vsllwil_hu_bu (v16u8 _1) ++{ ++ return __builtin_lsx_vsllwil_hu_bu (_1, 1); ++} ++v4u32 ++__lsx_vsllwil_wu_hu (v8u16 _1) ++{ ++ return __builtin_lsx_vsllwil_wu_hu (_1, 1); ++} ++v2u64 ++__lsx_vsllwil_du_wu (v4u32 _1) ++{ ++ return __builtin_lsx_vsllwil_du_wu (_1, 1); ++} ++v16i8 ++__lsx_vsran_b_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vsran_b_h (_1, _2); ++} ++v8i16 ++__lsx_vsran_h_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vsran_h_w (_1, _2); ++} ++v4i32 ++__lsx_vsran_w_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vsran_w_d (_1, _2); ++} ++v16i8 ++__lsx_vssran_b_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vssran_b_h (_1, _2); ++} ++v8i16 ++__lsx_vssran_h_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vssran_h_w (_1, _2); ++} ++v4i32 ++__lsx_vssran_w_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vssran_w_d (_1, _2); ++} ++v16u8 ++__lsx_vssran_bu_h (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vssran_bu_h (_1, _2); ++} ++v8u16 ++__lsx_vssran_hu_w (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vssran_hu_w (_1, _2); ++} ++v4u32 ++__lsx_vssran_wu_d (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vssran_wu_d (_1, _2); ++} ++v16i8 ++__lsx_vsrarn_b_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vsrarn_b_h (_1, _2); ++} ++v8i16 ++__lsx_vsrarn_h_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vsrarn_h_w (_1, _2); ++} ++v4i32 ++__lsx_vsrarn_w_d (v2i64 _1, v2i64 _2) 
++{ ++ return __builtin_lsx_vsrarn_w_d (_1, _2); ++} ++v16i8 ++__lsx_vssrarn_b_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vssrarn_b_h (_1, _2); ++} ++v8i16 ++__lsx_vssrarn_h_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vssrarn_h_w (_1, _2); ++} ++v4i32 ++__lsx_vssrarn_w_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vssrarn_w_d (_1, _2); ++} ++v16u8 ++__lsx_vssrarn_bu_h (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vssrarn_bu_h (_1, _2); ++} ++v8u16 ++__lsx_vssrarn_hu_w (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vssrarn_hu_w (_1, _2); ++} ++v4u32 ++__lsx_vssrarn_wu_d (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vssrarn_wu_d (_1, _2); ++} ++v16i8 ++__lsx_vsrln_b_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vsrln_b_h (_1, _2); ++} ++v8i16 ++__lsx_vsrln_h_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vsrln_h_w (_1, _2); ++} ++v4i32 ++__lsx_vsrln_w_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vsrln_w_d (_1, _2); ++} ++v16u8 ++__lsx_vssrln_bu_h (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vssrln_bu_h (_1, _2); ++} ++v8u16 ++__lsx_vssrln_hu_w (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vssrln_hu_w (_1, _2); ++} ++v4u32 ++__lsx_vssrln_wu_d (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vssrln_wu_d (_1, _2); ++} ++v16i8 ++__lsx_vsrlrn_b_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vsrlrn_b_h (_1, _2); ++} ++v8i16 ++__lsx_vsrlrn_h_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vsrlrn_h_w (_1, _2); ++} ++v4i32 ++__lsx_vsrlrn_w_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vsrlrn_w_d (_1, _2); ++} ++v16u8 ++__lsx_vssrlrn_bu_h (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vssrlrn_bu_h (_1, _2); ++} ++v8u16 ++__lsx_vssrlrn_hu_w (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vssrlrn_hu_w (_1, _2); ++} ++v4u32 ++__lsx_vssrlrn_wu_d (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vssrlrn_wu_d (_1, _2); ++} ++v16i8 ++__lsx_vfrstpi_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vfrstpi_b (_1, _2, 1); 
++} ++v8i16 ++__lsx_vfrstpi_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vfrstpi_h (_1, _2, 1); ++} ++v16i8 ++__lsx_vfrstp_b (v16i8 _1, v16i8 _2, v16i8 _3) ++{ ++ return __builtin_lsx_vfrstp_b (_1, _2, _3); ++} ++v8i16 ++__lsx_vfrstp_h (v8i16 _1, v8i16 _2, v8i16 _3) ++{ ++ return __builtin_lsx_vfrstp_h (_1, _2, _3); ++} ++v2i64 ++__lsx_vshuf4i_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vshuf4i_d (_1, _2, 1); ++} ++v16i8 ++__lsx_vbsrl_v (v16i8 _1) ++{ ++ return __builtin_lsx_vbsrl_v (_1, 1); ++} ++v16i8 ++__lsx_vbsll_v (v16i8 _1) ++{ ++ return __builtin_lsx_vbsll_v (_1, 1); ++} ++v16i8 ++__lsx_vextrins_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vextrins_b (_1, _2, 1); ++} ++v8i16 ++__lsx_vextrins_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vextrins_h (_1, _2, 1); ++} ++v4i32 ++__lsx_vextrins_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vextrins_w (_1, _2, 1); ++} ++v2i64 ++__lsx_vextrins_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vextrins_d (_1, _2, 1); ++} ++v16i8 ++__lsx_vmskltz_b (v16i8 _1) ++{ ++ return __builtin_lsx_vmskltz_b (_1); ++} ++v8i16 ++__lsx_vmskltz_h (v8i16 _1) ++{ ++ return __builtin_lsx_vmskltz_h (_1); ++} ++v4i32 ++__lsx_vmskltz_w (v4i32 _1) ++{ ++ return __builtin_lsx_vmskltz_w (_1); ++} ++v2i64 ++__lsx_vmskltz_d (v2i64 _1) ++{ ++ return __builtin_lsx_vmskltz_d (_1); ++} ++v16i8 ++__lsx_vsigncov_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vsigncov_b (_1, _2); ++} ++v8i16 ++__lsx_vsigncov_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vsigncov_h (_1, _2); ++} ++v4i32 ++__lsx_vsigncov_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vsigncov_w (_1, _2); ++} ++v2i64 ++__lsx_vsigncov_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vsigncov_d (_1, _2); ++} ++v4f32 ++__lsx_vfmadd_s (v4f32 _1, v4f32 _2, v4f32 _3) ++{ ++ return __builtin_lsx_vfmadd_s (_1, _2, _3); ++} ++v2f64 ++__lsx_vfmadd_d (v2f64 _1, v2f64 _2, v2f64 _3) ++{ ++ return __builtin_lsx_vfmadd_d (_1, _2, _3); ++} ++v4f32 
++__lsx_vfmsub_s (v4f32 _1, v4f32 _2, v4f32 _3) ++{ ++ return __builtin_lsx_vfmsub_s (_1, _2, _3); ++} ++v2f64 ++__lsx_vfmsub_d (v2f64 _1, v2f64 _2, v2f64 _3) ++{ ++ return __builtin_lsx_vfmsub_d (_1, _2, _3); ++} ++v4f32 ++__lsx_vfnmadd_s (v4f32 _1, v4f32 _2, v4f32 _3) ++{ ++ return __builtin_lsx_vfnmadd_s (_1, _2, _3); ++} ++v2f64 ++__lsx_vfnmadd_d (v2f64 _1, v2f64 _2, v2f64 _3) ++{ ++ return __builtin_lsx_vfnmadd_d (_1, _2, _3); ++} ++v4f32 ++__lsx_vfnmsub_s (v4f32 _1, v4f32 _2, v4f32 _3) ++{ ++ return __builtin_lsx_vfnmsub_s (_1, _2, _3); ++} ++v2f64 ++__lsx_vfnmsub_d (v2f64 _1, v2f64 _2, v2f64 _3) ++{ ++ return __builtin_lsx_vfnmsub_d (_1, _2, _3); ++} ++v4i32 ++__lsx_vftintrne_w_s (v4f32 _1) ++{ ++ return __builtin_lsx_vftintrne_w_s (_1); ++} ++v2i64 ++__lsx_vftintrne_l_d (v2f64 _1) ++{ ++ return __builtin_lsx_vftintrne_l_d (_1); ++} ++v4i32 ++__lsx_vftintrp_w_s (v4f32 _1) ++{ ++ return __builtin_lsx_vftintrp_w_s (_1); ++} ++v2i64 ++__lsx_vftintrp_l_d (v2f64 _1) ++{ ++ return __builtin_lsx_vftintrp_l_d (_1); ++} ++v4i32 ++__lsx_vftintrm_w_s (v4f32 _1) ++{ ++ return __builtin_lsx_vftintrm_w_s (_1); ++} ++v2i64 ++__lsx_vftintrm_l_d (v2f64 _1) ++{ ++ return __builtin_lsx_vftintrm_l_d (_1); ++} ++v4i32 ++__lsx_vftint_w_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vftint_w_d (_1, _2); ++} ++v4f32 ++__lsx_vffint_s_l (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vffint_s_l (_1, _2); ++} ++v4i32 ++__lsx_vftintrz_w_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vftintrz_w_d (_1, _2); ++} ++v4i32 ++__lsx_vftintrp_w_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vftintrp_w_d (_1, _2); ++} ++v4i32 ++__lsx_vftintrm_w_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vftintrm_w_d (_1, _2); ++} ++v4i32 ++__lsx_vftintrne_w_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vftintrne_w_d (_1, _2); ++} ++v2i64 ++__lsx_vftintl_l_s (v4f32 _1) ++{ ++ return __builtin_lsx_vftintl_l_s (_1); ++} ++v2i64 ++__lsx_vftinth_l_s (v4f32 _1) ++{ ++ return 
__builtin_lsx_vftinth_l_s (_1); ++} ++v2f64 ++__lsx_vffinth_d_w (v4i32 _1) ++{ ++ return __builtin_lsx_vffinth_d_w (_1); ++} ++v2f64 ++__lsx_vffintl_d_w (v4i32 _1) ++{ ++ return __builtin_lsx_vffintl_d_w (_1); ++} ++v2i64 ++__lsx_vftintrzl_l_s (v4f32 _1) ++{ ++ return __builtin_lsx_vftintrzl_l_s (_1); ++} ++v2i64 ++__lsx_vftintrzh_l_s (v4f32 _1) ++{ ++ return __builtin_lsx_vftintrzh_l_s (_1); ++} ++v2i64 ++__lsx_vftintrpl_l_s (v4f32 _1) ++{ ++ return __builtin_lsx_vftintrpl_l_s (_1); ++} ++v2i64 ++__lsx_vftintrph_l_s (v4f32 _1) ++{ ++ return __builtin_lsx_vftintrph_l_s (_1); ++} ++v2i64 ++__lsx_vftintrml_l_s (v4f32 _1) ++{ ++ return __builtin_lsx_vftintrml_l_s (_1); ++} ++v2i64 ++__lsx_vftintrmh_l_s (v4f32 _1) ++{ ++ return __builtin_lsx_vftintrmh_l_s (_1); ++} ++v2i64 ++__lsx_vftintrnel_l_s (v4f32 _1) ++{ ++ return __builtin_lsx_vftintrnel_l_s (_1); ++} ++v2i64 ++__lsx_vftintrneh_l_s (v4f32 _1) ++{ ++ return __builtin_lsx_vftintrneh_l_s (_1); ++} ++v4f32 ++__lsx_vfrintrne_s (v4f32 _1) ++{ ++ return __builtin_lsx_vfrintrne_s (_1); ++} ++v2f64 ++__lsx_vfrintrne_d (v2f64 _1) ++{ ++ return __builtin_lsx_vfrintrne_d (_1); ++} ++v4f32 ++__lsx_vfrintrz_s (v4f32 _1) ++{ ++ return __builtin_lsx_vfrintrz_s (_1); ++} ++v2f64 ++__lsx_vfrintrz_d (v2f64 _1) ++{ ++ return __builtin_lsx_vfrintrz_d (_1); ++} ++v4f32 ++__lsx_vfrintrp_s (v4f32 _1) ++{ ++ return __builtin_lsx_vfrintrp_s (_1); ++} ++v2f64 ++__lsx_vfrintrp_d (v2f64 _1) ++{ ++ return __builtin_lsx_vfrintrp_d (_1); ++} ++v4f32 ++__lsx_vfrintrm_s (v4f32 _1) ++{ ++ return __builtin_lsx_vfrintrm_s (_1); ++} ++v2f64 ++__lsx_vfrintrm_d (v2f64 _1) ++{ ++ return __builtin_lsx_vfrintrm_d (_1); ++} ++void ++__lsx_vstelm_b (v16i8 _1, void *_2) ++{ ++ return __builtin_lsx_vstelm_b (_1, _2, 1, 1); ++} ++void ++__lsx_vstelm_h (v8i16 _1, void *_2) ++{ ++ return __builtin_lsx_vstelm_h (_1, _2, 2, 1); ++} ++void ++__lsx_vstelm_w (v4i32 _1, void *_2) ++{ ++ return __builtin_lsx_vstelm_w (_1, _2, 4, 1); ++} ++void ++__lsx_vstelm_d (v2i64 
_1, void *_2) ++{ ++ return __builtin_lsx_vstelm_d (_1, _2, 8, 1); ++} ++v2i64 ++__lsx_vaddwev_d_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vaddwev_d_w (_1, _2); ++} ++v4i32 ++__lsx_vaddwev_w_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vaddwev_w_h (_1, _2); ++} ++v8i16 ++__lsx_vaddwev_h_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vaddwev_h_b (_1, _2); ++} ++v2i64 ++__lsx_vaddwod_d_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vaddwod_d_w (_1, _2); ++} ++v4i32 ++__lsx_vaddwod_w_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vaddwod_w_h (_1, _2); ++} ++v8i16 ++__lsx_vaddwod_h_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vaddwod_h_b (_1, _2); ++} ++v2i64 ++__lsx_vaddwev_d_wu (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vaddwev_d_wu (_1, _2); ++} ++v4i32 ++__lsx_vaddwev_w_hu (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vaddwev_w_hu (_1, _2); ++} ++v8i16 ++__lsx_vaddwev_h_bu (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vaddwev_h_bu (_1, _2); ++} ++v2i64 ++__lsx_vaddwod_d_wu (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vaddwod_d_wu (_1, _2); ++} ++v4i32 ++__lsx_vaddwod_w_hu (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vaddwod_w_hu (_1, _2); ++} ++v8i16 ++__lsx_vaddwod_h_bu (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vaddwod_h_bu (_1, _2); ++} ++v2i64 ++__lsx_vaddwev_d_wu_w (v4u32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vaddwev_d_wu_w (_1, _2); ++} ++v4i32 ++__lsx_vaddwev_w_hu_h (v8u16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vaddwev_w_hu_h (_1, _2); ++} ++v8i16 ++__lsx_vaddwev_h_bu_b (v16u8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vaddwev_h_bu_b (_1, _2); ++} ++v2i64 ++__lsx_vaddwod_d_wu_w (v4u32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vaddwod_d_wu_w (_1, _2); ++} ++v4i32 ++__lsx_vaddwod_w_hu_h (v8u16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vaddwod_w_hu_h (_1, _2); ++} ++v8i16 ++__lsx_vaddwod_h_bu_b (v16u8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vaddwod_h_bu_b (_1, _2); ++} ++v2i64 
++__lsx_vsubwev_d_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vsubwev_d_w (_1, _2); ++} ++v4i32 ++__lsx_vsubwev_w_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vsubwev_w_h (_1, _2); ++} ++v8i16 ++__lsx_vsubwev_h_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vsubwev_h_b (_1, _2); ++} ++v2i64 ++__lsx_vsubwod_d_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vsubwod_d_w (_1, _2); ++} ++v4i32 ++__lsx_vsubwod_w_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vsubwod_w_h (_1, _2); ++} ++v8i16 ++__lsx_vsubwod_h_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vsubwod_h_b (_1, _2); ++} ++v2i64 ++__lsx_vsubwev_d_wu (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vsubwev_d_wu (_1, _2); ++} ++v4i32 ++__lsx_vsubwev_w_hu (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vsubwev_w_hu (_1, _2); ++} ++v8i16 ++__lsx_vsubwev_h_bu (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vsubwev_h_bu (_1, _2); ++} ++v2i64 ++__lsx_vsubwod_d_wu (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vsubwod_d_wu (_1, _2); ++} ++v4i32 ++__lsx_vsubwod_w_hu (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vsubwod_w_hu (_1, _2); ++} ++v8i16 ++__lsx_vsubwod_h_bu (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vsubwod_h_bu (_1, _2); ++} ++v2i64 ++__lsx_vaddwev_q_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vaddwev_q_d (_1, _2); ++} ++v2i64 ++__lsx_vaddwod_q_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vaddwod_q_d (_1, _2); ++} ++v2i64 ++__lsx_vaddwev_q_du (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vaddwev_q_du (_1, _2); ++} ++v2i64 ++__lsx_vaddwod_q_du (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vaddwod_q_du (_1, _2); ++} ++v2i64 ++__lsx_vsubwev_q_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vsubwev_q_d (_1, _2); ++} ++v2i64 ++__lsx_vsubwod_q_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vsubwod_q_d (_1, _2); ++} ++v2i64 ++__lsx_vsubwev_q_du (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vsubwev_q_du (_1, _2); ++} ++v2i64 ++__lsx_vsubwod_q_du 
(v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vsubwod_q_du (_1, _2); ++} ++v2i64 ++__lsx_vaddwev_q_du_d (v2u64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vaddwev_q_du_d (_1, _2); ++} ++v2i64 ++__lsx_vaddwod_q_du_d (v2u64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vaddwod_q_du_d (_1, _2); ++} ++v2i64 ++__lsx_vmulwev_d_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vmulwev_d_w (_1, _2); ++} ++v4i32 ++__lsx_vmulwev_w_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vmulwev_w_h (_1, _2); ++} ++v8i16 ++__lsx_vmulwev_h_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vmulwev_h_b (_1, _2); ++} ++v2i64 ++__lsx_vmulwod_d_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vmulwod_d_w (_1, _2); ++} ++v4i32 ++__lsx_vmulwod_w_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vmulwod_w_h (_1, _2); ++} ++v8i16 ++__lsx_vmulwod_h_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vmulwod_h_b (_1, _2); ++} ++v2i64 ++__lsx_vmulwev_d_wu (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vmulwev_d_wu (_1, _2); ++} ++v4i32 ++__lsx_vmulwev_w_hu (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vmulwev_w_hu (_1, _2); ++} ++v8i16 ++__lsx_vmulwev_h_bu (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vmulwev_h_bu (_1, _2); ++} ++v2i64 ++__lsx_vmulwod_d_wu (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vmulwod_d_wu (_1, _2); ++} ++v4i32 ++__lsx_vmulwod_w_hu (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vmulwod_w_hu (_1, _2); ++} ++v8i16 ++__lsx_vmulwod_h_bu (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vmulwod_h_bu (_1, _2); ++} ++v2i64 ++__lsx_vmulwev_d_wu_w (v4u32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vmulwev_d_wu_w (_1, _2); ++} ++v4i32 ++__lsx_vmulwev_w_hu_h (v8u16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vmulwev_w_hu_h (_1, _2); ++} ++v8i16 ++__lsx_vmulwev_h_bu_b (v16u8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vmulwev_h_bu_b (_1, _2); ++} ++v2i64 ++__lsx_vmulwod_d_wu_w (v4u32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vmulwod_d_wu_w (_1, _2); ++} ++v4i32 
++__lsx_vmulwod_w_hu_h (v8u16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vmulwod_w_hu_h (_1, _2); ++} ++v8i16 ++__lsx_vmulwod_h_bu_b (v16u8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vmulwod_h_bu_b (_1, _2); ++} ++v2i64 ++__lsx_vmulwev_q_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vmulwev_q_d (_1, _2); ++} ++v2i64 ++__lsx_vmulwod_q_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vmulwod_q_d (_1, _2); ++} ++v2i64 ++__lsx_vmulwev_q_du (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vmulwev_q_du (_1, _2); ++} ++v2i64 ++__lsx_vmulwod_q_du (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vmulwod_q_du (_1, _2); ++} ++v2i64 ++__lsx_vmulwev_q_du_d (v2u64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vmulwev_q_du_d (_1, _2); ++} ++v2i64 ++__lsx_vmulwod_q_du_d (v2u64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vmulwod_q_du_d (_1, _2); ++} ++v2i64 ++__lsx_vhaddw_q_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vhaddw_q_d (_1, _2); ++} ++v2u64 ++__lsx_vhaddw_qu_du (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vhaddw_qu_du (_1, _2); ++} ++v2i64 ++__lsx_vhsubw_q_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vhsubw_q_d (_1, _2); ++} ++v2u64 ++__lsx_vhsubw_qu_du (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vhsubw_qu_du (_1, _2); ++} ++v2i64 ++__lsx_vmaddwev_d_w (v2i64 _1, v4i32 _2, v4i32 _3) ++{ ++ return __builtin_lsx_vmaddwev_d_w (_1, _2, _3); ++} ++v4i32 ++__lsx_vmaddwev_w_h (v4i32 _1, v8i16 _2, v8i16 _3) ++{ ++ return __builtin_lsx_vmaddwev_w_h (_1, _2, _3); ++} ++v8i16 ++__lsx_vmaddwev_h_b (v8i16 _1, v16i8 _2, v16i8 _3) ++{ ++ return __builtin_lsx_vmaddwev_h_b (_1, _2, _3); ++} ++v2u64 ++__lsx_vmaddwev_d_wu (v2u64 _1, v4u32 _2, v4u32 _3) ++{ ++ return __builtin_lsx_vmaddwev_d_wu (_1, _2, _3); ++} ++v4u32 ++__lsx_vmaddwev_w_hu (v4u32 _1, v8u16 _2, v8u16 _3) ++{ ++ return __builtin_lsx_vmaddwev_w_hu (_1, _2, _3); ++} ++v8u16 ++__lsx_vmaddwev_h_bu (v8u16 _1, v16u8 _2, v16u8 _3) ++{ ++ return __builtin_lsx_vmaddwev_h_bu (_1, _2, _3); ++} ++v2i64 
++__lsx_vmaddwod_d_w (v2i64 _1, v4i32 _2, v4i32 _3) ++{ ++ return __builtin_lsx_vmaddwod_d_w (_1, _2, _3); ++} ++v4i32 ++__lsx_vmaddwod_w_h (v4i32 _1, v8i16 _2, v8i16 _3) ++{ ++ return __builtin_lsx_vmaddwod_w_h (_1, _2, _3); ++} ++v8i16 ++__lsx_vmaddwod_h_b (v8i16 _1, v16i8 _2, v16i8 _3) ++{ ++ return __builtin_lsx_vmaddwod_h_b (_1, _2, _3); ++} ++v2u64 ++__lsx_vmaddwod_d_wu (v2u64 _1, v4u32 _2, v4u32 _3) ++{ ++ return __builtin_lsx_vmaddwod_d_wu (_1, _2, _3); ++} ++v4u32 ++__lsx_vmaddwod_w_hu (v4u32 _1, v8u16 _2, v8u16 _3) ++{ ++ return __builtin_lsx_vmaddwod_w_hu (_1, _2, _3); ++} ++v8u16 ++__lsx_vmaddwod_h_bu (v8u16 _1, v16u8 _2, v16u8 _3) ++{ ++ return __builtin_lsx_vmaddwod_h_bu (_1, _2, _3); ++} ++v2i64 ++__lsx_vmaddwev_d_wu_w (v2i64 _1, v4u32 _2, v4i32 _3) ++{ ++ return __builtin_lsx_vmaddwev_d_wu_w (_1, _2, _3); ++} ++v4i32 ++__lsx_vmaddwev_w_hu_h (v4i32 _1, v8u16 _2, v8i16 _3) ++{ ++ return __builtin_lsx_vmaddwev_w_hu_h (_1, _2, _3); ++} ++v8i16 ++__lsx_vmaddwev_h_bu_b (v8i16 _1, v16u8 _2, v16i8 _3) ++{ ++ return __builtin_lsx_vmaddwev_h_bu_b (_1, _2, _3); ++} ++v2i64 ++__lsx_vmaddwod_d_wu_w (v2i64 _1, v4u32 _2, v4i32 _3) ++{ ++ return __builtin_lsx_vmaddwod_d_wu_w (_1, _2, _3); ++} ++v4i32 ++__lsx_vmaddwod_w_hu_h (v4i32 _1, v8u16 _2, v8i16 _3) ++{ ++ return __builtin_lsx_vmaddwod_w_hu_h (_1, _2, _3); ++} ++v8i16 ++__lsx_vmaddwod_h_bu_b (v8i16 _1, v16u8 _2, v16i8 _3) ++{ ++ return __builtin_lsx_vmaddwod_h_bu_b (_1, _2, _3); ++} ++v2i64 ++__lsx_vmaddwev_q_d (v2i64 _1, v2i64 _2, v2i64 _3) ++{ ++ return __builtin_lsx_vmaddwev_q_d (_1, _2, _3); ++} ++v2i64 ++__lsx_vmaddwod_q_d (v2i64 _1, v2i64 _2, v2i64 _3) ++{ ++ return __builtin_lsx_vmaddwod_q_d (_1, _2, _3); ++} ++v2u64 ++__lsx_vmaddwev_q_du (v2u64 _1, v2u64 _2, v2u64 _3) ++{ ++ return __builtin_lsx_vmaddwev_q_du (_1, _2, _3); ++} ++v2u64 ++__lsx_vmaddwod_q_du (v2u64 _1, v2u64 _2, v2u64 _3) ++{ ++ return __builtin_lsx_vmaddwod_q_du (_1, _2, _3); ++} ++v2i64 ++__lsx_vmaddwev_q_du_d (v2i64 _1, v2u64 _2, 
v2i64 _3) ++{ ++ return __builtin_lsx_vmaddwev_q_du_d (_1, _2, _3); ++} ++v2i64 ++__lsx_vmaddwod_q_du_d (v2i64 _1, v2u64 _2, v2i64 _3) ++{ ++ return __builtin_lsx_vmaddwod_q_du_d (_1, _2, _3); ++} ++v16i8 ++__lsx_vrotr_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vrotr_b (_1, _2); ++} ++v8i16 ++__lsx_vrotr_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vrotr_h (_1, _2); ++} ++v4i32 ++__lsx_vrotr_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vrotr_w (_1, _2); ++} ++v2i64 ++__lsx_vrotr_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vrotr_d (_1, _2); ++} ++v2i64 ++__lsx_vadd_q (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vadd_q (_1, _2); ++} ++v2i64 ++__lsx_vsub_q (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vsub_q (_1, _2); ++} ++v16i8 ++__lsx_vldrepl_b (void *_1) ++{ ++ return __builtin_lsx_vldrepl_b (_1, 1); ++} ++v8i16 ++__lsx_vldrepl_h (void *_1) ++{ ++ return __builtin_lsx_vldrepl_h (_1, 2); ++} ++v4i32 ++__lsx_vldrepl_w (void *_1) ++{ ++ return __builtin_lsx_vldrepl_w (_1, 4); ++} ++v2i64 ++__lsx_vldrepl_d (void *_1) ++{ ++ return __builtin_lsx_vldrepl_d (_1, 8); ++} ++v16i8 ++__lsx_vmskgez_b (v16i8 _1) ++{ ++ return __builtin_lsx_vmskgez_b (_1); ++} ++v16i8 ++__lsx_vmsknz_b (v16i8 _1) ++{ ++ return __builtin_lsx_vmsknz_b (_1); ++} ++v8i16 ++__lsx_vexth_h_b (v16i8 _1) ++{ ++ return __builtin_lsx_vexth_h_b (_1); ++} ++v4i32 ++__lsx_vexth_w_h (v8i16 _1) ++{ ++ return __builtin_lsx_vexth_w_h (_1); ++} ++v2i64 ++__lsx_vexth_d_w (v4i32 _1) ++{ ++ return __builtin_lsx_vexth_d_w (_1); ++} ++v2i64 ++__lsx_vexth_q_d (v2i64 _1) ++{ ++ return __builtin_lsx_vexth_q_d (_1); ++} ++v8u16 ++__lsx_vexth_hu_bu (v16u8 _1) ++{ ++ return __builtin_lsx_vexth_hu_bu (_1); ++} ++v4u32 ++__lsx_vexth_wu_hu (v8u16 _1) ++{ ++ return __builtin_lsx_vexth_wu_hu (_1); ++} ++v2u64 ++__lsx_vexth_du_wu (v4u32 _1) ++{ ++ return __builtin_lsx_vexth_du_wu (_1); ++} ++v2u64 ++__lsx_vexth_qu_du (v2u64 _1) ++{ ++ return __builtin_lsx_vexth_qu_du (_1); ++} ++v16i8 
++__lsx_vrotri_b (v16i8 _1) ++{ ++ return __builtin_lsx_vrotri_b (_1, 1); ++} ++v8i16 ++__lsx_vrotri_h (v8i16 _1) ++{ ++ return __builtin_lsx_vrotri_h (_1, 1); ++} ++v4i32 ++__lsx_vrotri_w (v4i32 _1) ++{ ++ return __builtin_lsx_vrotri_w (_1, 1); ++} ++v2i64 ++__lsx_vrotri_d (v2i64 _1) ++{ ++ return __builtin_lsx_vrotri_d (_1, 1); ++} ++v2i64 ++__lsx_vextl_q_d (v2i64 _1) ++{ ++ return __builtin_lsx_vextl_q_d (_1); ++} ++v16i8 ++__lsx_vsrlni_b_h (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vsrlni_b_h (_1, _2, 1); ++} ++v8i16 ++__lsx_vsrlni_h_w (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vsrlni_h_w (_1, _2, 1); ++} ++v4i32 ++__lsx_vsrlni_w_d (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vsrlni_w_d (_1, _2, 1); ++} ++v2i64 ++__lsx_vsrlni_d_q (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vsrlni_d_q (_1, _2, 1); ++} ++v16i8 ++__lsx_vsrlrni_b_h (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vsrlrni_b_h (_1, _2, 1); ++} ++v8i16 ++__lsx_vsrlrni_h_w (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vsrlrni_h_w (_1, _2, 1); ++} ++v4i32 ++__lsx_vsrlrni_w_d (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vsrlrni_w_d (_1, _2, 1); ++} ++v2i64 ++__lsx_vsrlrni_d_q (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vsrlrni_d_q (_1, _2, 1); ++} ++v16i8 ++__lsx_vssrlni_b_h (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vssrlni_b_h (_1, _2, 1); ++} ++v8i16 ++__lsx_vssrlni_h_w (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vssrlni_h_w (_1, _2, 1); ++} ++v4i32 ++__lsx_vssrlni_w_d (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vssrlni_w_d (_1, _2, 1); ++} ++v2i64 ++__lsx_vssrlni_d_q (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vssrlni_d_q (_1, _2, 1); ++} ++v16u8 ++__lsx_vssrlni_bu_h (v16u8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vssrlni_bu_h (_1, _2, 1); ++} ++v8u16 ++__lsx_vssrlni_hu_w (v8u16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vssrlni_hu_w (_1, _2, 1); ++} ++v4u32 ++__lsx_vssrlni_wu_d (v4u32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vssrlni_wu_d (_1, 
_2, 1); ++} ++v2u64 ++__lsx_vssrlni_du_q (v2u64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vssrlni_du_q (_1, _2, 1); ++} ++v16i8 ++__lsx_vssrlrni_b_h (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vssrlrni_b_h (_1, _2, 1); ++} ++v8i16 ++__lsx_vssrlrni_h_w (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vssrlrni_h_w (_1, _2, 1); ++} ++v4i32 ++__lsx_vssrlrni_w_d (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vssrlrni_w_d (_1, _2, 1); ++} ++v2i64 ++__lsx_vssrlrni_d_q (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vssrlrni_d_q (_1, _2, 1); ++} ++v16u8 ++__lsx_vssrlrni_bu_h (v16u8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vssrlrni_bu_h (_1, _2, 1); ++} ++v8u16 ++__lsx_vssrlrni_hu_w (v8u16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vssrlrni_hu_w (_1, _2, 1); ++} ++v4u32 ++__lsx_vssrlrni_wu_d (v4u32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vssrlrni_wu_d (_1, _2, 1); ++} ++v2u64 ++__lsx_vssrlrni_du_q (v2u64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vssrlrni_du_q (_1, _2, 1); ++} ++v16i8 ++__lsx_vsrani_b_h (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vsrani_b_h (_1, _2, 1); ++} ++v8i16 ++__lsx_vsrani_h_w (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vsrani_h_w (_1, _2, 1); ++} ++v4i32 ++__lsx_vsrani_w_d (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vsrani_w_d (_1, _2, 1); ++} ++v2i64 ++__lsx_vsrani_d_q (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vsrani_d_q (_1, _2, 1); ++} ++v16i8 ++__lsx_vsrarni_b_h (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vsrarni_b_h (_1, _2, 1); ++} ++v8i16 ++__lsx_vsrarni_h_w (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vsrarni_h_w (_1, _2, 1); ++} ++v4i32 ++__lsx_vsrarni_w_d (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vsrarni_w_d (_1, _2, 1); ++} ++v2i64 ++__lsx_vsrarni_d_q (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vsrarni_d_q (_1, _2, 1); ++} ++v16i8 ++__lsx_vssrani_b_h (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vssrani_b_h (_1, _2, 1); ++} ++v8i16 ++__lsx_vssrani_h_w (v8i16 _1, v8i16 _2) ++{ ++ 
return __builtin_lsx_vssrani_h_w (_1, _2, 1); ++} ++v4i32 ++__lsx_vssrani_w_d (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vssrani_w_d (_1, _2, 1); ++} ++v2i64 ++__lsx_vssrani_d_q (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vssrani_d_q (_1, _2, 1); ++} ++v16u8 ++__lsx_vssrani_bu_h (v16u8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vssrani_bu_h (_1, _2, 1); ++} ++v8u16 ++__lsx_vssrani_hu_w (v8u16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vssrani_hu_w (_1, _2, 1); ++} ++v4u32 ++__lsx_vssrani_wu_d (v4u32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vssrani_wu_d (_1, _2, 1); ++} ++v2u64 ++__lsx_vssrani_du_q (v2u64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vssrani_du_q (_1, _2, 1); ++} ++v16i8 ++__lsx_vssrarni_b_h (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vssrarni_b_h (_1, _2, 1); ++} ++v8i16 ++__lsx_vssrarni_h_w (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vssrarni_h_w (_1, _2, 1); ++} ++v4i32 ++__lsx_vssrarni_w_d (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vssrarni_w_d (_1, _2, 1); ++} ++v2i64 ++__lsx_vssrarni_d_q (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vssrarni_d_q (_1, _2, 1); ++} ++v16u8 ++__lsx_vssrarni_bu_h (v16u8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vssrarni_bu_h (_1, _2, 1); ++} ++v8u16 ++__lsx_vssrarni_hu_w (v8u16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vssrarni_hu_w (_1, _2, 1); ++} ++v4u32 ++__lsx_vssrarni_wu_d (v4u32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vssrarni_wu_d (_1, _2, 1); ++} ++v2u64 ++__lsx_vssrarni_du_q (v2u64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vssrarni_du_q (_1, _2, 1); ++} ++v4i32 ++__lsx_vpermi_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vpermi_w (_1, _2, 1); ++} ++v16i8 ++__lsx_vld (void *_1) ++{ ++ return __builtin_lsx_vld (_1, 1); ++} ++void ++__lsx_vst (v16i8 _1, void *_2) ++{ ++ return __builtin_lsx_vst (_1, _2, 1); ++} ++v16i8 ++__lsx_vssrlrn_b_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vssrlrn_b_h (_1, _2); ++} ++v8i16 ++__lsx_vssrlrn_h_w (v4i32 _1, v4i32 _2) ++{ ++ return 
__builtin_lsx_vssrlrn_h_w (_1, _2); ++} ++v4i32 ++__lsx_vssrlrn_w_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vssrlrn_w_d (_1, _2); ++} ++v16i8 ++__lsx_vssrln_b_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vssrln_b_h (_1, _2); ++} ++v8i16 ++__lsx_vssrln_h_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vssrln_h_w (_1, _2); ++} ++v4i32 ++__lsx_vssrln_w_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vssrln_w_d (_1, _2); ++} ++v16i8 ++__lsx_vorn_v (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vorn_v (_1, _2); ++} ++v2i64 ++__lsx_vldi () ++{ ++ return __builtin_lsx_vldi (1); ++} ++v16i8 ++__lsx_vshuf_b (v16i8 _1, v16i8 _2, v16i8 _3) ++{ ++ return __builtin_lsx_vshuf_b (_1, _2, _3); ++} ++v16i8 ++__lsx_vldx (void *_1) ++{ ++ return __builtin_lsx_vldx (_1, 1); ++} ++void ++__lsx_vstx (v16i8 _1, void *_2) ++{ ++ return __builtin_lsx_vstx (_1, _2, 1); ++} ++v2u64 ++__lsx_vextl_qu_du (v2u64 _1) ++{ ++ return __builtin_lsx_vextl_qu_du (_1); ++} ++int ++__lsx_bnz_b (v16u8 _1) ++{ ++ return __builtin_lsx_bnz_b (_1); ++} ++int ++__lsx_bnz_d (v2u64 _1) ++{ ++ return __builtin_lsx_bnz_d (_1); ++} ++int ++__lsx_bnz_h (v8u16 _1) ++{ ++ return __builtin_lsx_bnz_h (_1); ++} ++int ++__lsx_bnz_v (v16u8 _1) ++{ ++ return __builtin_lsx_bnz_v (_1); ++} ++int ++__lsx_bnz_w (v4u32 _1) ++{ ++ return __builtin_lsx_bnz_w (_1); ++} ++int ++__lsx_bz_b (v16u8 _1) ++{ ++ return __builtin_lsx_bz_b (_1); ++} ++int ++__lsx_bz_d (v2u64 _1) ++{ ++ return __builtin_lsx_bz_d (_1); ++} ++int ++__lsx_bz_h (v8u16 _1) ++{ ++ return __builtin_lsx_bz_h (_1); ++} ++int ++__lsx_bz_v (v16u8 _1) ++{ ++ return __builtin_lsx_bz_v (_1); ++} ++int ++__lsx_bz_w (v4u32 _1) ++{ ++ return __builtin_lsx_bz_w (_1); ++} ++v2i64 ++__lsx_vfcmp_caf_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfcmp_caf_d (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_caf_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_caf_s (_1, _2); ++} ++v2i64 ++__lsx_vfcmp_ceq_d (v2f64 _1, v2f64 _2) ++{ ++ return 
__builtin_lsx_vfcmp_ceq_d (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_ceq_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_ceq_s (_1, _2); ++} ++v2i64 ++__lsx_vfcmp_cle_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfcmp_cle_d (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_cle_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_cle_s (_1, _2); ++} ++v2i64 ++__lsx_vfcmp_clt_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfcmp_clt_d (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_clt_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_clt_s (_1, _2); ++} ++v2i64 ++__lsx_vfcmp_cne_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfcmp_cne_d (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_cne_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_cne_s (_1, _2); ++} ++v2i64 ++__lsx_vfcmp_cor_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfcmp_cor_d (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_cor_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_cor_s (_1, _2); ++} ++v2i64 ++__lsx_vfcmp_cueq_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfcmp_cueq_d (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_cueq_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_cueq_s (_1, _2); ++} ++v2i64 ++__lsx_vfcmp_cule_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfcmp_cule_d (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_cule_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_cule_s (_1, _2); ++} ++v2i64 ++__lsx_vfcmp_cult_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfcmp_cult_d (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_cult_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_cult_s (_1, _2); ++} ++v2i64 ++__lsx_vfcmp_cun_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfcmp_cun_d (_1, _2); ++} ++v2i64 ++__lsx_vfcmp_cune_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfcmp_cune_d (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_cune_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_cune_s (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_cun_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_cun_s 
(_1, _2); ++} ++v2i64 ++__lsx_vfcmp_saf_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfcmp_saf_d (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_saf_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_saf_s (_1, _2); ++} ++v2i64 ++__lsx_vfcmp_seq_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfcmp_seq_d (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_seq_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_seq_s (_1, _2); ++} ++v2i64 ++__lsx_vfcmp_sle_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfcmp_sle_d (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_sle_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_sle_s (_1, _2); ++} ++v2i64 ++__lsx_vfcmp_slt_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfcmp_slt_d (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_slt_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_slt_s (_1, _2); ++} ++v2i64 ++__lsx_vfcmp_sne_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfcmp_sne_d (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_sne_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_sne_s (_1, _2); ++} ++v2i64 ++__lsx_vfcmp_sor_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfcmp_sor_d (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_sor_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_sor_s (_1, _2); ++} ++v2i64 ++__lsx_vfcmp_sueq_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfcmp_sueq_d (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_sueq_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_sueq_s (_1, _2); ++} ++v2i64 ++__lsx_vfcmp_sule_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfcmp_sule_d (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_sule_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_sule_s (_1, _2); ++} ++v2i64 ++__lsx_vfcmp_sult_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfcmp_sult_d (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_sult_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_sult_s (_1, _2); ++} ++v2i64 ++__lsx_vfcmp_sun_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfcmp_sun_d (_1, _2); ++} ++v2i64 
++__lsx_vfcmp_sune_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfcmp_sune_d (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_sune_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_sune_s (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_sun_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_sun_s (_1, _2); ++} ++v16i8 ++__lsx_vrepli_b () ++{ ++ return __builtin_lsx_vrepli_b (1); ++} ++v2i64 ++__lsx_vrepli_d () ++{ ++ return __builtin_lsx_vrepli_d (1); ++} ++v8i16 ++__lsx_vrepli_h () ++{ ++ return __builtin_lsx_vrepli_h (1); ++} ++v4i32 ++__lsx_vrepli_w () ++{ ++ return __builtin_lsx_vrepli_w (1); ++} +-- +2.33.0 + diff --git a/LoongArch-Add-tests-for-SX-vector-addition-instructi.patch b/LoongArch-Add-tests-for-SX-vector-addition-instructi.patch new file mode 100644 index 0000000000000000000000000000000000000000..441b2c0b2556835ed09d77f119cd08bd01b619d6 --- /dev/null +++ b/LoongArch-Add-tests-for-SX-vector-addition-instructi.patch @@ -0,0 +1,7181 @@ +From 2cb3122527add8fee54dca91824d82a02d5602e3 Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Mon, 11 Sep 2023 09:58:48 +0800 +Subject: [PATCH 080/124] LoongArch: Add tests for SX vector addition + instructions. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lsx/lsx-vadd.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vadda.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vaddi.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vaddwev-1.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vaddwev-2.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vaddwev-3.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vaddwod-1.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vaddwod-2.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vaddwod-3.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vhaddw-1.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vhaddw-2.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vmadd.c: New test. 
+ * gcc.target/loongarch/vector/lsx/lsx-vmaddwev-1.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vmaddwev-2.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vmaddwev-3.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vmaddwod-1.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vmaddwod-2.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vmaddwod-3.c: New test. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lsx/lsx-vadd.c | 416 +++++++++++++++ + .../loongarch/vector/lsx/lsx-vadda.c | 344 ++++++++++++ + .../loongarch/vector/lsx/lsx-vaddi.c | 251 +++++++++ + .../loongarch/vector/lsx/lsx-vaddwev-1.c | 335 ++++++++++++ + .../loongarch/vector/lsx/lsx-vaddwev-2.c | 344 ++++++++++++ + .../loongarch/vector/lsx/lsx-vaddwev-3.c | 425 +++++++++++++++ + .../loongarch/vector/lsx/lsx-vaddwod-1.c | 408 +++++++++++++++ + .../loongarch/vector/lsx/lsx-vaddwod-2.c | 344 ++++++++++++ + .../loongarch/vector/lsx/lsx-vaddwod-3.c | 237 +++++++++ + .../loongarch/vector/lsx/lsx-vhaddw-1.c | 488 ++++++++++++++++++ + .../loongarch/vector/lsx/lsx-vhaddw-2.c | 452 ++++++++++++++++ + .../loongarch/vector/lsx/lsx-vmadd.c | 450 ++++++++++++++++ + .../loongarch/vector/lsx/lsx-vmaddwev-1.c | 472 +++++++++++++++++ + .../loongarch/vector/lsx/lsx-vmaddwev-2.c | 383 ++++++++++++++ + .../loongarch/vector/lsx/lsx-vmaddwev-3.c | 383 ++++++++++++++ + .../loongarch/vector/lsx/lsx-vmaddwod-1.c | 372 +++++++++++++ + .../loongarch/vector/lsx/lsx-vmaddwod-2.c | 438 ++++++++++++++++ + .../loongarch/vector/lsx/lsx-vmaddwod-3.c | 460 +++++++++++++++++ + 18 files changed, 7002 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadd.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadda.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-1.c + create mode 100644 
gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmadd.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-3.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadd.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadd.c +new file mode 100644 +index 000000000..7cfb989e4 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadd.c +@@ -0,0 +1,416 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int 
unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000b0000000b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000201000000000b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000fc0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000b0000000b; ++ *((unsigned long *)&__m128i_result[0]) = 0x0002010000fc000b; ++ __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000017fda829; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000017fda829; ++ __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000001fffe; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7f7f7f7f00107f04; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7f0000fd7f0000fd; ++ *((unsigned long *)&__m128i_result[1]) = 0x7e7e7e7eff0f7f04; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f0000fd7f01fffb; ++ __m128i_out = __lsx_vadd_b (__m128i_op0, 
__m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0080000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf4b6f3f52f4ef4a8; ++ *((unsigned long *)&__m128i_op1[1]) = 0x195f307a5d04acbb; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6a1a3fbb3c90260e; ++ *((unsigned long *)&__m128i_result[1]) = 0x19df307a5d04acbb; ++ *((unsigned long *)&__m128i_result[0]) = 0x5ed032b06bde1ab6; ++ __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5555001400005111; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffabbeab55110140; ++ *((unsigned long *)&__m128i_op1[1]) = 0x5555001400005111; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffabbeab55110140; ++ *((unsigned long *)&__m128i_result[1]) = 0xaaaa00280000a222; ++ *((unsigned long *)&__m128i_result[0]) = 0xfe567c56aa220280; ++ __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xf51cf8dad6040188; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0982e2daf234ed87; ++ *((unsigned long *)&__m128i_result[1]) = 0xf51cf8dad6040188; ++ *((unsigned long *)&__m128i_result[0]) = 0x0982e2daf234ed87; ++ __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000490000004d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000001ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000073; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000002a; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000049000000c0; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000001ffffff29; ++ __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000bd3d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000007fff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000000bd30; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000d7fff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000007a6d; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000dfefe0000; ++ __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffd000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffd000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfefa000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m128i_result[0]) = 0xfefefefefefefefe; ++ __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x0038000000051fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003c000000022021; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fff0101ffffe000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fffffffa0204000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7f370101ff04ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f3bffffa0226021; ++ __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1baf8eabd26bc629; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1c2640b9a8e9fb49; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0002dab8746acf8e; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00036dd1c5c15856; ++ *((unsigned long *)&__m128i_result[1]) = 0x1bb1686346d595b7; ++ *((unsigned long *)&__m128i_result[0]) = 0x1c29ad8a6daa539f; ++ __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000003; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfeffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfeffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfeffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfeffffffffff0002; ++ __m128i_out = __lsx_vadd_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadd_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001ffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff800000c3080000; ++ *((unsigned long *)&__m128i_result[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_result[0]) = 0xff81ffffc3080000; ++ __m128i_out = __lsx_vadd_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x004200a000000000; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x004200a000200001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x004200a000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x004200a000200001; ++ __m128i_out = __lsx_vadd_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000001f0000001f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000001f0000001f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000001f0000001f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001f0000001f; ++ __m128i_out = __lsx_vadd_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00ff00ff; ++ __m128i_out = __lsx_vadd_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadd_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0029aeaca57d74e6; ++ *((unsigned long *)&__m128i_op0[0]) = 0xdbe332365392c686; ++ 
*((unsigned long *)&__m128i_op1[1]) = 0x000056f64adb9464; ++ *((unsigned long *)&__m128i_op1[0]) = 0x29ca096f235819c2; ++ *((unsigned long *)&__m128i_result[1]) = 0x002a05a2f059094a; ++ *((unsigned long *)&__m128i_result[0]) = 0x05ad3ba576eae048; ++ __m128i_out = __lsx_vadd_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadd_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000010; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000010; ++ __m128i_out = __lsx_vadd_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000400; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000040d; ++ __m128i_out = __lsx_vadd_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001300000013; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000001300000013; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000001300000013; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001300000013; ++ __m128i_out = __lsx_vadd_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadd_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000100; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000100; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000100; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000001000000ff; ++ __m128i_out = __lsx_vadd_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000300000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000002fffffffb; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000010000fffb; ++ __m128i_out = __lsx_vadd_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadd_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadd_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000060000000e; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000001201fe01e9; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000060000000e; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000001201fe01e9; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000c0000001c; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000002403fc03d2; ++ __m128i_out = __lsx_vadd_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfff1000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0xfff1000100010001; ++ __m128i_out = __lsx_vadd_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xd70b30c96ea9f4e8; ++ *((unsigned long *)&__m128i_op1[0]) = 0xa352bfac9269e0aa; ++ 
*((unsigned long *)&__m128i_result[1]) = 0xd70b30c96ea9f4e8; ++ *((unsigned long *)&__m128i_result[0]) = 0xa352bfac9269e0aa; ++ __m128i_out = __lsx_vadd_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffa; ++ __m128i_out = __lsx_vadd_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001001100110068; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001001100110067; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vadd_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x379674c000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3789f68000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x379674c000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x3789f68000000000; ++ __m128i_out = __lsx_vadd_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadd_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000555889; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000002580f01; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00060fbf02040fbf; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00020fbf02000fbf; ++ *((unsigned long *)&__m128i_result[1]) = 0x00060fbf02596848; ++ *((unsigned long *)&__m128i_result[0]) = 0x00020fbf04581ec0; ++ __m128i_out = __lsx_vadd_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001388928513889; ++ *((unsigned long *)&__m128i_op0[0]) = 0x006938094a013889; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001388928513889; ++ *((unsigned long *)&__m128i_op1[0]) = 0x006938094a013889; ++ *((unsigned long *)&__m128i_result[1]) = 0x0002711250a27112; ++ *((unsigned long *)&__m128i_result[0]) = 0x00d2701294027112; ++ __m128i_out = __lsx_vadd_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x001effae001effae; ++ *((unsigned long *)&__m128i_op0[0]) = 0x001effae001effae; ++ *((unsigned long *)&__m128i_op1[1]) = 0x2006454690d3de87; ++ *((unsigned long *)&__m128i_op1[0]) = 0x2006454690d3de87; ++ *((unsigned long *)&__m128i_result[1]) = 0x202544f490f2de35; ++ *((unsigned long *)&__m128i_result[0]) = 0x202544f490f2de35; ++ __m128i_out = __lsx_vadd_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadda.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadda.c +new file mode 100644 +index 000000000..4bb699eab +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadda.c +@@ -0,0 +1,344 @@ ++/* { dg-do run } */ ++/* { 
dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadda_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadda_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x52527d7d52527d7d; ++ *((unsigned long *)&__m128i_op1[0]) = 0x52527d7d52527d7d; ++ *((unsigned long *)&__m128i_result[1]) = 0x52527d7d52527d7d; ++ *((unsigned long *)&__m128i_result[0]) = 
0x52527d7d52527d7d; ++ __m128i_out = __lsx_vadda_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000001fffc001f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101010202050120; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010102020202; ++ __m128i_out = __lsx_vadda_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0003000300030003; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0003000700020005; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0003000300030003; ++ *((unsigned long *)&__m128i_result[0]) = 0x0003000700020005; ++ __m128i_out = __lsx_vadda_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vadda_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vadda_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4f8000004f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4f8000004f800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x4f8000004f800000; ++ *((unsigned long *)&__m128i_result[0]) = 0x4f8000004f800000; ++ __m128i_out = __lsx_vadda_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadda_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m128i_result[1]) = 0x0003000300030004; ++ *((unsigned long *)&__m128i_result[0]) = 0x0003000300030004; ++ __m128i_out = __lsx_vadda_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5c9c9c9ce3636363; ++ *((unsigned long *)&__m128i_op0[0]) = 0x63635c9e63692363; ++ *((unsigned long *)&__m128i_op1[1]) = 0xf0fd800080000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000a00028004000; ++ *((unsigned long *)&__m128i_result[1]) = 0x6b9fe3649c9d6363; ++ *((unsigned long *)&__m128i_result[0]) = 0x6363bc9e8b696363; ++ __m128i_out = __lsx_vadda_h (__m128i_op0, 
__m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadda_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadda_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1111111111111111; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1111111111111111; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_result[1]) = 0x1111113111111131; ++ *((unsigned long *)&__m128i_result[0]) = 0x1111113111111131; ++ __m128i_out = __lsx_vadda_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000006a9a5c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000092444; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000006a9a5c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000092444; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000d4ccb8; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000124888; ++ __m128i_out = __lsx_vadda_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x76f424887fffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfff082f000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x003f000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000f7d1000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x773324887fffffff; ++ __m128i_out = __lsx_vadda_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffacdb6dbecac; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1f5533a694f902c0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x5a6f5c53ebed3faa; ++ *((unsigned long *)&__m128i_op1[0]) = 0xa36aca4435b8b8e1; ++ *((unsigned long *)&__m128i_result[1]) = 0x5a6f61865d36d3aa; ++ *((unsigned long *)&__m128i_result[0]) = 0x7bea6962a0bfb621; ++ __m128i_out = __lsx_vadda_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000008140c80; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000008140c80; ++ __m128i_out = __lsx_vadda_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000fffe0000ff45; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff000000b9; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffd5002affffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x343d8dc6b0ed5a08; ++ *((unsigned long *)&__m128i_result[1]) = 0x012b012c01010246; ++ *((unsigned long *)&__m128i_result[0]) = 0x353e743b50135a4f; ++ __m128i_out = __lsx_vadda_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0003c853c843c87e; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0003c853c843c87e; ++ *((unsigned long *)&__m128i_result[1]) = 0x0003c853c843c87e; ++ *((unsigned long *)&__m128i_result[0]) = 0x0003c853c843c87e; ++ __m128i_out = __lsx_vadda_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000200000002000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffe000ffdf; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000200000002001; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000001fff0021; ++ __m128i_out = __lsx_vadda_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101010101010109; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vadda_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000005452505; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000004442403e4; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffe0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000005452505; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000044525043c; ++ __m128i_out = __lsx_vadda_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x5d7f5d807fea807f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x5d7f5d807fea807f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xbafebb00ffd500fe; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadda_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000208000002080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000208000002080; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000208000002080; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000208000002080; ++ __m128i_out = __lsx_vadda_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000000000000000; ++ __m128i_out = __lsx_vadda_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x003f0000003f0000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x003f0000003f0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x803e0000803e0000; ++ *((unsigned long *)&__m128i_result[0]) = 0x803e0000803e0000; ++ __m128i_out = __lsx_vadda_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000800000008000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000800000008000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000800000008000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000800000008000; ++ __m128i_out = __lsx_vadda_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001400000014; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000001400000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfff9000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffc000400000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0007001400000014; ++ *((unsigned long *)&__m128i_result[0]) = 0x0004001000000000; ++ __m128i_out = __lsx_vadda_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[1]) = 0xfefeff00fefeff00; ++ *((unsigned long *)&__m128i_result[0]) = 0xfefeff00fefeff00; ++ __m128i_out = __lsx_vadda_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadda_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000084d12ce; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0000000024170000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000020300000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000084d12ce; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000044470000; ++ __m128i_out = __lsx_vadda_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff01ff01ac025c87; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff01ff01ac465ca1; ++ *((unsigned long *)&__m128i_result[1]) = 0x64616462b76106dc; ++ *((unsigned long *)&__m128i_result[0]) = 0x64616462b71d06c2; ++ __m128i_out = __lsx_vadda_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffaeffaeffaeffae; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffaeffaeffaeffae; ++ *((unsigned long *)&__m128i_result[1]) = 0x0051005200510052; ++ *((unsigned long *)&__m128i_result[0]) = 0x0051005200510052; ++ __m128i_out = __lsx_vadda_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3a8000003a800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3bc000003a800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0a0000000a000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0a0000000a000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x4480000044800000; ++ *((unsigned long *)&__m128i_result[0]) = 0x45c0000044800000; ++ __m128i_out = __lsx_vadda_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_result[0]) = 0x6363636463636363; ++ __m128i_out = __lsx_vadda_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddi.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddi.c +new file mode 100644 +index 000000000..77afabe92 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddi.c +@@ -0,0 +1,251 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x1414141414141415; ++ *((unsigned long *)&__m128i_result[0]) = 0x1414141414141415; ++ __m128i_out = __lsx_vaddi_bu (__m128i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0505050505050505; ++ *((unsigned long *)&__m128i_result[0]) = 0x0505050504040404; ++ __m128i_out = __lsx_vaddi_bu (__m128i_op0, 0x5); 
++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000008140c80; ++ *((unsigned long *)&__m128i_result[1]) = 0x1f1f1f1f1f1f1f1f; ++ *((unsigned long *)&__m128i_result[0]) = 0x1f1f1f1f27332b9f; ++ __m128i_out = __lsx_vaddi_bu (__m128i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0303030303030303; ++ *((unsigned long *)&__m128i_result[0]) = 0x0303030303030304; ++ __m128i_out = __lsx_vaddi_bu (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[1]) = 0x8f8f8f8f8f8f8f8f; ++ *((unsigned long *)&__m128i_result[0]) = 0x8f8f8f8f8f8f8f8f; ++ __m128i_out = __lsx_vaddi_bu (__m128i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0018001800180018; ++ *((unsigned long *)&__m128i_result[0]) = 0x0018001800180018; ++ __m128i_out = __lsx_vaddi_hu (__m128i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000080000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000080000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0019081900190019; ++ *((unsigned long *)&__m128i_result[0]) = 0x0019081900190019; ++ __m128i_out = __lsx_vaddi_hu (__m128i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_result[0]) = 0x000a000a000a000a; ++ __m128i_out = __lsx_vaddi_hu (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffc1000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffcc000b000b000b; ++ *((unsigned long *)&__m128i_result[0]) = 0x000b000b010a000b; ++ __m128i_out = __lsx_vaddi_hu (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m128i_result[0]) = 0x001f001f001f001f; ++ __m128i_out = __lsx_vaddi_hu (__m128i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x001c001c001c001c; ++ *((unsigned long *)&__m128i_result[0]) = 0x001c001c001c001c; ++ __m128i_out = __lsx_vaddi_hu (__m128i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x67eb85afb2ebb000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long *)&__m128i_result[1]) = 0x680485c8b304b019; ++ *((unsigned long *)&__m128i_result[0]) = 0xc89d7f0fed582019; ++ __m128i_out = __lsx_vaddi_hu (__m128i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000a0000000a; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000a0000000a; ++ __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0xa); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000001000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffe000ffff1fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000090100000a; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffe009ffff2008; ++ __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000300000003; ++ __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfc01fcfefc02fdf7; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfe00fcfffe01fd01; ++ *((unsigned long *)&__m128i_result[1]) = 0xfc01fd13fc02fe0c; ++ *((unsigned long *)&__m128i_result[0]) = 0xfe00fd14fe01fd16; ++ __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000001300000013; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001300000013; ++ __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000bd3d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000007fff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000c0000bd49; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000c7fff000c; ++ __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fffe0001; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000500000005; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000005fffe0006; ++ __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000fffffeff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000009ffffff08; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000900000009; ++ __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x55aa55aa55aa55ab; ++ *((unsigned long *)&__m128i_op0[0]) = 0xaa55555655aaaaa8; ++ *((unsigned long *)&__m128i_result[1]) = 0x55aa55c355aa55c4; ++ *((unsigned long *)&__m128i_result[0]) = 0xaa55556f55aaaac1; ++ __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000e0000002e; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000e0000004e; ++ __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000400000004; ++ __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x003f0000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003f0000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x003f000400000003; ++ *((unsigned long *)&__m128i_result[0]) = 0x003f000400000003; ++ __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff8000010f800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000900000009; ++ *((unsigned long *)&__m128i_result[0]) = 0xff80000a0f800009; ++ __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x30eb020302101b03; ++ *((unsigned long *)&__m128i_op0[0]) = 0x020310d0c0030220; ++ *((unsigned long *)&__m128i_result[1]) = 0x30eb022002101b20; ++ *((unsigned long *)&__m128i_result[0]) = 0x020310edc003023d; ++ __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x59f7fd7059f7fd70; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000001700000017; ++ *((unsigned long *)&__m128i_result[0]) = 0x59f7fd8759f7fd87; ++ __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6420e0208400c4c4; ++ *((unsigned long *)&__m128i_op0[0]) = 0x20c4e0c4e0da647a; ++ *((unsigned long *)&__m128i_result[1]) = 0x6420e0208400c4e3; ++ *((unsigned long *)&__m128i_result[0]) = 0x20c4e0c4e0da6499; ++ __m128i_out = __lsx_vaddi_du (__m128i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x21201f1e1d001b1a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1918171615141312; ++ *((unsigned long *)&__m128i_result[1]) = 0x21201f1e1d001b25; ++ *((unsigned long *)&__m128i_result[0]) = 0x191817161514131d; ++ __m128i_out = __lsx_vaddi_du (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000014; ++ __m128i_out = __lsx_vaddi_du (__m128i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000004000000040; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00007770ffff9411; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000400000004c; ++ *((unsigned long *)&__m128i_result[0]) = 0x00007770ffff941d; ++ __m128i_out = __lsx_vaddi_du (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000016; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000016; ++ __m128i_out = __lsx_vaddi_du (__m128i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000800000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000080000000b; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000000b; ++ __m128i_out = __lsx_vaddi_du (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-1.c +new file mode 100644 +index 000000000..b7b16a325 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-1.c +@@ -0,0 +1,335 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ 
++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000000007b; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000100010001007c; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vaddwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3fffffff80000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00003ffd000a4000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffd000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffcffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000fffd000a0000; ++ __m128i_out = __lsx_vaddwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0800080008000800; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0800080008000800; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff0000ffff0000; ++ __m128i_out = __lsx_vaddwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000490000004d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000001ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff000000ff00ff00; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff00ff0000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000049ffffff4d; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffff01ffffffff; ++ __m128i_out = __lsx_vaddwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vaddwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000005e695e95; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5e695e96c396b402; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000005e94; ++ *((unsigned long *)&__m128i_result[0]) = 0x00005e96ffffb402; ++ __m128i_out = __lsx_vaddwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffb; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffb; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x00000100000000fc; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000100000000fc; ++ __m128i_out = __lsx_vaddwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000158; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000158; ++ __m128i_out = __lsx_vaddwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vaddwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000005d5d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000005d5d; ++ __m128i_out = __lsx_vaddwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5c9c9c9ce3636363; ++ *((unsigned long *)&__m128i_op0[0]) = 0x63635c9e63692363; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffe3636363; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000063692363; ++ __m128i_out = __lsx_vaddwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0202020202020203; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0202020202020203; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000002020202; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000002020202; ++ __m128i_out = __lsx_vaddwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1817161517161514; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1615141315141312; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x76f424887fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000017161515; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000095141311; ++ __m128i_out = __lsx_vaddwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff00000000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000fffe; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vaddwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfdfef9ff0efff900; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffcfd000000fb00; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001fe00f8000700; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000fb01; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000007000000; ++ __m128i_out = __lsx_vaddwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000080806362; ++ *((unsigned long *)&__m128i_op1[0]) = 0x807f808000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff80806362; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x0000000000ff00ff; ++ __m128i_out = __lsx_vaddwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000010002; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ff960015; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000010002; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffff960015; ++ __m128i_out = __lsx_vaddwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xf6548a1747e59090; ++ *((unsigned long *)&__m128i_op0[0]) = 0x27b169bbb8145f50; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000047e59090; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffb8145f50; ++ __m128i_out = __lsx_vaddwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00bbfff7fffffff7; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff008ff820; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00bbfff7fffffff7; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff008ff820; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000011ff040; ++ __m128i_out = __lsx_vaddwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000100010001fffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x000100010001fffd; ++ __m128i_out = __lsx_vaddwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vaddwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001000000010; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffc2ffe700000007; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffc100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffc100010001; ++ __m128i_out = __lsx_vaddwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff80df00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000dfa6e0c6; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000d46cdc13; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 
0x00000000d46cdc13; ++ __m128i_out = __lsx_vaddwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfe813f00fe813f00; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfe813f00fe813f00; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfe813f00fe813f00; ++ __m128i_out = __lsx_vaddwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffe; ++ __m128i_out = __lsx_vaddwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-2.c +new file mode 100644 +index 000000000..a407cadfb +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-2.c +@@ -0,0 +1,344 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w 
-fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0400040004000400; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x061006100613030c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4d6814ef9c77ce46; ++ *((unsigned long *)&__m128i_result[1]) = 0x010f010f0112010b; ++ *((unsigned long *)&__m128i_result[0]) = 
0x016701ee01760145; ++ __m128i_out = __lsx_vaddwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffac0a000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000ac00000000; ++ __m128i_out = __lsx_vaddwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf589caff5605f2fa; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000014eb54ab; ++ *((unsigned long *)&__m128i_op1[0]) = 0x14eb6a002a406a00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000eb00ab; ++ *((unsigned long *)&__m128i_result[0]) = 0x017400ff004500fa; 
++ __m128i_out = __lsx_vaddwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x10f917d72d3d01e4; ++ *((unsigned long *)&__m128i_op0[0]) = 0x203e16d116de012b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000017d7000001e4; ++ *((unsigned long *)&__m128i_result[0]) = 0x000016d10000012b; ++ __m128i_out = __lsx_vaddwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff3fbfffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000100fe000100fe; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff; ++ __m128i_out = 
__lsx_vaddwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0003c853c843c87e; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0003c853c843c87e; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff000000ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001c8520000c97d; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001c8520001c87d; ++ __m128i_out = __lsx_vaddwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff; ++ __m128i_out = __lsx_vaddwev_w_hu 
(__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x67eb85afb2ebb000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffac0a000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000085af0000b000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00017ea200002000; ++ __m128i_out = __lsx_vaddwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vaddwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000024; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000024; ++ __m128i_out = __lsx_vaddwev_d_wu (__m128i_op0, 
__m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00307028003f80b0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0040007fff800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000003f80b0; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ff800000; ++ __m128i_out = __lsx_vaddwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vaddwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000001f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000001f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000001f; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000001f; ++ __m128i_out = __lsx_vaddwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff80ffffff80ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000018080807f; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000001ffff80fe; ++ __m128i_out = __lsx_vaddwev_d_wu (__m128i_op0, __m128i_op1); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fff000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vaddwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vaddwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffff8000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffff8000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffff8000000000; ++ __m128i_out = __lsx_vaddwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1000000010000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000180100100000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000b5207f80; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00001801b5307f80; ++ __m128i_out = __lsx_vaddwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001300000013; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000001300000013; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001300000013; ++ __m128i_out = __lsx_vaddwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffe218ffffea10; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffff208fffffa02; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffff208fffffa02; ++ __m128i_out = __lsx_vaddwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000080000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000080000000; ++ __m128i_out = __lsx_vaddwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000003fbf3fbf; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fff7fff7fff7ff8; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000000f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fff7fff7fff8007; ++ __m128i_out = __lsx_vaddwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0002000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000014; ++ __m128i_out = __lsx_vaddwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f8000007f800000; ++ __m128i_out = __lsx_vaddwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffd27db010d20fbf; ++ *((unsigned long *)&__m128i_op1[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffd27db010d20fbf; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0xffa4fb6021a41f7e; ++ __m128i_out = __lsx_vaddwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-3.c +new file mode 100644 +index 000000000..4d5c60998 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-3.c +@@ -0,0 +1,425 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0403cfcf01c1595e; ++ *((unsigned long *)&__m128i_op0[0]) = 0x837cd5db43fc55d4; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0404038383838404; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0404038383838404; ++ *((unsigned long *)&__m128i_result[1]) = 0x0007005200440062; ++ *((unsigned long *)&__m128i_result[0]) = 0x0080005e007f00d8; ++ __m128i_out = __lsx_vaddwev_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vaddwev_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffcafff8ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000000000a0; ++ *((unsigned long *)&__m128i_op1[1]) = 0xe6d4572c8a5835bc; ++ *((unsigned long *)&__m128i_op1[0]) = 0xe5017c2ac9ca9fd0; ++ *((unsigned long *)&__m128i_result[1]) = 0x00d3012b015700bb; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001002affca0070; ++ __m128i_out = __lsx_vaddwev_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000fea0000fffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x6363771163631745; ++ *((unsigned long *)&__m128i_op1[0]) = 0x636363ec6363636c; ++ *((unsigned long *)&__m128i_result[1]) = 0x006300fb00630143; ++ *((unsigned long *)&__m128i_result[0]) = 0x0063ffec0063006c; ++ __m128i_out = __lsx_vaddwev_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x9c9c9c9c9c9c9c9d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffff0000; ++ __m128i_out = __lsx_vaddwev_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8080ffffffff8080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00008080ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xff80ffffffffff80; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ff80ffffffff; ++ __m128i_out = __lsx_vaddwev_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00197f26cb658837; ++ *((unsigned long *)&__m128i_op0[0]) = 0x01009aa4a301084b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x001effae001effae; ++ *((unsigned long *)&__m128i_op1[0]) = 0x001effae001effae; ++ *((unsigned long *)&__m128i_result[1]) = 0x0037ffd40083ffe5; ++ *((unsigned long *)&__m128i_result[0]) = 0x001e0052001ffff9; ++ __m128i_out = __lsx_vaddwev_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff00ffffff00ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000f50000000900; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000090900000998; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff000900ffff98; ++ __m128i_out = __lsx_vaddwev_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x10f881a20ffd02b0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ff800000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfff8ffa2fffdffb0; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ff800000; ++ __m128i_out = __lsx_vaddwev_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_w_hu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_w_hu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000000007b; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000007b; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vaddwev_w_hu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1e0200001e020000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffcfffcfffcfffd; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffcfffdfffcfffd; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffcfffffffd; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffdfffffffd; ++ __m128i_out = __lsx_vaddwev_w_hu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ff000000ff00; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ff000000ff00; ++ __m128i_out = __lsx_vaddwev_w_hu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000005; ++ __m128i_out = __lsx_vaddwev_w_hu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000010100000101; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000010100000101; ++ __m128i_out = __lsx_vaddwev_w_hu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000400000004; ++ __m128i_out = __lsx_vaddwev_w_hu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_w_hu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8000000080000008; ++ *((unsigned long *)&__m128i_op1[0]) = 0xa2f54a1ea2f54a1e; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_result[0]) = 0x00004a1e00004a1e; ++ __m128i_out = __lsx_vaddwev_w_hu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000868686868686; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000868600008785; ++ __m128i_out = __lsx_vaddwev_w_hu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x63636b6afe486741; ++ *((unsigned long *)&__m128i_op0[0]) = 0x41f8e880ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xe363636363abdf16; ++ *((unsigned long *)&__m128i_op1[0]) = 0x41f8e08016161198; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000cecd00004657; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000c90000011197; ++ __m128i_out = __lsx_vaddwev_w_hu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000001000f000e; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000fff1000ffffe; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000000f000e; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000000ffffe; ++ __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0c07e181ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3430af9effffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000fe00ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000fe00ff; ++ __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00060012000e002b; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000049ffffffaa; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000000e002b; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffaa; ++ __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000006f00001f0a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000958affff995d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000bfffffffe0f6; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000001f0a; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffff7a53; ++ __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff7f80ffff7f80; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff7f80ffff7f80; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff7f80ffff7f80; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff7f80ffff7f80; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000fffeff00; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000fffeff00; ++ __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0008000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000003dffc2; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000003dffc2; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000003dffc2; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000003dffc2; ++ __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0080008000800080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0080006b0000000b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000800080; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000000b; ++ __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000ff00ff; ++ __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000455555555; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000055555555; ++ __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff7f810100001000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000400530050ffa6; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff007fff810001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000400530050ffa6; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffff811001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000a1ff4c; ++ __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000001f0000001f; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000001f; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000008000001e; ++ __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xd3259a2984048c23; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf9796558e39953fd; ++ *((unsigned long *)&__m128i_op1[1]) = 0x86dd8341b164f12b; ++ *((unsigned long *)&__m128i_op1[0]) = 0x9611c3985b3159f5; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000035697d4e; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000013ecaadf2; ++ __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ff00ef00ff010f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00ff010f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc1f03e1042208410; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000001000110; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000431f851f; ++ __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000030000003f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000030000003f; ++ __m128i_out = __lsx_vaddwev_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffbfffffffbf; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffbfffffffbf; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffbfffffffbe; ++ __m128i_out = __lsx_vaddwev_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x06b1213ef1efa299; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8312f5424ca4a07f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1f1f1f1f1f1f1f00; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1f1f1f27332b9f00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xa23214697fd03f7f; ++ __m128i_out = __lsx_vaddwev_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x80000000ffffd860; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff80000000; ++ __m128i_out = __lsx_vaddwev_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-1.c +new file mode 100644 +index 000000000..0ebe8c8a9 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-1.c +@@ -0,0 +1,408 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ca354688; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0404038383838404; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0404038383838404; ++ *((unsigned long *)&__m128i_result[1]) = 0x00040003ff83ff84; ++ *((unsigned long *)&__m128i_result[0]) = 0x00040003ff4dffca; ++ __m128i_out = __lsx_vaddwod_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000040d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000004; ++ __m128i_out = __lsx_vaddwod_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00001f5400000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001f00000000; ++ __m128i_out = __lsx_vaddwod_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000f80007; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ unsigned_int_out = __lsx_vpickve2gr_bu (__m128i_op0, 0xb); ++ *((unsigned long *)&__m128i_op0[1]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff00000000; ++ __m128i_out = __lsx_vaddwod_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffff0100ff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffefffefffeffff; ++ __m128i_out = __lsx_vaddwod_h_b (__m128i_op0, __m128i_op1); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x478b478b38031779; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6b769e690fa1e119; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000001030103; ++ *((unsigned long *)&__m128i_result[1]) = 0x0047004700380017; ++ *((unsigned long *)&__m128i_result[0]) = 0x006bff9e0010ffe2; ++ __m128i_out = __lsx_vaddwod_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xc0b4d1a5f8babad3; ++ *((unsigned long *)&__m128i_op0[0]) = 0xbbc8ecc5f3ced5f3; ++ *((unsigned long *)&__m128i_op1[1]) = 0xc0b4d1a5f8babad3; ++ *((unsigned long *)&__m128i_op1[0]) = 0xbbc8ecc5f3ced5f3; ++ *((unsigned long *)&__m128i_result[1]) = 0xff80ffa2fff0ff74; ++ *((unsigned long *)&__m128i_result[0]) = 0xff76ffd8ffe6ffaa; ++ __m128i_out = __lsx_vaddwod_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000800; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1f54e0ab00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00001f5400000000; ++ __m128i_out = __lsx_vaddwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000000f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000208000002080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000208000002080; ++ *((unsigned long *)&__m128i_op1[1]) = 0xd70b30c96ea9f4e8; ++ *((unsigned long *)&__m128i_op1[0]) = 0xa352bfac9269e0aa; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffd70b00006ea9; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffa352ffff9269; ++ __m128i_out = __lsx_vaddwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000208000002080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000208000002080; ++ *((unsigned long *)&__m128i_op1[1]) = 0xd70b30c96ea9f4e8; ++ *((unsigned long *)&__m128i_op1[0]) = 0xa352bfac9269e0aa; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffd70b00006ea9; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffa352ffff9269; ++ __m128i_out = __lsx_vaddwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe593c8c4e593c8c4; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8144ffff01c820a4; ++ *((unsigned long *)&__m128i_op1[0]) = 0x9b2ee1a4034b4e34; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff80c400000148; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff80c1ffffe8de; ++ __m128i_out = __lsx_vaddwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffefffffffe; ++ __m128i_out = __lsx_vaddwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xa486c90f6537b8d7; ++ *((unsigned long *)&__m128i_op0[0]) = 0x58bcc2013ea1cc1e; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffa486c90f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000058bcc201; ++ __m128i_out = __lsx_vaddwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00001802041b0013; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00001802041b0014; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000003004; ++ __m128i_out = __lsx_vaddwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff02000200; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffe000ffffe000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffe000ffffe000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffdfff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffdfff; ++ __m128i_out = __lsx_vaddwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fbf83468; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000fbf83468; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff82bb9784; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffc6bb97ac; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000007ffffffe; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vaddwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000001000fbff9; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000002ff9afef; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000004f804f81; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000004f804f80; ++ __m128i_out = __lsx_vaddwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001000000010; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000001000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000001000000010; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000001000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000020; ++ __m128i_out = __lsx_vaddwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001000000010; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000001000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000fff0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000010; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000010; ++ __m128i_out = __lsx_vaddwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffe00029f9f6061; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x64e464e464e464e4; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffeffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000064e264e6; ++ __m128i_out = __lsx_vaddwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0305030203020502; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0301030203020502; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000003050302; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000003010302; ++ __m128i_out = __lsx_vaddwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ff0000ff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x01fc020000fe0100; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ff0000ff0000; ++ __m128i_out = __lsx_vaddwod_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vaddwod_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff84fff4ff84fff4; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00a6ffceffb60052; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xff84fff4ff84fff4; ++ __m128i_out = __lsx_vaddwod_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000fefefe6a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000c2bac2c2; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000fefefe6a; ++ __m128i_out = __lsx_vaddwod_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0032000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000002000000020; ++ __m128i_out = __lsx_vaddwod_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffff0000; ++ __m128i_out = __lsx_vaddwod_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5a57bacbd7e39680; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6bae051ffed76001; ++ *((unsigned long *)&__m128i_op1[1]) = 0xf3e6586b60d7b152; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf7077b934ac0e000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x4e3e133738bb47d2; ++ __m128i_out = __lsx_vaddwod_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000117d00007f7b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000093d0000187f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7d7f027f7c7f7c79; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7e7f7e7f027f032f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7d7f13fc7c7ffbf4; ++ __m128i_out = __lsx_vaddwod_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-2.c +new file mode 100644 +index 000000000..379517f39 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-2.c +@@ -0,0 +1,344 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x08fdc221bfdb1927; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4303c67e9b7fb213; ++ 
*((unsigned long *)&__m128i_op1[1]) = 0x08fdc221bfdb1927; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4303c67e9b7fb213; ++ *((unsigned long *)&__m128i_result[1]) = 0x00100184017e0032; ++ *((unsigned long *)&__m128i_result[0]) = 0x0086018c01360164; ++ __m128i_out = __lsx_vaddwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff77777807777775; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe6eeef00eeeeeebf; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000f00f; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff007700070077; ++ *((unsigned long *)&__m128i_result[0]) = 0x00e600ef00ee01de; ++ __m128i_out = __lsx_vaddwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00ff00ff; ++ __m128i_out = __lsx_vaddwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00ff00ff; ++ __m128i_out = __lsx_vaddwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff; ++ __m128i_out = __lsx_vaddwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000030000003f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) 
= 0x4429146a7b4c88b2; ++ *((unsigned long *)&__m128i_op1[0]) = 0xe22b3595efa4aa0c; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000442900007b4c; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000e22b0000efa4; ++ __m128i_out = __lsx_vaddwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0002000000000007; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000600000004; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000636500006363; ++ __m128i_out = __lsx_vaddwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000000a6; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000080800000808; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000080800000808; ++ __m128i_out = __lsx_vaddwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000800000000000; ++ __m128i_out = __lsx_vaddwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vaddwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001fffe0001fefc; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff0000ffff; 
++ *((unsigned long *)&__m128i_op1[0]) = 0x0001fffe0001fefc; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000200000002; ++ __m128i_out = __lsx_vaddwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fff80000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff8000010f800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ff8000010f78; ++ __m128i_out = __lsx_vaddwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffff7; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffc01; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffff7; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffc01; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000001fffffffe; ++ __m128i_out = __lsx_vaddwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x370bdfecffecffec; ++ *((unsigned long *)&__m128i_op0[0]) = 0x370bdfecffecffec; ++ *((unsigned long *)&__m128i_op1[1]) = 0x370bdfecffecffec; ++ *((unsigned long *)&__m128i_op1[0]) = 0x370bdfecffecffec; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000006e17bfd8; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000006e17bfd8; ++ __m128i_out = __lsx_vaddwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000158; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x003f000400000003; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003f000400000003; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000000010000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000400004; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000003f0004; ++ __m128i_out = __lsx_vaddwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f8000017f800001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f8000017f800001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000007f800001; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000007f800001; ++ __m128i_out = __lsx_vaddwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000200000000; ++ __m128i_out = __lsx_vaddwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vaddwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x379674c000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3789f68000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; 
++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x379674c000000000; ++ __m128i_out = __lsx_vaddwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000001000000010; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-3.c +new file mode 100644 +index 000000000..30dc83518 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-3.c +@@ -0,0 +1,237 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, 
fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000a16316b0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x16161616a16316b0; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff00ffffa10016; ++ *((unsigned long *)&__m128i_result[0]) = 0x01150115ffa10016; ++ __m128i_out = __lsx_vaddwod_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x007e007e007e007e; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00ff00ff; ++ __m128i_out = __lsx_vaddwod_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000120002000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100013fa0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000200020; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000003f; ++ __m128i_out = __lsx_vaddwod_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000fe00fe; ++ *((unsigned long *)&__m128i_result[0]) = 0x00fe00fe00fe00fe; ++ __m128i_out = __lsx_vaddwod_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000011ffee; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000000dfff2; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000000000ff; ++ __m128i_out = __lsx_vaddwod_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00e0000000e00000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000e0000000e0; ++ __m128i_out = __lsx_vaddwod_w_hu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff7100fffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00ff00ffffa10016; ++ *((unsigned long *)&__m128i_op1[0]) = 0x01150115ffa10016; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x000100fe000070a1; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000115ffffffa1; ++ __m128i_out = __lsx_vaddwod_w_hu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_w_hu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000fffe0000fffe; ++ __m128i_out = __lsx_vaddwod_w_hu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffe218ffffea10; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffff208fffffa02; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff; ++ __m128i_out = __lsx_vaddwod_w_hu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000001000f00fe00; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000017fff00fe7f; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000f00; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffff00; ++ __m128i_out = __lsx_vaddwod_w_hu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_w_hu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x5); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_w_hu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x04faf60009f5f092; ++ *((unsigned long *)&__m128i_op0[0]) = 0x04fafa9200000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfff9fffefff9ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000004fa000009f5; ++ *((unsigned long *)&__m128i_result[0]) = 0x000004f3fffffff9; ++ __m128i_out = __lsx_vaddwod_w_hu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000c2f90000bafa; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000c2fa8000c2fa; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000c2f90000bafa; ++ __m128i_out = __lsx_vaddwod_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vaddwod_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00001fff00001fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000003fffffffc; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00001fff00001fff; ++ __m128i_out = __lsx_vaddwod_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000000000000000; ++ __m128i_out = __lsx_vaddwod_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000807bf0a1f80; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000800ecedee68; ++ *((unsigned long *)&__m128i_op1[1]) = 0x5847b72626ce61ef; ++ *((unsigned long *)&__m128i_op1[0]) = 0x110053f401e7cced; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x5847bf2de5d8816f; ++ __m128i_out = __lsx_vaddwod_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x0000000100000155; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff000000000000; ++ __m128i_out = __lsx_vaddwod_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-1.c +new file mode 100644 +index 000000000..af75f8e4e +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-1.c +@@ -0,0 +1,488 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4ee85545068f3133; ++ *((unsigned long *)&__m128i_op0[0]) = 0x870968c1f56bb3cd; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x004e005500060031; ++ *((unsigned long *)&__m128i_result[0]) = 0xff870068fff5ffb3; ++ __m128i_out = __lsx_vhaddw_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfff082f000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003f000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xc04d600d3aded151; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x004cff8fffde0051; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vhaddw_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vhaddw_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x800000007fffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x800000007fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x003f0000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x003f0000ffffffff; ++ __m128i_out = __lsx_vhaddw_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0042003e0042002f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001fffc0001fffc; ++ *((unsigned long *)&__m128i_result[1]) = 0x0042003e0042002f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001fffc0001fffc; ++ __m128i_out = __lsx_vhaddw_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7505443065413aed; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0100d6effefd0498; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x0000750500006541; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000100fffffefd; ++ __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffff00ff00; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00ffff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffff7; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffe000000f6; ++ __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000001fffffffe; ++ __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4050000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000f80007; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000000000f8; ++ __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff80ffffffffff80; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ff80ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 
0xffffff7ffffffffe; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000fffffffe; ++ __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xce9035c49ffff570; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_op1[0]) = 0xce9035c49ffff574; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000454ffff9573; ++ __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x80007fc000003f00; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7d187e427c993f80; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7500000075000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7500000075000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00007d1800007c99; ++ __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x5555000054100000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x5555000154100155; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000155; ++ __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000200; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000200; ++ __m128i_out = __lsx_vhaddw_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffebe6ed565; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffebe6ed565; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffbe6ed563; ++ __m128i_out = __lsx_vhaddw_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x78c00000ff000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000078c00000; ++ __m128i_out = __lsx_vhaddw_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7d3ac60000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 
0x000000007d3ac600; ++ __m128i_out = __lsx_vhaddw_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fffffff82bb9784; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fffffffc6bb97ac; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff82bb9784; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffc6bb97ac; ++ __m128i_out = __lsx_vhaddw_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vhaddw_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ 
__m128i_out = __lsx_vhaddw_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x003f0000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003f0000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x003f0000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x003f0000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000003effff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000003effff; ++ __m128i_out = __lsx_vhaddw_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xf359f359f359f359; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf359f359f359f359; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffff359f358; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffff359f358; ++ __m128i_out = __lsx_vhaddw_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000e2e36363; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000063636363; ++ __m128i_out = 
__lsx_vhaddw_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vhaddw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffe000ffffe000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffe000ffffe000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff02000200; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffe00001ffe200; ++ __m128i_out = __lsx_vhaddw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000383; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe400000003ffc001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000001000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffe000ffff1fff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffe000ffff2382; ++ __m128i_out = __lsx_vhaddw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x03574e39e496cbc9; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000001010000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x03574e38e496cbc9; ++ __m128i_out = __lsx_vhaddw_q_d 
(__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xabff54e911f71b07; ++ *((unsigned long *)&__m128i_op0[0]) = 0xa9ec4882f216ea11; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfc01fcfefc02fdf7; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfe00fcfffe01fd01; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xaa0051e90ff91808; ++ __m128i_out = __lsx_vhaddw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000030000003f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000030000003f; ++ __m128i_out = __lsx_vhaddw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffff000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vhaddw_q_d (__m128i_op0, __m128i_op1); 
++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffff53d9; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff0001ffff9515; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff0001ffff9514; ++ __m128i_out = __lsx_vhaddw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x9c9c9c9c9c9c9c9c; ++ __m128i_out = __lsx_vhaddw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0002000200000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0002000200000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0002000400000001; ++ __m128i_out = __lsx_vhaddw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff80ff807e017f01; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f3b7f3f7f3b7f21; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0a0000001e000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0a000000f6000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0980ff8174017f01; ++ __m128i_out = __lsx_vhaddw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3a8000003a800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000ef0000000003b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000ef0000000003b; ++ __m128i_out = __lsx_vhaddw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000011ff040; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000011ff040; ++ __m128i_out = __lsx_vhaddw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-2.c +new file mode 100644 +index 000000000..37c769a2d +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-2.c +@@ -0,0 +1,452 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, 
unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000fff8fff8; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fff80000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000ff0000; ++ __m128i_out = __lsx_vhaddw_hu_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff80000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff80000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0080000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0080000000000000; ++ __m128i_out = __lsx_vhaddw_hu_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_hu_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5252525252525252; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5252525252525252; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0052005200520052; ++ *((unsigned long *)&__m128i_result[0]) = 0x0052005200520052; ++ __m128i_out = __lsx_vhaddw_hu_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_hu_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000fffd; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff000000ff; ++ __m128i_out = __lsx_vhaddw_hu_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00060012000e002b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000049ffffffaa; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000060000000e; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000127fffffea; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000060000000e; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001201fe01e9; ++ __m128i_out = __lsx_vhaddw_hu_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x5d7f5d807fea807f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x007f008000ea007f; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00ff00ff; ++ __m128i_out = __lsx_vhaddw_hu_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_hu_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_wu_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00009f0000009f00; ++ __m128i_out = __lsx_vhaddw_wu_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000bd3d00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000bd3d00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000bd3d00000000; ++ __m128i_out = __lsx_vhaddw_wu_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_wu_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000001f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000001f; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff007f00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff007f00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000007f00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000007f00000000; ++ __m128i_out = __lsx_vhaddw_wu_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7ef400ad21fc7081; ++ *((unsigned long *)&__m128i_op1[0]) = 0x28bf0351ec69b5f2; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000ad00007081; ++ *((unsigned long *)&__m128i_result[0]) = 0x000003510000b5f2; ++ __m128i_out = __lsx_vhaddw_wu_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x5252adadadadadad; ++ *((unsigned long *)&__m128i_op1[0]) = 0xadad52525252adad; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000adad0000adad; ++ *((unsigned long *)&__m128i_result[0]) = 0x000052520000adad; ++ __m128i_out = __lsx_vhaddw_wu_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) 
= 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ff8000010f78; ++ *((unsigned long *)&__m128i_op1[1]) = 0x002a001a001a000b; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000001a0000000b; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vhaddw_wu_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7500000075007500; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00feff8000ff80ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00007d1800007c99; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000f50000007500; ++ *((unsigned long *)&__m128i_result[0]) = 0x00007e1600007d98; ++ __m128i_out = __lsx_vhaddw_wu_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff760386bdae46; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc1fc7941bc7e00ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0802080408060803; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff000086bd; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ca000000c481; ++ __m128i_out = __lsx_vhaddw_wu_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000007fff9; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff2356fe165486; ++ *((unsigned long *)&__m128i_op1[0]) = 0x5efeb3165bd7653d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000235600005486; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000b31600006544; ++ __m128i_out = __lsx_vhaddw_wu_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x9c83e21a22001818; ++ *((unsigned long *)&__m128i_op0[0]) = 0xdd3b8b02563b2d7b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4f4f4f4f4f4f4f4f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4f4f4f4f4f4f4f4f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ebd20000714f; ++ *((unsigned long *)&__m128i_result[0]) = 0x00012c8a0000a58a; ++ __m128i_out = __lsx_vhaddw_wu_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000003; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xa8beed87bc3f2be1; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0024d8f6a494006a; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfeffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfeffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000001a8beed86; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000010024d8f5; ++ __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; 
++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000078c00000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6a57a30ff0000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000f0000000; ++ __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000040d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000001fffffffe; ++ __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x80000000b57ec564; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8000000083ff0be0; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000001b57ec563; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000183ff0bdf; ++ __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000e2e3ffffd1d3; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000008000e2e3; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000080000000; ++ __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000200010002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000200010002; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000010004; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000004; ++ __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000001000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000001000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4ee85545ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x870968c1f56bb3cd; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x870968c1f56bb3cd; ++ __m128i_out = __lsx_vhaddw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000013d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0010001000030000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0006000200000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0006000200000000; ++ __m128i_out = __lsx_vhaddw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffff01ff01; 
++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffe; ++ __m128i_out = __lsx_vhaddw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000001b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000001b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000001b0000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000001b0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000001b001b; ++ __m128i_out = __lsx_vhaddw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000004870ba0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000004870ba0; ++ __m128i_out = __lsx_vhaddw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000010000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000010000000; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xff8000010f800000; ++ __m128i_out = __lsx_vhaddw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00007fff7fff8000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000000003e2; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vhaddw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmadd.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmadd.c +new file 
mode 100644 +index 000000000..3fade5157 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmadd.c +@@ -0,0 +1,450 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffffa486c90f; ++ *((unsigned long *)&__m128i_op2[0]) = 0x1f52d710bf295626; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmadd_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmadd_d (__m128i_op0, 
__m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmadd_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmadd_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmadd_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) 
= 0x0000000200000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmadd_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmadd_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff7f01ff01; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x78c00000ff000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff7f01ff01; ++ __m128i_out = __lsx_vmadd_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmadd_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfa31dfa21672e711; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1304db85e468073a; ++ *((unsigned long *)&__m128i_op2[1]) = 0x887c8beb969e00f2; ++ *((unsigned long *)&__m128i_op2[0]) = 0x101f8b680b6f8095; ++ *((unsigned long *)&__m128i_result[1]) = 0x7582ed22cb1c6e12; ++ *((unsigned long *)&__m128i_result[0]) = 0x35aaa61c944f34c2; ++ __m128i_out = __lsx_vmadd_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5252525252525252; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5252525252525252; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x5252525252525252; ++ *((unsigned long *)&__m128i_result[0]) = 0x5252525252525252; ++ __m128i_out = __lsx_vmadd_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_out = __lsx_vpickve2gr_b (__m128i_op0, 0xc); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 
0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op2[1]) = 0xbfffbfffbfffbffe; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x4000400040004002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmadd_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfe01fe01fe01fe01; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfe01fe01fe01fe01; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfe01fe01fe01fe01; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfe01fe01fe01fe01; ++ *((unsigned long *)&__m128i_op2[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_op2[0]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_result[1]) = 0xf10cf508f904fd01; ++ *((unsigned long *)&__m128i_result[0]) = 0xf10cf508f904fd01; ++ __m128i_out = __lsx_vmadd_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffb080ffffb080; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffb080ffffb080; ++ *((unsigned long *)&__m128i_op2[1]) = 0x004fcfcfd01f9f9f; ++ *((unsigned long *)&__m128i_op2[0]) = 0x9f4fcfcfcf800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x3504b5fd2dee1f80; ++ *((unsigned long *)&__m128i_result[0]) = 0x4676f70fc0000000; ++ __m128i_out = __lsx_vmadd_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xf7f7f7ff8e8c6d7e; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf7f7f7f7f7f7fbff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xf7f7f7ff8e8c6d7e; ++ *((unsigned long *)&__m128i_result[0]) = 0xf7f7f7f7f7f7fbff; ++ __m128i_out = __lsx_vmadd_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmadd_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0fbc1df53c1ae3f9; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ff820f81; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xf144e32bc4e61d27; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000020017ef19f; ++ __m128i_out = __lsx_vmadd_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000004b01; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00d3012acc56f9bb; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000000a0; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000004b01; ++ __m128i_out = __lsx_vmadd_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000001000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000ffff0000ff00; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffefffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmadd_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000001010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0fff0fff0fff0fff; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0fff0fff0fff0fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xf001f0010101f002; ++ __m128i_out = __lsx_vmadd_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmadd_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000fffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0010000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000007f41; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000fffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0010000000000001; ++ __m128i_out = __lsx_vmadd_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x01ff020000ff03ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x01346b8d00b04c5a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x002affd600000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xcbc2723a4f12a5f8; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x01ff020000ff03ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x01346b8d00b04c5a; ++ __m128i_out = __lsx_vmadd_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000080808000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000080808000; ++ __m128i_out = __lsx_vmadd_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4040404040404040; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmadd_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ff000000ff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff000000ff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmadd_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000455555555; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000455555555; ++ __m128i_out = __lsx_vmadd_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x007f00ff00ff00fe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x7ffffffe00000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x7ffffffe00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x007f00ff00ff00fe; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmadd_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xdcec560380000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x08ec7f7f80000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fff010181010102; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fffffff81010102; ++ *((unsigned long *)&__m128i_op2[1]) = 0x32d8f0a905b6c59b; ++ *((unsigned long *)&__m128i_op2[0]) = 0x322a52fc2ba83b96; ++ *((unsigned long *)&__m128i_result[1]) = 0xaa14efac3bb62636; ++ *((unsigned long *)&__m128i_result[0]) = 0xd6c22c8353a80d2c; ++ __m128i_out = __lsx_vmadd_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmadd_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x8080808080808080; ++ 
*((unsigned long *)&__m128i_op2[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmadd_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00009c7c00007176; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xff000000001f1f00; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00009c7c00007176; ++ __m128i_out = __lsx_vmadd_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00060fbf00040fbf; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00020fbf00000fbf; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x9727b8499727b849; ++ *((unsigned long *)&__m128i_op2[0]) = 0x12755900b653f081; ++ *((unsigned long *)&__m128i_result[1]) = 0x00060fbf00040fbf; ++ *((unsigned long *)&__m128i_result[0]) = 0x00020fbf00000fbf; ++ __m128i_out = __lsx_vmadd_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000021100000211; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfb141d31fb141d31; ++ *((unsigned long *)&__m128i_op1[1]) = 0x001effae001effae; ++ *((unsigned long *)&__m128i_op1[0]) = 0x001effae001effae; ++ *((unsigned long *)&__m128i_op2[1]) = 0x2006454690d3de87; ++ *((unsigned long *)&__m128i_op2[0]) = 0x2006454690d3de87; ++ *((unsigned long *)&__m128i_result[1]) = 0xc0b4d1a5f8babad3; ++ 
*((unsigned long *)&__m128i_result[0]) = 0xbbc8ecc5f3ced5f3; ++ __m128i_out = __lsx_vmadd_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0674c886fcba4e98; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfdce8003090b0906; ++ *((unsigned long *)&__m128i_op1[1]) = 0x003fffc0ffc0003f; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffc0ffc0003f003f; ++ *((unsigned long *)&__m128i_op2[1]) = 0x002a05a2f059094a; ++ *((unsigned long *)&__m128i_op2[0]) = 0x05ad3ba576eae048; ++ *((unsigned long *)&__m128i_result[1]) = 0xd4a6cc27d02397ce; ++ *((unsigned long *)&__m128i_result[0]) = 0x24b85f887e903abe; ++ __m128i_out = __lsx_vmadd_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0700f8ff0700f8ff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000007020701; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000007010701; ++ *((unsigned long *)&__m128i_result[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f8000008680f1ff; ++ __m128i_out = __lsx_vmadd_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6363636463abdf17; ++ *((unsigned long *)&__m128i_op0[0]) = 0x41f8e08016161198; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x6363636463abdf17; ++ *((unsigned long *)&__m128i_result[0]) = 0x41f8e08016161198; ++ __m128i_out = __lsx_vmadd_w (__m128i_op0, __m128i_op1, 
__m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffff00ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmadd_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ff800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x17c64aaef639f093; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xf6548a1747e59090; ++ *((unsigned long *)&__m128i_op2[0]) = 0x27b169bbb8145f50; ++ *((unsigned long *)&__m128i_result[1]) = 0x10f881a20ffd02b0; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ff800000; ++ __m128i_out = __lsx_vmadd_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-1.c +new file mode 100644 +index 000000000..d3fd83da7 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-1.c +@@ -0,0 +1,472 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, 
__m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000036de0000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000003be14000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000001f0a; ++ *((unsigned long *)&__m128i_op2[0]) = 0x00000000ffff7a53; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000001f0000; ++ __m128i_out = __lsx_vmaddwev_h_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000006f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001f0a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000006f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000001f0a; ++ __m128i_out = __lsx_vmaddwev_h_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000cdc1; ++ *((unsigned long *)&__m128i_op1[1]) = 0xe93d0bd19ff0c170; ++ *((unsigned long *)&__m128i_op1[0]) = 0x5237c1bac9eadf55; ++ *((unsigned long *)&__m128i_op2[1]) = 0x05d0ae6002e8748e; ++ *((unsigned long *)&__m128i_op2[0]) = 0xcd1de80217374041; ++ *((unsigned long 
*)&__m128i_result[1]) = 0xf490ee600180ce20; ++ *((unsigned long *)&__m128i_result[0]) = 0x063bff74fb46e356; ++ __m128i_out = __lsx_vmaddwev_h_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4040404040404040; ++ *((unsigned long *)&__m128i_op2[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m128i_op2[0]) = 0x4040404040404040; ++ *((unsigned long *)&__m128i_result[1]) = 0x0fff0fff0fff0fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0fff0fff0fff0fff; ++ __m128i_out = __lsx_vmaddwev_h_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op1[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_result[0]) = 0x6363636363636363; ++ __m128i_out = __lsx_vmaddwev_h_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; 
++ __m128i_out = __lsx_vmaddwev_h_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffe00029f9f6061; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3f5ec0a0feefa0b0; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffe00029fb060b1; ++ __m128i_out = __lsx_vmaddwev_h_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x3); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8d78336c83652b86; ++ *((unsigned long *)&__m128i_op1[0]) = 0x39c51f389c0d6112; ++ *((unsigned long *)&__m128i_op2[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffff0001ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ff9b0082; ++ *((unsigned long *)&__m128i_result[0]) = 0x003a0037fff2fff8; ++ __m128i_out = __lsx_vmaddwev_h_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000ff0000857a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x05fafe0101fe000e; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000ff0000857a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x05fafe0101fe000e; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000ff0000857a; ++ *((unsigned long *)&__m128i_result[0]) = 0x05fafe0101fe000e; ++ __m128i_out = __lsx_vmaddwev_h_b (__m128i_op0, 
__m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_w_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff82bb9784; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffc6bb97ac; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x7fffffff82bb9784; ++ *((unsigned long *)&__m128i_op2[0]) = 0x7fffffffc6bb97ac; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff82bb9784; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffc6bb97ac; ++ __m128i_out = __lsx_vmaddwev_w_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x05d0ba0002e8802e; ++ *((unsigned long *)&__m128i_op0[0]) = 0xd005e802174023d6; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc000c000c000ff81; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0ba00ba00ba00ba0; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0ba00ba00ba011eb; ++ *((unsigned long *)&__m128i_result[1]) = 0x05d0ae6002e8748e; ++ *((unsigned long *)&__m128i_result[0]) = 0xcd1de80217374041; ++ __m128i_out = __lsx_vmaddwev_w_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000010000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00ff00ff00fe00ff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x00000fff00000e36; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000fef01000e27ca; ++ __m128i_out = __lsx_vmaddwev_w_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_w_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000040; ++ __m128i_out = __lsx_vmaddwev_w_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x680485c8b304b019; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc89d7f0fed582019; ++ 
*((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000003ddc5dac; ++ *((unsigned long *)&__m128i_op2[1]) = 0x67157b5100005000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x387c7e0a133f2000; ++ *((unsigned long *)&__m128i_result[1]) = 0x680485c8b304b019; ++ *((unsigned long *)&__m128i_result[0]) = 0xc89d7f0ff90da019; ++ __m128i_out = __lsx_vmaddwev_w_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ff0000ff0000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x01fc020000fe0100; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_d_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vmaddwev_d_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_d_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000100fe000100fe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000100fe000100fe; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff; ++ __m128i_out = __lsx_vmaddwev_d_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xa23214697fd03f7f; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmaddwev_d_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_d_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000010000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_d_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7c7c9c0000007176; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x00000000f3040705; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7c7c9c0000007176; ++ __m128i_out = __lsx_vmaddwev_d_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x022002101b200203; ++ *((unsigned long *)&__m128i_op0[0]) = 0x022002101b200203; ++ *((unsigned long *)&__m128i_op1[1]) = 0x022002101b200203; ++ *((unsigned long *)&__m128i_op1[0]) = 0x022002101b200203; ++ *((unsigned long *)&__m128i_op2[1]) = 0x000000080c43b700; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x036caeeca7592703; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x022002101b200203; ++ __m128i_out = __lsx_vmaddwev_d_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x9c83e21a22001818; ++ *((unsigned long *)&__m128i_op1[0]) = 0xdd3b8b02563b2d7b; ++ *((unsigned long *)&__m128i_op2[1]) = 0x000000009c83e21a; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000022001818; ++ *((unsigned long *)&__m128i_result[1]) = 0xf2c97aaa7d8fa270; ++ *((unsigned long *)&__m128i_result[0]) = 0x0b73e427f7cfcb88; ++ __m128i_out = __lsx_vmaddwev_d_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_d 
(__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0010001000030000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0006000200000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7505445465593af1; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0100d6effefd0498; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010001000030000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0006000200000000; ++ __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000800; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000800; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000800; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x0007000000040000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0003000000010000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000780000007800; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0007000000040000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0003000000010000; ++ __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xf047ef0000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff80800001; ++ *((unsigned long *)&__m128i_op0[0]) = 
0xffffffff80800001; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffff7fff7ef; ++ *((unsigned long *)&__m128i_op1[0]) = 0x80808080ffffffff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000080800000808; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000080800000808; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffbff8888080a; ++ *((unsigned long *)&__m128i_result[0]) = 0x080803ff807ff7f9; ++ __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3fc03fc000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f801fe000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x3fc03fc000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f801fe000000000; ++ __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x01fe01fd01fd01fd; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x5d7f5d007f6a007f; ++ *((unsigned long *)&__m128i_op2[0]) = 0x7fff7fff7fff7f00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000060000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x0000000500000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000060000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xf8f8f8f8f8f8f8f8; ++ *((unsigned long *)&__m128i_op2[0]) = 0xf8f8f8f8f8f8f8f8; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-2.c +new file mode 100644 +index 000000000..839285685 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-2.c +@@ -0,0 +1,383 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_h_bu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fffffff00000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x7fffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7e00fe0000000000; ++ __m128i_out = __lsx_vmaddwev_h_bu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffbffffff85; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffc0000fdfc; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_result[0]) = 0x0808080808080808; ++ __m128i_out = __lsx_vmaddwev_h_bu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3941248880000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3941248880000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x40f3fa0000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x76f4248880000000; ++ __m128i_out = __lsx_vmaddwev_h_bu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000e36400005253; ++ *((unsigned long *)&__m128i_op2[0]) = 0x000035ed0000e000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_h_bu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0080008000800080; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_h_bu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_h_bu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x400000003fffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4000000040000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x400000003fffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x4000000040000000; ++ __m128i_out = __lsx_vmaddwev_h_bu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xf1f1f1f149ed7273; ++ *((unsigned long *)&__m128i_op0[0]) = 0x78508ad4ec2ffcde; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffdfdc0d; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x00000000ffdfdc0d; ++ *((unsigned long *)&__m128i_result[1]) = 0xf1f1f1f149ed7273; ++ *((unsigned long *)&__m128i_result[0]) = 0x78508ad4ae70fd87; ++ __m128i_out = __lsx_vmaddwev_h_bu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000440efffff000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000003b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x440ef000440ef000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x4400000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000440efffff000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000003b; ++ __m128i_out = __lsx_vmaddwev_h_bu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_w_hu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000ffc2f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00201df000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffc2ffe700000007; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffc100010001; ++ *((unsigned long *)&__m128i_op2[1]) = 0x00010020fffeffde; ++ *((unsigned long *)&__m128i_op2[0]) = 0x011f57c100201a46; ++ *((unsigned long *)&__m128i_result[1]) = 0x001ffce00016fb41; ++ *((unsigned long *)&__m128i_result[0]) = 0x57cb857100001a46; ++ __m128i_out = __lsx_vmaddwev_w_hu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_w_hu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_w_hu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_w_hu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_w_hu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7c7c9c0000007176; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x00ff000000001f1f; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7c7c9c0000007176; ++ __m128i_out = __lsx_vmaddwev_w_hu (__m128i_op0, 
__m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xbfd10d0d7b6b6b73; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc5c53492f25acbf2; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff000000001f1f00; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xbfd10d0d7b6b6b73; ++ *((unsigned long *)&__m128i_result[0]) = 0xc5c53492f25acbf2; ++ __m128i_out = __lsx_vmaddwev_w_hu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x34947b4b11684f92; ++ *((unsigned long *)&__m128i_op0[0]) = 0xd73691661e5b68b4; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000016f303dff6d2; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000016f303dff6d2; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x7fffffff00000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x34947b4b11684f92; ++ *((unsigned long *)&__m128i_result[0]) = 0xee297a731e5c5f86; ++ __m128i_out = __lsx_vmaddwev_w_hu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0177fff0fffffff0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000011ff8bc; ++ *((unsigned long *)&__m128i_op2[1]) = 0x00bbfff7fffffff7; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffff008ff820; ++ *((unsigned long *)&__m128i_result[1]) = 0xffe8008fffe7008f; ++ *((unsigned long *)&__m128i_result[0]) = 0x00010001f1153780; ++ __m128i_out = __lsx_vmaddwev_w_hu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000021; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_d_wu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010002; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op2[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op2[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001808281820102; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001808201018081; ++ __m128i_out = __lsx_vmaddwev_d_wu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000010100fe0101; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffff0200ffff01ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_d_wu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000017fda829; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x00ffff0000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x00ffff0000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x007f7f80807f7f80; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000020302030; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000020302030; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x3fffffffc0000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000006e17bfd8; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000006e17bfd8; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffff0100000001; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffff0100000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000006e17bfd8; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000006e17bfd8; ++ __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op2[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op2[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x00010000fffffffc; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000004; ++ __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000008000e2e3; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000080000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000008000e2e3; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000080000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op2[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000080806362; ++ *((unsigned long *)&__m128i_result[0]) = 0x807f808000000000; ++ __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffa; ++ __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8101010181010101; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8101010181010101; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000101010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000101010015; 
++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffed00010001; ++ __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-3.c +new file mode 100644 +index 000000000..bab2c6cf3 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-3.c +@@ -0,0 +1,383 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xc0c00000c0c00000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xc0c00c01c2cd0009; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x7f78787f00f7f700; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000f7f700f7f700; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000400; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000400; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000400; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000400; ++ __m128i_out = __lsx_vmaddwev_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x000000000000040d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000080003f80ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000002000000020; ++ *((unsigned long 
*)&__m128i_op2[1]) = 0x3ff0010000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x3ff0010000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000080003f80ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000000007b; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xff81ff82ff810081; ++ *((unsigned long *)&__m128i_op2[0]) = 0xff82ff810081ff81; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_result[0]) = 0x0808080808080808; ++ __m128i_out = __lsx_vmaddwev_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x841f000fc28f801f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x841f000fc28f801f; ++ *((unsigned long *)&__m128i_op2[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xe593c8c4e593c8c4; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m128i_result[0]) = 0x76ecfc8b85ac78db; ++ __m128i_out = __lsx_vmaddwev_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000fff3; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x000000000000040d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000010400; ++ __m128i_out = __lsx_vmaddwev_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000001ca02f854; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100013fa0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x00000001ca02f854; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000100013fa0; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000001ca02f854; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100013fa0; ++ __m128i_out = __lsx_vmaddwev_w_hu_h 
(__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000002b0995850; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff80005613; ++ *((unsigned long *)&__m128i_op1[0]) = 0x007f800000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffff80005613; ++ *((unsigned long *)&__m128i_op2[0]) = 0x007f800000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff00011cf0c569; ++ *((unsigned long *)&__m128i_result[0]) = 0xc0000002b0995850; ++ __m128i_out = __lsx_vmaddwev_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000ffffff81fe; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffff00ffff7e01; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x000000fffe01fd02; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00fe00fffe86f901; ++ __m128i_out = __lsx_vmaddwev_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ 
++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000004000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfff8004000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xbfffbfffbfffbffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xbfffbfffbfffbffe; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xbfffbfffbfffbffe; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffd3000000130000; ++ *((unsigned long *)&__m128i_op0[0]) 
= 0xffd3000000130000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffd3000000130000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffd3000000130000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffd3000000130000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffd3000000130000; ++ __m128i_out = __lsx_vmaddwev_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000003dffc2; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000003dffc2; ++ *((unsigned long *)&__m128i_op2[1]) = 0x00000000003dffc2; ++ *((unsigned long *)&__m128i_op2[0]) = 0x00000000003dffc2; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000f02e1f80f04; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000f02e1f80f04; ++ __m128i_out = __lsx_vmaddwev_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmaddwev_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffefffe00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffefffe00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x98147a504d145000; ++ *((unsigned 
long *)&__m128i_op1[0]) = 0x377b810912c0e000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x5a57bacbd7e39680; ++ *((unsigned long *)&__m128i_op2[0]) = 0x6bae051ffed76001; ++ *((unsigned long *)&__m128i_result[1]) = 0xf3eb458161080000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffe9454286c0e000; ++ __m128i_out = __lsx_vmaddwev_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0051005200510052; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0051005200510052; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffaeffaeffaeffae; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffaeffaeffaeffae; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffe65ecc1be5bc; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffe65ecc1be5bc; ++ __m128i_out = __lsx_vmaddwev_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xf1f1f1f149ed7273; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf1f1f1f1865e65a1; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffff941d; ++ *((unsigned long *)&__m128i_op2[1]) = 0xf1f1f1f149ed7273; ++ *((unsigned long *)&__m128i_op2[0]) = 0xf1f1f1f1865e65a1; ++ *((unsigned long *)&__m128i_result[1]) = 0xf1f1f1f149ed7273; ++ *((unsigned long *)&__m128i_result[0]) = 0x78508ad4ec2ffcde; ++ __m128i_out = __lsx_vmaddwev_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_op2[1]) = 
0x000000120000000d; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000cfffffff2; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000dfffffff1; ++ __m128i_out = __lsx_vmaddwev_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmaddwev_q_du_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000001ff000001ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000001ff000001ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000001ff000001ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000001ff000001ff; ++ *((unsigned long *)&__m128i_op2[1]) = 0xff80ffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0x7ffffffeffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000002fe800000ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x7ffffe0100000000; ++ __m128i_out = __lsx_vmaddwev_q_du_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff00000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xff80000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfe80000000000001; ++ __m128i_out = __lsx_vmaddwev_q_du_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_du_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_du_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_du_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-1.c +new file mode 100644 +index 000000000..5875aa597 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-1.c +@@ -0,0 +1,372 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_h_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000010000000002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000008000000080; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000c7fff000c; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xfffff000f0008d3c; ++ *((unsigned long *)&__m128i_op2[0]) = 0xfffff0016fff8d3d; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000100f8100002; ++ *((unsigned long *)&__m128i_result[0]) = 0xfff0ff8006f0f950; ++ __m128i_out = __lsx_vmaddwod_h_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x007ffd0001400840; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x007ffd0001400840; ++ __m128i_out = __lsx_vmaddwod_h_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000002000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000002000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000002000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000002000; ++ __m128i_out = __lsx_vmaddwod_h_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010058; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010058; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_h_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffac0a000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000200000001b; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000002000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffac0a000000; ++ __m128i_out = __lsx_vmaddwod_h_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000017fda829; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000017fda829; ++ __m128i_out = __lsx_vmaddwod_w_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff8000000000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000800000000ffff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x697eba2bedfa9c82; ++ *((unsigned long *)&__m128i_op2[0]) = 0xd705c77a7025c899; ++ *((unsigned long *)&__m128i_result[1]) = 
0xffcb410000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffeb827ffffffff; ++ __m128i_out = __lsx_vmaddwod_w_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fffffc00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000200010; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000fffffc00; ++ __m128i_out = __lsx_vmaddwod_w_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000c5ac01015b; ++ *((unsigned long *)&__m128i_op1[0]) = 0xaaacac88a3a9a96a; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000000f; ++ __m128i_out = __lsx_vmaddwod_w_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7ef4002d21fc7001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x28bf02d1ec6a35b2; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffb96bffff57c9; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff6080ffff4417; ++ *((unsigned long *)&__m128i_op2[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xff8000007fc00000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7ef400ad21fc7081; ++ *((unsigned long *)&__m128i_result[0]) = 0x28bf0351ec69b5f2; ++ __m128i_out = 
__lsx_vmaddwod_w_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001200100012001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_w_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0xbf8000000000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xcf00000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xbf80000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xcf00000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x1040400000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0961000100000001; ++ __m128i_out = __lsx_vmaddwod_d_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x03574e3a62407e03; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000001010000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7da9b23a624082fd; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x03574e39e496cbc9; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000001010000; ++ __m128i_out = __lsx_vmaddwod_d_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xfffffffff8f8dada; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffff01018888; ++ *((unsigned long *)&__m128i_result[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_d_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000030000003f; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_d_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3f77aab500000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffc100010001; ++ *((unsigned long *)&__m128i_op2[1]) = 0x3f77aab500000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000ffc100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0fbc1df53c1ae3f9; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ff820f81; ++ __m128i_out = __lsx_vmaddwod_d_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_d_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000020000020; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000020000020; ++ *((unsigned long *)&__m128i_op2[1]) = 0x00000000ff801c9e; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000810000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_d_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000700000004e000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0003000000012020; ++ *((unsigned long *)&__m128i_op1[1]) = 0x003f0000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x003f0000ffffffff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x00000000e00a18f5; ++ *((unsigned long *)&__m128i_op2[0]) = 0x000000002023dcdc; ++ *((unsigned long *)&__m128i_result[1]) = 0x000700000004e000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0003000000012020; ++ __m128i_out = __lsx_vmaddwod_d_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000120000000d; ++ 
*((unsigned long *)&__m128i_op1[0]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000011ffee; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000000dfff2; ++ __m128i_out = __lsx_vmaddwod_d_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffff7fffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffff8000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffff7fffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffff8000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000003fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x7ff8010000000001; ++ __m128i_out = __lsx_vmaddwod_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ff000000ff00; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000ff000000ff00; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ff0000ff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0x01fc020000fe0100; ++ __m128i_out = __lsx_vmaddwod_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x78c00000ff000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x78c00000ff000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x78c00000ff000000; ++ __m128i_out = __lsx_vmaddwod_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x1000100012030e02; ++ *((unsigned long *)&__m128i_result[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m128i_result[0]) = 0xfefefefefefefefe; ++ __m128i_out = __lsx_vmaddwod_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000080800000808; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000080800000808; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x010105017878f8f6; ++ *((unsigned long *)&__m128i_op2[0]) = 0xf8f8fd0180810907; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000080800000808; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000080800000808; ++ __m128i_out = __lsx_vmaddwod_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; 
++ __m128i_out = __lsx_vmaddwod_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000158; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010058; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000158; ++ __m128i_out = __lsx_vmaddwod_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x328e1080889415a0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3960b1a401811060; ++ *((unsigned long *)&__m128i_op1[1]) = 0x328e1080889415a0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3960b1a401811060; ++ *((unsigned long *)&__m128i_op2[1]) = 0x020310edc003023d; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x32f3c7a38f9f4b8b; ++ *((unsigned long *)&__m128i_result[0]) = 0x2c9e5069f5d57780; ++ __m128i_out = __lsx_vmaddwod_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-2.c +new file mode 100644 +index 000000000..4be7fce82 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-2.c +@@ -0,0 +1,438 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, 
__m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xd8248069ffe78077; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xd8248069ffe78077; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xe31c86e90cda86f7; ++ __m128i_out = __lsx_vmaddwod_h_bu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00010020fffeffde; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0100400100200e68; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00010020fffeffde; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0100400100200e68; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x1ff85ffe2ae5d973; ++ *((unsigned long *)&__m128i_result[1]) = 0x00010020fffeffde; ++ *((unsigned long *)&__m128i_result[0]) = 0x011f57c100201a46; ++ __m128i_out = __lsx_vmaddwod_h_bu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xbafebb00ffd500fe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_h_bu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0c0b0a090b0a0908; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0a09080709080706; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmaddwod_h_bu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffc0800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffffc0800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffc0800000; ++ __m128i_out = __lsx_vmaddwod_h_bu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; 
++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_w_hu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_w_hu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_w_hu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000003a24; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003dbe88077c78c1; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0020002000200020; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0020002000200020; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000003a24; ++ *((unsigned long *)&__m128i_result[0]) = 0x003dc288077c7cc1; ++ __m128i_out = __lsx_vmaddwod_w_hu 
(__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffc0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffc0000000000004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000455555555; ++ *((unsigned long *)&__m128i_result[1]) = 0xffc0000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffc0000000000004; ++ __m128i_out = __lsx_vmaddwod_w_hu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_w_hu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000000053a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_w_hu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_w_hu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_w_hu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000020; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_w_hu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000036280001; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x42a0000042a02001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000005555555554; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000005555555554; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000036280001; ++ *((unsigned long *)&__m128i_result[0]) = 0x42a0000042a02001; ++ __m128i_out = __lsx_vmaddwod_d_wu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000fe00ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000ff00fe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000fe00ff; ++ __m128i_out = __lsx_vmaddwod_d_wu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffff946c; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffff946b; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff3c992b2e; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffff730f; ++ *((unsigned long *)&__m128i_op2[1]) = 0x00000000ffff946c; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffff946b; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffff946c; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffdffff946c; ++ __m128i_out = __lsx_vmaddwod_d_wu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000080000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000080000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000401000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000080000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000080000000000; ++ __m128i_out = __lsx_vmaddwod_d_wu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffff800; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff000000ff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffff800; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff000000ff0000; ++ __m128i_out = __lsx_vmaddwod_d_wu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0006ffff0004ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0002ffff0000ffff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffff7f800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0002fffefffd0001; ++ __m128i_out = __lsx_vmaddwod_d_wu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_d_wu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_d_wu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffe000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000c6fde000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xe000e0006080b040; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffe000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000c6fde000; ++ __m128i_out = __lsx_vmaddwod_d_wu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_d_wu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x021b7d24c9678a35; ++ *((unsigned long *)&__m128i_op1[0]) = 0x030298a6a1030a49; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_d_wu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000a752a55; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0a753500950fa306; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x000000000a752a55; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0a753500950fa306; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000a752a55; ++ *((unsigned long *)&__m128i_result[0]) = 0x0a753500a9fa0d06; ++ __m128i_out = __lsx_vmaddwod_d_wu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4f4f4f4f4f4f4f4f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4f4f4f4f4f4f4f4f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x4f4f4f4f4f4f4f4f; ++ *((unsigned long *)&__m128i_result[0]) = 0x4f4f4f4f4f4f4f4f; ++ __m128i_out = 
__lsx_vmaddwod_d_wu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000017fda829; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x00ffff0000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x00ffff0000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x007f7f80807f7f80; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000020302030; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000020302030; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x3fffffffc0000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000006e17bfd8; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000006e17bfd8; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffff0100000001; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffff0100000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000006e17bfd8; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000006e17bfd8; ++ __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op2[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op2[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x00010000fffffffc; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000004; ++ __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000008000e2e3; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000000080000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000008000e2e3; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000080000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000080806362; ++ *((unsigned long *)&__m128i_result[0]) = 0x807f808000000000; ++ __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffa; ++ __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8101010181010101; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8101010181010101; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000101010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000014; ++ 
*((unsigned long *)&__m128i_op1[0]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000101010015; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffed00010001; ++ __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-3.c +new file mode 100644 +index 000000000..8a4c39502 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-3.c +@@ -0,0 +1,460 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00003fe00ffe3fe0; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_h_bu_b (__m128i_op0, 
__m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000b5207f80; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000b5207f80; ++ __m128i_out = __lsx_vmaddwod_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffe218ffffea10; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffff208fffffa02; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xfffff208fffffa02; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffe218ffffea10; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffff208fffffa02; ++ __m128i_out = __lsx_vmaddwod_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000bd3d00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000bd3d00000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000bd3d00000000; ++ __m128i_out = __lsx_vmaddwod_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned 
long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0032000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m128i_op2[0]) = 0x2020202020207f7f; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffff0000; ++ __m128i_out = __lsx_vmaddwod_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00d3012acc56f9bb; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000000000a0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100013fa0; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000120002000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000100013fa0; ++ *((unsigned long *)&__m128i_result[1]) = 0x00d3012acc56f9bb; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000001021; ++ __m128i_out = __lsx_vmaddwod_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0002000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0002000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0002000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000014; ++ __m128i_out = __lsx_vmaddwod_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op2[1]) = 0x4399d3221a29d3f2; ++ *((unsigned long *)&__m128i_op2[0]) = 0xc3818bffe7b7a7b8; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000467fe000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000003ff8; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000003ff8; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000467fef81; ++ __m128i_out = __lsx_vmaddwod_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fc0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1e801ffc00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ff020000fff4; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fc0000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x1e801ffc00000000; ++ __m128i_out = __lsx_vmaddwod_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3c600000ff800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op2[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfe80000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfe80000000000001; ++ __m128i_out = __lsx_vmaddwod_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00007f7f00007f7f; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ffffff03ffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00013fff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000088500000f6a0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001fffd00000407; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000442900007b4c; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000e22b0000efa4; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x00ffffff03ffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00013fff; ++ __m128i_out = __lsx_vmaddwod_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x14ccc6320176a4d2; ++ *((unsigned long *)&__m128i_op0[0]) = 0x685670d37e80682a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000007fffffff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x14ccc6320176a4d2; ++ *((unsigned long *)&__m128i_result[0]) = 0x685670d37e80682a; ++ __m128i_out = __lsx_vmaddwod_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc0411fe800000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x601fbfbeffffffff; ++ __m128i_out = __lsx_vmaddwod_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffc105d1aa; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffbc19ecca; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff3efa; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff0000ffff43e6; ++ __m128i_out = __lsx_vmaddwod_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffa7; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00c2758000bccf42; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00a975be00accf03; ++ *((unsigned long *)&__m128i_op2[1]) = 0x00c2758000bccf42; ++ *((unsigned long *)&__m128i_op2[0]) = 0x00a975be00accf03; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000930400008a10; ++ *((unsigned long *)&__m128i_result[0]) = 0x00006f9100007337; ++ __m128i_out = __lsx_vmaddwod_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2); 
++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_op2[0]) = 0x001000100010c410; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmaddwod_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff02ff1bff02ff23; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffffff02fff4; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff02ff1bff02ff23; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffffff02fff4; ++ *((unsigned long *)&__m128i_op2[1]) = 0x7fc000007fc00000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x1e801ffc7fc00000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7e44bde9b842ff23; ++ *((unsigned long *)&__m128i_result[0]) = 0x00011e80007edff8; ++ __m128i_out = __lsx_vmaddwod_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffeffffffff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vmaddwod_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xe0d56a9774f3ea31; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe0dd268932a5edf9; ++ *((unsigned long *)&__m128i_op1[1]) = 0xe0d56a9774f3ea31; ++ *((unsigned long *)&__m128i_op1[0]) = 0xe0dd268932a5edf9; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xd8248069ffe78077; ++ *((unsigned long *)&__m128i_result[1]) = 0xe0d56a9774f3ea31; ++ *((unsigned long *)&__m128i_result[0]) = 0xbddaa86803e33c2a; ++ __m128i_out = __lsx_vmaddwod_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0028280000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0028280000282800; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x7505853d654185f5; ++ *((unsigned long *)&__m128i_op2[0]) = 0x01010000fefe0101; ++ *((unsigned long *)&__m128i_result[1]) = 0x0028280000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x012927ffff272800; ++ __m128i_out = __lsx_vmaddwod_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ffff7f00ff00ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff007f0101017f; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x0000000020000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000183fffffe5; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000073; ++ *((unsigned long *)&__m128i_op2[0]) = 0x000000000000002a; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ffff7f00ff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff007f0101017f; ++ __m128i_out = __lsx_vmaddwod_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000002bfd9461; ++ *((unsigned long *)&__m128i_op2[1]) = 0x3f8000003f800001; ++ *((unsigned long *)&__m128i_op2[0]) = 0x3f8000003f800001; ++ *((unsigned long *)&__m128i_result[1]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_result[0]) = 0x3f8000003f800000; ++ __m128i_out = __lsx_vmaddwod_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000095896a760000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x006f0efe258ca851; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffff7fc8ffff8000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffff200000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000015516a768038; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffff9ed2e1c000; ++ __m128i_out = __lsx_vmaddwod_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000120002000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100013fa0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x2000200000013fa0; ++ *((unsigned long *)&__m128i_op1[0]) = 
0x0000000000013fa0; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000120002000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100013fa0; ++ __m128i_out = __lsx_vmaddwod_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0080008000800080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0080006b00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x001b19b1c9c6da5a; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x001b19b1c9c6da5a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0080008000800080; ++ *((unsigned long *)&__m128i_result[0]) = 0x008003496dea0c61; ++ __m128i_out = __lsx_vmaddwod_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmaddwev_q_du_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000001ff000001ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000001ff000001ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000001ff000001ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000001ff000001ff; ++ *((unsigned long *)&__m128i_op2[1]) = 0xff80ffffffffffff; ++ *((unsigned long 
*)&__m128i_op2[0]) = 0x7ffffffeffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000002fe800000ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x7ffffe0100000000; ++ __m128i_out = __lsx_vmaddwev_q_du_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff00000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xff80000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfe80000000000001; ++ __m128i_out = __lsx_vmaddwev_q_du_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_du_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_du_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_du_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +-- +2.33.0 + diff --git a/LoongArch-Add-tests-for-SX-vector-addition-vsadd-ins.patch b/LoongArch-Add-tests-for-SX-vector-addition-vsadd-ins.patch new file mode 100644 index 0000000000000000000000000000000000000000..e7e02a17dce65fa467c36cf07645e10246524c51 --- /dev/null +++ b/LoongArch-Add-tests-for-SX-vector-addition-vsadd-ins.patch @@ -0,0 +1,715 @@ +From 243656b5b87a3125c2a885d11f022a79cca98b39 Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Mon, 11 Sep 2023 10:07:24 +0800 +Subject: [PATCH 082/124] LoongArch: Add tests for SX vector addition vsadd + instructions. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lsx/lsx-vsadd-1.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vsadd-2.c: New test. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lsx/lsx-vsadd-1.c | 335 +++++++++++++++++ + .../loongarch/vector/lsx/lsx-vsadd-2.c | 345 ++++++++++++++++++ + 2 files changed, 680 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-2.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-1.c +new file mode 100644 +index 000000000..1bc27c983 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-1.c +@@ -0,0 +1,335 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vsadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfefefefefefefefe; ++ __m128i_out = __lsx_vsadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff3c992b2e; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffff730f; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff3c992b2e; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffff730f; ++ __m128i_out = __lsx_vsadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000002bfd9461; ++ *((unsigned long *)&__m128i_result[1]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000002bfd9461; ++ __m128i_out = __lsx_vsadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00d3012acc56f9bb; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001021; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00d3012acc56f9bb; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000001021; ++ __m128i_out = __lsx_vsadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000001000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000001000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000001000; ++ __m128i_out = __lsx_vsadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x80808080806b000b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x80808080806b000b; ++ __m128i_out = __lsx_vsadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffff01ff01; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3c600000ff800000; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x3c5fffffff7fffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffefffeff00feff; ++ __m128i_out = __lsx_vsadd_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsadd_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00ff00ff; ++ __m128i_out = __lsx_vsadd_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vsadd_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x40f3fa0000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x40f3fa0000000000; ++ __m128i_out = __lsx_vsadd_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000008a0000008a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000008900000009; ++ *((unsigned long *)&__m128i_op1[1]) = 0x63637687636316bb; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x6363771163631745; ++ *((unsigned long *)&__m128i_result[0]) = 0x636363ec6363636c; ++ __m128i_out = __lsx_vsadd_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000004; ++ __m128i_out = __lsx_vsadd_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000080000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000080000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000080000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000080000000; ++ __m128i_out = __lsx_vsadd_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffefefe6a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000c2bac2c2; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x00000000fefefe68; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000c2bac2c2; ++ __m128i_out = __lsx_vsadd_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x027c027c000027c0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x001ffff0003ffff0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000fffefffefffef; ++ *((unsigned long *)&__m128i_result[1]) = 0x001ffff0003ffff0; ++ *((unsigned long *)&__m128i_result[0]) = 0x028c026bfff027af; ++ __m128i_out = __lsx_vsadd_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0007000000040000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0003000000010000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0007000000040000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0003000000010000; ++ __m128i_out = __lsx_vsadd_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3fffff0000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3fffff0000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7f7fff003f800000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f7fff003f800000; ++ __m128i_out = __lsx_vsadd_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000820202020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00fe01fc0005fff4; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000003a24; ++ *((unsigned long *)&__m128i_op1[0]) = 0x003dbe88077c78c1; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000820205a44; ++ *((unsigned long *)&__m128i_result[0]) = 0x013bc084078278b5; ++ __m128i_out = __lsx_vsadd_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000140001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000140001; ++ __m128i_out = __lsx_vsadd_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsadd_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x67eb85afb2ebb000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x67eb85b0b2ebb001; ++ *((unsigned long *)&__m128i_result[0]) = 0xc8847ef6ed3f2000; ++ __m128i_out = __lsx_vsadd_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsadd_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000014eb54ab; ++ *((unsigned long *)&__m128i_op1[0]) = 0x14eb6a002a406a00; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff14eb54ab; ++ *((unsigned long *)&__m128i_result[0]) = 0x14ea6a002a406a00; ++ __m128i_out = __lsx_vsadd_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xce9035c49ffff570; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_result[0]) = 0xce9035c49ffff574; ++ __m128i_out = __lsx_vsadd_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000010; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000010; ++ __m128i_out = __lsx_vadd_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000400; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 
0x000000000000040d; ++ __m128i_out = __lsx_vadd_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001300000013; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000001300000013; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000001300000013; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001300000013; ++ __m128i_out = __lsx_vadd_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadd_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000100; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000100; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000100; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000001000000ff; ++ __m128i_out = __lsx_vadd_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000300000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000002fffffffb; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000010000fffb; ++ __m128i_out = 
__lsx_vadd_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadd_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-2.c +new file mode 100644 +index 000000000..67d189991 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-2.c +@@ -0,0 +1,345 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x10f917d72d3d01e4; ++ *((unsigned long *)&__m128i_op1[0]) = 0x203e16d116de012b; ++ *((unsigned long *)&__m128i_result[1]) = 0x10f917d72d3d01e4; ++ *((unsigned long *)&__m128i_result[0]) = 0x203e16d116de012b; ++ __m128i_out = __lsx_vsadd_bu 
(__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffebd06fffe820c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fff7ffe7fff3506; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffebd06fffe820c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fff7ffe7fff3506; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffff0cffffff18; ++ *((unsigned long *)&__m128i_result[0]) = 0xfefffefffeff6a0c; ++ __m128i_out = __lsx_vsadd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsadd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsadd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsadd_bu (__m128i_op0, __m128i_op1); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffff60ca7104649; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffff790a15db63d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffff60ca710464a; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffff790a15db63e; ++ __m128i_out = __lsx_vsadd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffff46; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsadd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00fe000100cf005f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x5f675e96e29a5a60; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x5fff5e97e2ff5abf; ++ *((unsigned long *)&__m128i_result[0]) = 0xfefffefffefffeff; ++ __m128i_out = __lsx_vsadd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001000100010; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010058; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001001100110068; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsadd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fff010181010102; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fffffff81010102; ++ *((unsigned long *)&__m128i_result[1]) = 0xfeffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfeffffffffffffff; ++ __m128i_out = __lsx_vsadd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ebd20000714f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00012c8a0000a58a; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffb81a6f70; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000d48eaa1a2; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffb81ae0bf; ++ *((unsigned long *)&__m128i_result[0]) = 0x00012c9748eaffff; ++ __m128i_out = __lsx_vsadd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0177fff0fffffff0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000011ff8bc; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsadd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000200; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000200; ++ __m128i_out = __lsx_vsadd_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vsadd_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsadd_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsadd_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000d0000000d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8006000000040000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8002000000000007; ++ *((unsigned long *)&__m128i_result[1]) = 0x8006000000040000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8002000d00000014; ++ __m128i_out = __lsx_vsadd_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000014; ++ __m128i_out = __lsx_vsadd_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsadd_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsadd_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_out = __lsx_vpickve2gr_h (__m128i_op0, 0x1); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000600007fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000008ffffa209; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000600007fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000008ffffa209; ++ __m128i_out = __lsx_vsadd_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x636363633f3e47c1; ++ *((unsigned long *)&__m128i_op0[0]) = 0x41f8e080f1ef4eaa; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000807bf0a1f80; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000800ecedee68; ++ *((unsigned long *)&__m128i_result[1]) = 0x63636b6afe486741; ++ *((unsigned long *)&__m128i_result[0]) = 0x41f8e880ffffffff; ++ __m128i_out = __lsx_vsadd_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ebd20000714f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00012c8a0000a58a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ebd20000714f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00012c8a0000a58a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000e29e; ++ *((unsigned long *)&__m128i_result[0]) = 0x000259140000ffff; ++ __m128i_out = __lsx_vsadd_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffeffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffeffffffff; ++ __m128i_out = __lsx_vsadd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0c03e17edd781b11; ++ *((unsigned long *)&__m128i_op0[0]) = 0x342caf9be55700b5; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00040003ff83ff84; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00040003ff4dffca; ++ *((unsigned long *)&__m128i_result[1]) = 0x0c07e181ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x3430af9effffffff; ++ __m128i_out = __lsx_vsadd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x00000000ffa8ff9f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffffffabff99; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000100000002007d; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000000020001; ++ *((unsigned long *)&__m128i_result[1]) = 0x00010000ffab001c; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001ffffffadff9a; ++ __m128i_out = __lsx_vsadd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0800080008000800; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0800080008000800; ++ *((unsigned long *)&__m128i_result[1]) = 0x0800080008000800; ++ *((unsigned long *)&__m128i_result[0]) = 0x0800080008000800; ++ __m128i_out = __lsx_vsadd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x76f424887fffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xc110000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc00d060000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xc110000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff7fffffff; ++ __m128i_out = __lsx_vsadd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000002f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000029; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfbfbfb17fbfb38ea; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfbfb47fbfbfb0404; ++ *((unsigned long *)&__m128i_result[1]) = 0xfbfbfb17fbfb3919; ++ *((unsigned long *)&__m128i_result[0]) = 0xfbfb47fbfbfb042d; ++ __m128i_out = __lsx_vsadd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m128i_op0[0]) = 0x8080808080808081; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x80808080ffffffff; ++ __m128i_out = __lsx_vsadd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00123fff00120012; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0012001200120012; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000005003a; ++ *((unsigned long *)&__m128i_result[1]) = 0x00123fff00120012; ++ *((unsigned long *)&__m128i_result[0]) = 0x001200120017004c; ++ __m128i_out = __lsx_vsadd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xbfd10d0d7b6b6b73; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc5c534920000c4ed; ++ *((unsigned long *)&__m128i_result[1]) = 0xbfd10d0d7b6b6b73; ++ *((unsigned long *)&__m128i_result[0]) = 0xc5c534920000c4ed; ++ __m128i_out = __lsx_vsadd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000aa822a79308f6; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000084d12ce; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000aa822a79308f6; ++ *((unsigned long *)&__m128i_op1[0]) = 0x03aa558e1d37b5a1; ++ *((unsigned long *)&__m128i_result[1]) = 0x00155044ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x03aa558e2584c86f; ++ __m128i_out = __lsx_vsadd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x021b7d24c9678a35; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x030298a6a1030a49; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x021b7d24c9678a35; ++ *((unsigned long *)&__m128i_result[0]) = 0x030298a6a1030a49; ++ __m128i_out = __lsx_vsadd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00007a8000000480; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000485000004cc; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00007a8000000480; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000485000004cc; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000f50000000900; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000090a00000998; ++ __m128i_out = __lsx_vsadd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x004eff6200d2ff76; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff70002800be00a0; ++ *((unsigned long *)&__m128i_result[1]) = 0x004eff6200d2ff76; ++ *((unsigned long *)&__m128i_result[0]) = 0xff70002800be00a0; ++ __m128i_out = __lsx_vsadd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +-- +2.33.0 + diff --git a/LoongArch-Add-tests-for-SX-vector-floating-point-ari.patch b/LoongArch-Add-tests-for-SX-vector-floating-point-ari.patch new file mode 100644 index 0000000000000000000000000000000000000000..5c4d6f361e5a592bacd982f9d0a51ca659aa188c --- /dev/null +++ b/LoongArch-Add-tests-for-SX-vector-floating-point-ari.patch @@ -0,0 +1,2928 @@ +From 4ccb21b6d2d23046c6a71c4540a1eb288609f041 Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Tue, 12 Sep 2023 11:25:20 +0800 +Subject: [PATCH 093/124] LoongArch: Add tests for SX vector floating point + arithmetic instructions. 
+ +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lsx/lsx-vfadd_d.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vfadd_s.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vfclass_d.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vfclass_s.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vflogb_d.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vflogb_s.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vfmax_d.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vfmax_s.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vfmaxa_d.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vfmaxa_s.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vfsqrt_d.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vfsqrt_s.c: New test. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lsx/lsx-vfadd_d.c | 407 +++++++++++++++ + .../loongarch/vector/lsx/lsx-vfadd_s.c | 470 ++++++++++++++++++ + .../loongarch/vector/lsx/lsx-vfclass_d.c | 83 ++++ + .../loongarch/vector/lsx/lsx-vfclass_s.c | 74 +++ + .../loongarch/vector/lsx/lsx-vflogb_d.c | 76 +++ + .../loongarch/vector/lsx/lsx-vflogb_s.c | 185 +++++++ + .../loongarch/vector/lsx/lsx-vfmax_d.c | 200 ++++++++ + .../loongarch/vector/lsx/lsx-vfmax_s.c | 335 +++++++++++++ + .../loongarch/vector/lsx/lsx-vfmaxa_d.c | 155 ++++++ + .../loongarch/vector/lsx/lsx-vfmaxa_s.c | 230 +++++++++ + .../loongarch/vector/lsx/lsx-vfsqrt_d.c | 216 ++++++++ + .../loongarch/vector/lsx/lsx-vfsqrt_s.c | 372 ++++++++++++++ + 12 files changed, 2803 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_d.c + create mode 100644 
gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_s.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_d.c +new file mode 100644 +index 000000000..7ffbd385e +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_d.c +@@ -0,0 +1,407 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfadd_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00000000fea8ff44; ++ *((unsigned long *)&__m128d_op1[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m128d_op1[0]) = 0x2020202020202020; ++ *((unsigned long *)&__m128d_result[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m128d_result[0]) = 0x2020202020202020; ++ __m128d_out = __lsx_vfadd_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x1000100010001000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x1000100010001000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x1000100010001000; ++ *((unsigned long *)&__m128d_result[0]) = 0x1000100010001000; ++ __m128d_out = __lsx_vfadd_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x000000000000000f; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x000000000000000f; ++ __m128d_out = __lsx_vfadd_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfadd_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ 
*((unsigned long *)&__m128d_op0[1]) = 0x0000010100fe0101; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffff0200ffff01ff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128d_result[1]) = 0x0001010100fe0100; ++ *((unsigned long *)&__m128d_result[0]) = 0xffff0200ffff01ff; ++ __m128d_out = __lsx_vfadd_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfadd_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x7fff0101ffffe000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x7fffffffa0204000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x7f370101ff04ffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x7f3bffffa0226021; ++ *((unsigned long *)&__m128d_result[1]) = 0x7fff0101ffffe000; ++ *((unsigned long *)&__m128d_result[0]) = 0x7fffffffa0204000; ++ __m128d_out = __lsx_vfadd_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfadd_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 
0x0000ebd20000714f; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00012c8a0000a58a; ++ *((unsigned long *)&__m128d_op1[1]) = 0xf654ad7447e59090; ++ *((unsigned long *)&__m128d_op1[0]) = 0x27b1b106b8145f50; ++ *((unsigned long *)&__m128d_result[1]) = 0xf654ad7447e59090; ++ *((unsigned long *)&__m128d_result[0]) = 0x27b1b106b8145f50; ++ __m128d_out = __lsx_vfadd_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmul_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmul_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000001300000013; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000001300000013; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmul_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000100000000000; ++ *((unsigned long 
*)&__m128d_op0[0]) = 0x1000100000001000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000100000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x1000100000001000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmul_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0010001000100010; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmul_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmul_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000007000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmul_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x01533b5e7489ae24; ++ *((unsigned long *)&__m128d_op0[0]) = 
0xffffab7e71e33848; ++ *((unsigned long *)&__m128d_op1[1]) = 0x01533b5e7489ae24; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffab7e71e33848; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffab7e71e33848; ++ __m128d_out = __lsx_vfmul_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmul_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128d_result[1]) = 0x800000ff000000ff; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfsub_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x00000000fff8fff8; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00000000fff80000; ++ *((unsigned long *)&__m128d_result[1]) = 0x80000000fff8fff8; ++ *((unsigned long *)&__m128d_result[0]) = 0x80000000fff80000; ++ __m128d_out = __lsx_vfsub_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00000001fffffffe; ++ *((unsigned long 
*)&__m128d_op1[1]) = 0xb55ccf30f52a6a68; ++ *((unsigned long *)&__m128d_op1[0]) = 0x4e0018eceb82c53a; ++ *((unsigned long *)&__m128d_result[1]) = 0x355ccf30f52a6a68; ++ *((unsigned long *)&__m128d_result[0]) = 0xce0018eceb82c53a; ++ __m128d_out = __lsx_vfsub_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffff00006c82; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00009b140000917b; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffff00006c82; ++ *((unsigned long *)&__m128d_result[0]) = 0x00009b140000917b; ++ __m128d_out = __lsx_vfsub_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000100000020; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000083b00000000; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfsub_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xe93d0bd19ff0c170; ++ *((unsigned long *)&__m128d_op0[0]) = 0x5237c1bac9eadf55; ++ *((unsigned long *)&__m128d_op1[1]) = 0xe6d4572c8a5835bc; ++ *((unsigned long *)&__m128d_op1[0]) = 0xe5017c2ac9ca9fd0; ++ *((unsigned long *)&__m128d_result[1]) = 0xe93d0bd19ff07013; ++ *((unsigned long *)&__m128d_result[0]) = 0x65017c2ac9ca9fd0; ++ __m128d_out = __lsx_vfsub_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xe93d0bd19ff07013; ++ *((unsigned long *)&__m128d_op0[0]) = 0x65017c2ac9ca9fd0; ++ *((unsigned long *)&__m128d_op1[1]) = 
0x00008bf700017052; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000f841000091aa; ++ *((unsigned long *)&__m128d_result[1]) = 0xe93d0bd19ff07013; ++ *((unsigned long *)&__m128d_result[0]) = 0x65017c2ac9ca9fd0; ++ __m128d_out = __lsx_vfsub_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00000001ca02f854; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00000001ca02f854; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfsub_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000004000000002; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x5555410154551515; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0004455501500540; ++ *((unsigned long *)&__m128d_result[1]) = 0xd555410154551515; ++ *((unsigned long *)&__m128d_result[0]) = 0x8004455501500540; ++ __m128d_out = __lsx_vfsub_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfsub_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x000300037ff000ff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0003000300a10003; ++ *((unsigned long *)&__m128d_op1[1]) = 0x000000007ff000ff; ++ *((unsigned long 
*)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0003000300000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0003000300a10003; ++ __m128d_out = __lsx_vfsub_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x56a09e662ab46b31; ++ *((unsigned long *)&__m128d_op1[0]) = 0xb4b8122ef4054bb3; ++ *((unsigned long *)&__m128d_result[1]) = 0xd6a09e662ab46b31; ++ *((unsigned long *)&__m128d_result[0]) = 0x34b8122ef4054bb3; ++ __m128d_out = __lsx_vfsub_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfdiv_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x7f4000007f040000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x7f0200007f020000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xfffffffff8f8dada; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffff01018888; ++ *((unsigned long *)&__m128d_result[1]) = 0xfffffffff8f8dada; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffff01018888; ++ __m128d_out = __lsx_vfdiv_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfdiv_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000100007f01; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x7ff8000000000000; ++ __m128d_out = __lsx_vfdiv_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffefefffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000020; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0400000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffefefffffffff; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfdiv_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x7ff8000000000000; ++ __m128d_out = __lsx_vfdiv_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00000000ff801c9e; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000810000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128d_result[1]) = 0x40eff02383e383e4; ++ *((unsigned long *)&__m128d_result[0]) = 0x7ff0000000000000; ++ __m128d_out = __lsx_vfdiv_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfdiv_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0001000000010000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000cd630000cd63; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffff00000000ffff; ++ *((unsigned long *)&__m128d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xffff00000000ffff; ++ __m128d_out = __lsx_vfdiv_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x000aa822a79308f6; ++ *((unsigned long *)&__m128d_op1[0]) = 0x03aa558e1d37b5a1; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfdiv_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[1]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfdiv_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m128d_op0[0]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m128d_op1[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m128d_op1[0]) = 0xfffefffe011df03e; ++ *((unsigned long *)&__m128d_result[1]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m128d_result[0]) = 0xfffffffefffffffe; ++ __m128d_out = __lsx_vfdiv_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_s.c +new file mode 100644 +index 000000000..388430278 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_s.c +@@ -0,0 +1,470 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x05050505; ++ *((int *)&__m128_op0[2]) = 0x05050505; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x05050000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x03574e38; ++ *((int 
*)&__m128_op1[0]) = 0xe496cbc9; ++ *((int *)&__m128_result[3]) = 0x05050505; ++ *((int *)&__m128_result[2]) = 0x05050505; ++ *((int *)&__m128_result[1]) = 0x03574e38; ++ *((int *)&__m128_result[0]) = 0xe496cbc9; ++ __m128_out = __lsx_vfadd_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfadd_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfadd_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x0000000f; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00077f88; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 
0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00077f97; ++ __m128_out = __lsx_vfadd_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x000000ff; ++ *((int *)&__m128_op0[0]) = 0x000000ff; ++ *((int *)&__m128_op1[3]) = 0x370bdfec; ++ *((int *)&__m128_op1[2]) = 0xffecffec; ++ *((int *)&__m128_op1[1]) = 0x370bdfec; ++ *((int *)&__m128_op1[0]) = 0xffecffec; ++ *((int *)&__m128_result[3]) = 0x370bdfec; ++ *((int *)&__m128_result[2]) = 0xffecffec; ++ *((int *)&__m128_result[1]) = 0x370bdfec; ++ *((int *)&__m128_result[0]) = 0xffecffec; ++ __m128_out = __lsx_vfadd_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x0000ff00; ++ *((int *)&__m128_op1[0]) = 0x00ff0000; ++ *((int *)&__m128_result[3]) = 0xffffffff; ++ *((int *)&__m128_result[2]) = 0xffffffff; ++ *((int *)&__m128_result[1]) = 0xffffffff; ++ *((int *)&__m128_result[0]) = 0xffffffff; ++ __m128_out = __lsx_vfadd_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffff0000; ++ *((int *)&__m128_op0[2]) = 0xffff0000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x40088040; ++ *((int *)&__m128_op1[2]) = 0x80040110; ++ *((int *)&__m128_op1[1]) = 0x40408010; ++ *((int *)&__m128_op1[0]) = 0x80200110; ++ *((int *)&__m128_result[3]) = 0xffff0000; ++ *((int *)&__m128_result[2]) = 0xffff0000; ++ *((int *)&__m128_result[1]) = 0x40408010; ++ *((int *)&__m128_result[0]) = 0x80200110; ++ 
__m128_out = __lsx_vfadd_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xfffffffc; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xfffffffc; ++ *((int *)&__m128_result[3]) = 0xffffffff; ++ *((int *)&__m128_result[2]) = 0xfffffffc; ++ *((int *)&__m128_result[1]) = 0xffffffff; ++ *((int *)&__m128_result[0]) = 0xfffffffc; ++ __m128_out = __lsx_vfadd_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x0000001b; ++ *((int *)&__m128_op0[2]) = 0x0000001b; ++ *((int *)&__m128_op0[1]) = 0x0000001b; ++ *((int *)&__m128_op0[0]) = 0x0000001b; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x0000001b; ++ *((int *)&__m128_result[2]) = 0x0000001b; ++ *((int *)&__m128_result[1]) = 0x0000001b; ++ *((int *)&__m128_result[0]) = 0x0000001b; ++ __m128_out = __lsx_vfadd_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x56411278; ++ *((int *)&__m128_op0[2]) = 0x43c0d41e; ++ *((int *)&__m128_op0[1]) = 0x0124d8f6; ++ *((int *)&__m128_op0[0]) = 0xa494006b; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0xffffffff; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x80000000; ++ __m128_out = __lsx_vfmul_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ 
*((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x05010501; ++ *((int *)&__m128_op1[2]) = 0x05010501; ++ *((int *)&__m128_op1[1]) = 0x05010501; ++ *((int *)&__m128_op1[0]) = 0x0501050c; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmul_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x21f32eaf; ++ *((int *)&__m128_op0[2]) = 0x5b7a02c8; ++ *((int *)&__m128_op0[1]) = 0x407c2ca3; ++ *((int *)&__m128_op0[0]) = 0x2cbd0357; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00010400; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmul_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xfffe0001; ++ *((int *)&__m128_op0[2]) = 0xfffe0001; ++ *((int *)&__m128_op0[1]) = 0xfffe0001; ++ *((int *)&__m128_op0[0]) = 0xfffe0001; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0xfffe0001; ++ *((int *)&__m128_result[2]) = 0xfffe0001; ++ *((int *)&__m128_result[1]) = 0xfffe0001; ++ *((int *)&__m128_result[0]) = 0xfffe0001; ++ __m128_out = __lsx_vfmul_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00002ebf; ++ *((int *)&__m128_op0[1]) = 
0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0xffffffff; ++ *((int *)&__m128_result[0]) = 0xffffffff; ++ __m128_out = __lsx_vfmul_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x01000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmul_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00081f1f; ++ *((int *)&__m128_op0[2]) = 0x1f1f1f1f; ++ *((int *)&__m128_op0[1]) = 0x1f1f1f1f; ++ *((int *)&__m128_op0[0]) = 0x1f1f1f1f; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmul_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x021b7d24; ++ *((int *)&__m128_op0[2]) = 0x49678a35; ++ *((int *)&__m128_op0[1]) = 0x030298a6; ++ *((int *)&__m128_op0[0]) = 0x21030a49; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int 
*)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000002; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmul_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmul_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xf6548a17; ++ *((int *)&__m128_op0[2]) = 0x47e59090; ++ *((int *)&__m128_op0[1]) = 0x27b169bb; ++ *((int *)&__m128_op0[0]) = 0xb8145f50; ++ *((int *)&__m128_op1[3]) = 0x004eff62; ++ *((int *)&__m128_op1[2]) = 0x00d2ff76; ++ *((int *)&__m128_op1[1]) = 0xff700028; ++ *((int *)&__m128_op1[0]) = 0x00be00a0; ++ *((int *)&__m128_result[3]) = 0xb7032c34; ++ *((int *)&__m128_result[2]) = 0x093d35ab; ++ *((int *)&__m128_result[1]) = 0xe7a6533b; ++ *((int *)&__m128_result[0]) = 0x800001b8; ++ __m128_out = __lsx_vfmul_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 
0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfsub_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x7fff0101; ++ *((int *)&__m128_op0[2]) = 0x81010102; ++ *((int *)&__m128_op0[1]) = 0x7fffffff; ++ *((int *)&__m128_op0[0]) = 0x81010102; ++ *((int *)&__m128_op1[3]) = 0x00000fff; ++ *((int *)&__m128_op1[2]) = 0xffffe000; ++ *((int *)&__m128_op1[1]) = 0x00001020; ++ *((int *)&__m128_op1[0]) = 0x20204000; ++ *((int *)&__m128_result[3]) = 0x7fff0101; ++ *((int *)&__m128_result[2]) = 0xffffe000; ++ *((int *)&__m128_result[1]) = 0x7fffffff; ++ *((int *)&__m128_result[0]) = 0xa0204000; ++ __m128_out = __lsx_vfsub_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0x00000fff; ++ *((int *)&__m128_op1[2]) = 0xffffe000; ++ *((int *)&__m128_op1[1]) = 0x00001020; ++ *((int *)&__m128_op1[0]) = 0x20204000; ++ *((int *)&__m128_result[3]) = 0x80000fff; ++ *((int *)&__m128_result[2]) = 0xffffffff; ++ *((int *)&__m128_result[1]) = 0x80001020; ++ *((int *)&__m128_result[0]) = 0xffffffff; ++ __m128_out = __lsx_vfsub_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int 
*)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfsub_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x54feed87; ++ *((int *)&__m128_op0[2]) = 0xbc3f2be1; ++ *((int *)&__m128_op0[1]) = 0x8064d8f6; ++ *((int *)&__m128_op0[0]) = 0xa494afcb; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x7f800000; ++ *((int *)&__m128_result[2]) = 0xff800000; ++ *((int *)&__m128_result[1]) = 0xff800000; ++ *((int *)&__m128_result[0]) = 0xff800000; ++ __m128_out = __lsx_vfdiv_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xd8248069; ++ *((int *)&__m128_op0[0]) = 0x7f678077; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0xd8248069; ++ *((int *)&__m128_op1[0]) = 0x7f678077; ++ *((int *)&__m128_result[3]) = 0x7fc00000; ++ *((int *)&__m128_result[2]) = 0x7fc00000; ++ *((int *)&__m128_result[1]) = 0x3f800000; ++ *((int *)&__m128_result[0]) = 0x3f800000; ++ __m128_out = __lsx_vfdiv_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x7fc00000; ++ *((int *)&__m128_result[2]) = 0x7fc00000; ++ *((int *)&__m128_result[1]) = 0x7fc00000; ++ *((int *)&__m128_result[0]) = 0x7fc00000; ++ __m128_out = __lsx_vfdiv_s 
(__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00070000; ++ *((int *)&__m128_op0[2]) = 0x00040000; ++ *((int *)&__m128_op0[1]) = 0x00030000; ++ *((int *)&__m128_op0[0]) = 0x00010000; ++ *((int *)&__m128_op1[3]) = 0x00070000; ++ *((int *)&__m128_op1[2]) = 0x00040000; ++ *((int *)&__m128_op1[1]) = 0x00030000; ++ *((int *)&__m128_op1[0]) = 0x00010000; ++ *((int *)&__m128_result[3]) = 0x3f800000; ++ *((int *)&__m128_result[2]) = 0x3f800000; ++ *((int *)&__m128_result[1]) = 0x3f800000; ++ *((int *)&__m128_result[0]) = 0x3f800000; ++ __m128_out = __lsx_vfdiv_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00010001; ++ *((int *)&__m128_op1[2]) = 0x0001007c; ++ *((int *)&__m128_op1[1]) = 0x00010001; ++ *((int *)&__m128_op1[0]) = 0x00010001; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfdiv_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00001fff; ++ *((int *)&__m128_op0[2]) = 0x00001fff; ++ *((int *)&__m128_op0[1]) = 0x00000003; ++ *((int *)&__m128_op0[0]) = 0xfffffffc; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x7f800000; ++ *((int *)&__m128_result[2]) = 0x7f800000; ++ *((int *)&__m128_result[1]) = 0x7f800000; ++ *((int *)&__m128_result[0]) = 0xfffffffc; ++ __m128_out = __lsx_vfdiv_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 
0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x7fc00000; ++ *((int *)&__m128_result[2]) = 0x7fc00000; ++ *((int *)&__m128_result[1]) = 0x7fc00000; ++ *((int *)&__m128_result[0]) = 0x7fc00000; ++ __m128_out = __lsx_vfdiv_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_d.c +new file mode 100644 +index 000000000..9706d7adc +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_d.c +@@ -0,0 +1,83 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0001ffff0001ffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0001ffff0001ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000100; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000100; ++ __m128i_out = __lsx_vfclass_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000200; ++ __m128i_out = __lsx_vfclass_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000200; ++ __m128i_out = __lsx_vfclass_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000002; ++ __m128i_out = __lsx_vfclass_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128d_op0[0]) = 0xff00000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000008; ++ __m128i_out = __lsx_vfclass_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000200; ++ __m128i_out = __lsx_vfclass_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x14ccc6320176a4d2; ++ *((unsigned long *)&__m128d_op0[0]) = 0x685670d37e80682a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000080; ++ 
__m128i_out = __lsx_vfclass_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000200; ++ __m128i_out = __lsx_vfclass_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000200; ++ __m128i_out = __lsx_vfclass_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_s.c +new file mode 100644 +index 000000000..7166f954b +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_s.c +@@ -0,0 +1,74 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x7fff8000; ++ *((int *)&__m128_op0[1]) = 0x00010081; ++ *((int *)&__m128_op0[0]) = 0x00000005; ++ *((unsigned 
long *)&__m128i_result[1]) = 0x0000020000000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000010000000100; ++ __m128i_out = __lsx_vfclass_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xfe02fe02; ++ *((int *)&__m128_op0[2]) = 0xfe02fe02; ++ *((int *)&__m128_op0[1]) = 0xfe02fe02; ++ *((int *)&__m128_op0[0]) = 0xfe02fe02; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000800000008; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000800000008; ++ __m128i_out = __lsx_vfclass_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x0000000c; ++ *((int *)&__m128_op0[2]) = 0x7fff000c; ++ *((int *)&__m128_op0[1]) = 0x10001000; ++ *((int *)&__m128_op0[0]) = 0x10001000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000010000000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000008000000080; ++ __m128i_out = __lsx_vfclass_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000020000000200; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000020000000200; ++ __m128i_out = __lsx_vfclass_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000020000000200; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000020000000200; ++ __m128i_out = __lsx_vfclass_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x0c0b0a09; ++ *((int *)&__m128_op0[2]) = 0x0b0a0908; ++ *((int *)&__m128_op0[1]) = 0x0a090807; ++ *((int *)&__m128_op0[0]) = 0x09080706; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x0000008000000080; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000008000000080; ++ __m128i_out = __lsx_vfclass_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_d.c +new file mode 100644 +index 000000000..cc36bf136 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_d.c +@@ -0,0 +1,76 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m128d_result[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xc090c40000000000; ++ __m128d_out = __lsx_vflogb_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xfff0000000000000; ++ __m128d_out = __lsx_vflogb_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; 
++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xfff0000000000000; ++ __m128d_out = __lsx_vflogb_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xfff0000000000000; ++ __m128d_out = __lsx_vflogb_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xfff0000000000000; ++ __m128d_out = __lsx_vflogb_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xfff0000000000000; ++ __m128d_out = __lsx_vflogb_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xfff0000000000000; ++ __m128d_out = __lsx_vflogb_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xfffffffffffbfffc; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000001000000048; ++ *((unsigned long *)&__m128d_result[1]) = 0xfffffffffffbfffc; ++ *((unsigned long *)&__m128d_result[0]) = 0xc090380000000000; ++ __m128d_out = 
__lsx_vflogb_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_s.c +new file mode 100644 +index 000000000..624589620 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_s.c +@@ -0,0 +1,185 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00003004; ++ *((int *)&__m128_result[3]) = 0xff800000; ++ *((int *)&__m128_result[2]) = 0xff800000; ++ *((int *)&__m128_result[1]) = 0xff800000; ++ *((int *)&__m128_result[0]) = 0xc3080000; ++ __m128_out = __lsx_vflogb_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0xffffffff; ++ *((int *)&__m128_result[2]) = 0xffffffff; ++ *((int *)&__m128_result[1]) = 0xff800000; ++ *((int *)&__m128_result[0]) = 0xff800000; ++ __m128_out = __lsx_vflogb_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int 
*)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_result[3]) = 0xff800000; ++ *((int *)&__m128_result[2]) = 0xff800000; ++ *((int *)&__m128_result[1]) = 0xffffffff; ++ *((int *)&__m128_result[0]) = 0xffffffff; ++ __m128_out = __lsx_vflogb_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x01010101; ++ *((int *)&__m128_op0[2]) = 0x01010101; ++ *((int *)&__m128_op0[1]) = 0x01010101; ++ *((int *)&__m128_op0[0]) = 0x01010101; ++ *((int *)&__m128_result[3]) = 0xc2fa0000; ++ *((int *)&__m128_result[2]) = 0xc2fa0000; ++ *((int *)&__m128_result[1]) = 0xc2fa0000; ++ *((int *)&__m128_result[0]) = 0xc2fa0000; ++ __m128_out = __lsx_vflogb_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x01ff01ff; ++ *((int *)&__m128_op0[2]) = 0x01ff01ff; ++ *((int *)&__m128_op0[1]) = 0x01ff01ff; ++ *((int *)&__m128_op0[0]) = 0x01ff01ff; ++ *((int *)&__m128_result[3]) = 0xc2f80000; ++ *((int *)&__m128_result[2]) = 0xc2f80000; ++ *((int *)&__m128_result[1]) = 0xc2f80000; ++ *((int *)&__m128_result[0]) = 0xc2f80000; ++ __m128_out = __lsx_vflogb_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0xd46cdc13; ++ *((int *)&__m128_result[3]) = 0xff800000; ++ *((int *)&__m128_result[2]) = 0xff800000; ++ *((int *)&__m128_result[1]) = 0xff800000; ++ *((int *)&__m128_result[0]) = 0x7fc00000; ++ __m128_out = __lsx_vflogb_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00fe00fe; ++ *((int *)&__m128_op0[2]) = 0x000200fe; ++ *((int *)&__m128_op0[1]) = 0x00fe00fe; ++ *((int *)&__m128_op0[0]) = 0x000200fe; ++ *((int *)&__m128_result[3]) = 
0xc2fc0000; ++ *((int *)&__m128_result[2]) = 0xc3040000; ++ *((int *)&__m128_result[1]) = 0xc2fc0000; ++ *((int *)&__m128_result[0]) = 0xc3040000; ++ __m128_out = __lsx_vflogb_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x01010101; ++ *((int *)&__m128_op0[0]) = 0x00000100; ++ *((int *)&__m128_result[3]) = 0xff800000; ++ *((int *)&__m128_result[2]) = 0xff800000; ++ *((int *)&__m128_result[1]) = 0xc2fa0000; ++ *((int *)&__m128_result[0]) = 0xc30d0000; ++ __m128_out = __lsx_vflogb_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000014; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000014; ++ *((int *)&__m128_result[3]) = 0xff800000; ++ *((int *)&__m128_result[2]) = 0xc3110000; ++ *((int *)&__m128_result[1]) = 0xff800000; ++ *((int *)&__m128_result[0]) = 0xc3110000; ++ __m128_out = __lsx_vflogb_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x4e3e1337; ++ *((int *)&__m128_op0[0]) = 0x38bb47d2; ++ *((int *)&__m128_result[3]) = 0xff800000; ++ *((int *)&__m128_result[2]) = 0xff800000; ++ *((int *)&__m128_result[1]) = 0x41e80000; ++ *((int *)&__m128_result[0]) = 0xc1600000; ++ __m128_out = __lsx_vflogb_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0xff800000; ++ *((int *)&__m128_result[2]) = 0xff800000; ++ *((int *)&__m128_result[1]) = 0xff800000; ++ *((int *)&__m128_result[0]) = 0xff800000; ++ __m128_out = __lsx_vflogb_s (__m128_op0); ++ 
ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0xff800000; ++ *((int *)&__m128_result[2]) = 0xff800000; ++ *((int *)&__m128_result[1]) = 0xff800000; ++ *((int *)&__m128_result[0]) = 0xff800000; ++ __m128_out = __lsx_vflogb_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0xff800000; ++ *((int *)&__m128_result[2]) = 0xff800000; ++ *((int *)&__m128_result[1]) = 0xff800000; ++ *((int *)&__m128_result[0]) = 0xff800000; ++ __m128_out = __lsx_vflogb_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00003ff8; ++ *((int *)&__m128_result[3]) = 0xff800000; ++ *((int *)&__m128_result[2]) = 0xff800000; ++ *((int *)&__m128_result[1]) = 0xff800000; ++ *((int *)&__m128_result[0]) = 0xc3080000; ++ __m128_out = __lsx_vflogb_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xf1f181a2; ++ *((int *)&__m128_op0[2]) = 0xf1f1f1b0; ++ *((int *)&__m128_op0[1]) = 0xf1f1f1f1; ++ *((int *)&__m128_op0[0]) = 0xf180f1f1; ++ *((int *)&__m128_result[3]) = 0x7fc00000; ++ *((int *)&__m128_result[2]) = 0x7fc00000; ++ *((int *)&__m128_result[1]) = 0x7fc00000; ++ *((int *)&__m128_result[0]) = 0x7fc00000; ++ __m128_out = __lsx_vflogb_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_d.c 
b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_d.c +new file mode 100644 +index 000000000..442473fb4 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_d.c +@@ -0,0 +1,200 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmax_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000010; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0400040004000400; ++ *((unsigned long *)&__m128d_result[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m128d_result[0]) = 0x0400040004000400; ++ __m128d_out = __lsx_vfmax_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x01ff01ff01ff01ff; ++ *((unsigned long *)&__m128d_op0[0]) = 
0x01ff01ff01ff01ff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x01ff01ff01ff01ff; ++ *((unsigned long *)&__m128d_result[0]) = 0x01ff01ff01ff01ff; ++ __m128d_out = __lsx_vfmax_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m128d_op0[0]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[1]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m128d_result[0]) = 0xfffcfffcfffcfffc; ++ __m128d_out = __lsx_vfmax_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x000000000000ffff; ++ __m128d_out = __lsx_vfmax_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmax_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128d_op0[0]) = 0x6363636363636363; ++ *((unsigned long 
*)&__m128d_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128d_op1[0]) = 0xfdfef9ff0efff900; ++ *((unsigned long *)&__m128d_result[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128d_result[0]) = 0x6363636363636363; ++ __m128d_out = __lsx_vfmax_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xd70b30c96ea9f4e8; ++ *((unsigned long *)&__m128d_op0[0]) = 0xa352bfac9269e0aa; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmax_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x98147a504d145000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x377b810912c0e000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xfffffffffffffff7; ++ *((unsigned long *)&__m128d_op1[0]) = 0xfffffffffffffff7; ++ *((unsigned long *)&__m128d_result[1]) = 0x98147a504d145000; ++ *((unsigned long *)&__m128d_result[0]) = 0x377b810912c0e000; ++ __m128d_out = __lsx_vfmax_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x4399d3221a29d3f2; ++ *((unsigned long *)&__m128d_op0[0]) = 0xc3818bffe7b7a7b8; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x4399d3221a29d3f2; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmax_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x17c64aaef639f093; ++ *((unsigned long *)&__m128d_op0[0]) = 0xdb8f439722ec502d; ++ *((unsigned long *)&__m128d_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x17c64aaef639f093; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmax_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x10f881a20ffd02b0; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00000000ff800000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x10f881a20ffd02b0; ++ *((unsigned long *)&__m128d_result[0]) = 0x00000000ff800000; ++ __m128d_out = __lsx_vfmax_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmin_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfmin_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000006f00001f0a; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000c000ffffc000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000006f00001f0a; ++ *((unsigned long 
*)&__m128d_op1[0]) = 0x0000958affff995d; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000006f00001f0a; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000958affff995d; ++ __m128d_out = __lsx_vfmin_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmin_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmin_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmin_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 
0x1748c4f9ed1a5870; ++ *((unsigned long *)&__m128d_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmin_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmin_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_s.c +new file mode 100644 +index 000000000..876588827 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_s.c +@@ -0,0 +1,335 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x0000ffff; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 
0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x0000ffff; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmax_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xc2409eda; ++ *((int *)&__m128_op1[2]) = 0xb019323f; ++ *((int *)&__m128_op1[1]) = 0x460f3b39; ++ *((int *)&__m128_op1[0]) = 0x3ef4be3a; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x460f3b39; ++ *((int *)&__m128_result[0]) = 0x3ef4be3a; ++ __m128_out = __lsx_vfmax_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000001; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000001; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000001; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000001; ++ __m128_out = __lsx_vfmax_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xfefd7f7f; ++ *((int *)&__m128_op1[2]) = 0x7f7f7f7e; ++ *((int *)&__m128_op1[1]) = 0xdffdbffe; ++ *((int *)&__m128_op1[0]) = 0xba6f5543; ++ *((int 
*)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x7f7f7f7e; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmax_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xff84fff4; ++ *((int *)&__m128_op0[2]) = 0xff84fff4; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xfffffff0; ++ *((int *)&__m128_op1[3]) = 0xff84fff4; ++ *((int *)&__m128_op1[2]) = 0xff84fff4; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xfffffff0; ++ *((int *)&__m128_result[3]) = 0xffc4fff4; ++ *((int *)&__m128_result[2]) = 0xffc4fff4; ++ *((int *)&__m128_result[1]) = 0xffffffff; ++ *((int *)&__m128_result[0]) = 0xfffffff0; ++ __m128_out = __lsx_vfmax_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00007fff; ++ *((int *)&__m128_op1[2]) = 0x00007fff; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00007fff; ++ *((int *)&__m128_result[2]) = 0x00007fff; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmax_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 
0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmax_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmax_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000001; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000001; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmax_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x01010001; ++ *((int *)&__m128_op0[0]) = 0x01010001; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00020000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00020000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00020000; ++ *((int *)&__m128_result[1]) = 0x01010001; ++ *((int *)&__m128_result[0]) = 0x01010001; ++ __m128_out = __lsx_vfmax_s (__m128_op0, __m128_op1); 
++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000020; ++ *((int *)&__m128_op1[2]) = 0x00000020; ++ *((int *)&__m128_op1[1]) = 0x0000001f; ++ *((int *)&__m128_op1[0]) = 0x0000001f; ++ *((int *)&__m128_result[3]) = 0x00000020; ++ *((int *)&__m128_result[2]) = 0x00000020; ++ *((int *)&__m128_result[1]) = 0x0000001f; ++ *((int *)&__m128_result[0]) = 0x0000001f; ++ __m128_out = __lsx_vfmax_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0xf3040705; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0xf3040705; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0xf3040705; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmax_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000004; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000004; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000004; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000004; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000004; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000004; ++ __m128_out = __lsx_vfmax_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int 
*)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmax_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x0000ffff; ++ *((int *)&__m128_op0[2]) = 0x0000ffff; ++ *((int *)&__m128_op0[1]) = 0x0000ffff; ++ *((int *)&__m128_op0[0]) = 0x0000fffe; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmin_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffe5; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffe5; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmin_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 
0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmin_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x13121110; ++ *((int *)&__m128_op0[2]) = 0x1211100f; ++ *((int *)&__m128_op0[1]) = 0x11100f0e; ++ *((int *)&__m128_op0[0]) = 0x100f0e0d; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmin_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xfffffff3; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000008; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000088; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000008; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000088; ++ __m128_out = __lsx_vfmin_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x52525252; ++ *((int *)&__m128_op0[2]) = 0xadadadad; ++ *((int *)&__m128_op0[1]) = 0x52525252; ++ *((int *)&__m128_op0[0]) = 0xadadadad; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int 
*)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0xadadadad; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0xadadadad; ++ __m128_out = __lsx_vfmin_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x0000ffff; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x0000ffff; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x0000ffff; ++ __m128_out = __lsx_vfmin_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_d.c +new file mode 100644 +index 000000000..c2766d5c6 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_d.c +@@ -0,0 +1,155 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ 
*((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000800000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000800000000000; ++ __m128d_out = __lsx_vfmaxa_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmaxa_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmaxa_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x01203f1e3d1c3b1a; ++ *((unsigned long *)&__m128d_op0[0]) = 0x3918371635143312; ++ *((unsigned long *)&__m128d_op1[1]) = 0x00000af555555555; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00000af555555555; ++ *((unsigned long *)&__m128d_result[1]) = 0x01203f1e3d1c3b1a; ++ *((unsigned long *)&__m128d_result[0]) = 0x3918371635143312; ++ __m128d_out = __lsx_vfmaxa_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) 
= 0x0000010000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000010000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000010000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000010000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000010000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000010000000000; ++ __m128d_out = __lsx_vfmaxa_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmaxa_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmaxa_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmaxa_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x10f8000000000000; ++ 
*((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xfff8ffa2fffdffb0; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00000000ff800000; ++ *((unsigned long *)&__m128d_result[1]) = 0x10f8000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x00000000ff800000; ++ __m128d_out = __lsx_vfmaxa_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000200000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000200000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmina_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000080000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x80000000fff6fc00; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000080000000; ++ __m128d_out = __lsx_vfmina_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000080000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmina_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000158; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmina_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0xfffe0004fffe0004; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmina_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128d_op0[0]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x002a001a001a000b; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x002a001a001a000b; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmina_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_s.c +new file mode 100644 +index 000000000..5fcdedd3f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_s.c +@@ -0,0 +1,230 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d 
__m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmaxa_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xf436f3f5; ++ *((int *)&__m128_op0[0]) = 0x2f4ef4a8; ++ *((int *)&__m128_op1[3]) = 0xff800000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0xff800000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0xff800000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0xff800000; ++ *((int *)&__m128_result[0]) = 0x2f4ef4a8; ++ __m128_out = __lsx_vfmaxa_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000800; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000800; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int 
*)&__m128_result[3]) = 0x00000800; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000800; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmaxa_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0xc0c0c000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00800080; ++ *((int *)&__m128_op1[2]) = 0x00800080; ++ *((int *)&__m128_op1[1]) = 0x0080006b; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00800080; ++ *((int *)&__m128_result[2]) = 0xc0c0c000; ++ *((int *)&__m128_result[1]) = 0x0080006b; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmaxa_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmaxa_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x80000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x80000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int 
*)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmaxa_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmaxa_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmina_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = 
__lsx_vfmina_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmina_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xff01ff01; ++ *((int *)&__m128_op1[2]) = 0x0000ff7d; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x0000fffc; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmina_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0xdfa6e0c6; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0xd46cdc13; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmina_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int 
*)&__m128_op0[3]) = 0x01010101; ++ *((int *)&__m128_op0[2]) = 0x01010101; ++ *((int *)&__m128_op0[1]) = 0x010101fe; ++ *((int *)&__m128_op0[0]) = 0x0101fe87; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmina_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0xffff0000; ++ *((int *)&__m128_op1[2]) = 0xffff0000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0xffffffff; ++ *((int *)&__m128_result[2]) = 0xffffffff; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmina_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_d.c +new file mode 100644 +index 000000000..8a35dfe24 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_d.c +@@ -0,0 +1,216 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i 
= 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffa486c90f; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000058bcc201; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffffa486c90f; ++ *((unsigned long *)&__m128d_result[0]) = 0x1f52d710bf295626; ++ __m128d_out = __lsx_vfsqrt_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfsqrt_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffff01ff01; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffff01ff01; ++ __m128d_out = __lsx_vfsqrt_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfsqrt_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfsqrt_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, 
__m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000be00be; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x1f1b917c9f3d5e05; ++ __m128d_out = __lsx_vfsqrt_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfsqrt_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfsqrt_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfsqrt_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000001400000000; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x1f81e3779b97f4a8; ++ __m128d_out = __lsx_vfsqrt_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128d_op0[0]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128d_result[1]) = 0x7ff8000000000000; 
++ *((unsigned long *)&__m128d_result[0]) = 0x7ff8000000000000; ++ __m128d_out = __lsx_vfsqrt_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x001effae001effae; ++ *((unsigned long *)&__m128d_op0[0]) = 0x001effae001effae; ++ *((unsigned long *)&__m128d_result[1]) = 0x2006454690d3de87; ++ *((unsigned long *)&__m128d_result[0]) = 0x2006454690d3de87; ++ __m128d_out = __lsx_vfsqrt_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xc0b4d1a5f8babad3; ++ *((unsigned long *)&__m128d_op0[0]) = 0xbbc8ecc5f3ced5f3; ++ *((unsigned long *)&__m128d_result[1]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x7ff8000000000000; ++ __m128d_out = __lsx_vfsqrt_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[0]) = 0x7ff8000000000000; ++ __m128d_out = __lsx_vfrsqrt_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0001ffff00000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0001ffff0001ffff; ++ *((unsigned long *)&__m128d_result[1]) = 0x5ff6a0a40ea8f47c; ++ *((unsigned long *)&__m128d_result[0]) = 0x5ff6a0a40e9da42a; ++ __m128d_out = __lsx_vfrsqrt_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x000000000000000f; ++ *((unsigned long *)&__m128d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x61608654a2d4f6da; ++ __m128d_out = __lsx_vfrsqrt_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 
0xffff0000ffff0000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128d_result[0]) = 0x7ff0000000000000; ++ __m128d_out = __lsx_vfrsqrt_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00fe000100cf005f; ++ *((unsigned long *)&__m128d_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128d_result[1]) = 0x5f675e96e29a5a60; ++ *((unsigned long *)&__m128d_result[0]) = 0x7fff7fff7fff7fff; ++ __m128d_out = __lsx_vfrsqrt_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x7ff0000000000000; ++ __m128d_out = __lsx_vfrsqrt_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[0]) = 0x7ff8000000000000; ++ __m128d_out = __lsx_vfrecip_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x7ff0000000000000; ++ __m128d_out = __lsx_vfrecip_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00003f8000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00003f8000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x7ff0000000000000; ++ 
__m128d_out = __lsx_vfrecip_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x7ff0000000000000; ++ __m128d_out = __lsx_vfrecip_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x7ff0000000000000; ++ __m128d_out = __lsx_vfrecip_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfrecip_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00000000fffa0000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00000000fffa0000; ++ *((unsigned long *)&__m128d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x7ff0000000000000; ++ __m128d_out = __lsx_vfrecip_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128d_op0[0]) = 0xe593c8c4e593c8c4; ++ *((unsigned long *)&__m128d_result[1]) = 0x805ffffe01001fe0; ++ *((unsigned long *)&__m128d_result[0]) = 0x9a49e11102834d70; ++ __m128d_out = __lsx_vfrecip_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x5252525252525252; ++ *((unsigned long *)&__m128d_op0[0]) = 
0x5252dcdcdcdcdcdc; ++ *((unsigned long *)&__m128d_result[1]) = 0x2d8bf1f8fc7e3f20; ++ *((unsigned long *)&__m128d_result[0]) = 0x2d8b24b936d1b24d; ++ __m128d_out = __lsx_vfrecip_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_s.c +new file mode 100644 +index 000000000..ffd80540b +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_s.c +@@ -0,0 +1,372 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0xfe07e5fe; ++ *((int *)&__m128_op0[2]) = 0xfefdddfe; ++ *((int *)&__m128_op0[1]) = 0x00020100; ++ *((int *)&__m128_op0[0]) = 0xfedd0c00; ++ *((int *)&__m128_result[3]) = 0x7fc00000; ++ *((int *)&__m128_result[2]) = 0x7fc00000; ++ *((int *)&__m128_result[1]) = 0x1e801ffc; ++ *((int *)&__m128_result[0]) = 0x7fc00000; ++ __m128_out = __lsx_vfsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 
0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xff00ff00; ++ *((int *)&__m128_op0[2]) = 0xff00ff00; ++ *((int *)&__m128_op0[1]) = 0xff00ff00; ++ *((int *)&__m128_op0[0]) = 0xff00ff00; ++ *((int *)&__m128_result[3]) = 0x7fc00000; ++ *((int *)&__m128_result[2]) = 0x7fc00000; ++ *((int *)&__m128_result[1]) = 0x7fc00000; ++ *((int *)&__m128_result[0]) = 0x7fc00000; ++ __m128_out = __lsx_vfsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x8c7fc73a; ++ *((int *)&__m128_op0[2]) = 0x137e54af; ++ *((int *)&__m128_op0[1]) = 0xbc84cf6f; ++ *((int *)&__m128_op0[0]) = 0x76208329; ++ *((int *)&__m128_result[3]) = 0x7fc00000; ++ *((int *)&__m128_result[2]) = 0x297f29fe; ++ *((int *)&__m128_result[1]) = 0x7fc00000; ++ *((int *)&__m128_result[0]) = 0x5acab5a5; ++ __m128_out = __lsx_vfsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 
0xffff9727; ++ *((int *)&__m128_op0[2]) = 0xffff9727; ++ *((int *)&__m128_op0[1]) = 0xfffffe79; ++ *((int *)&__m128_op0[0]) = 0xffffba5f; ++ *((int *)&__m128_result[3]) = 0xffff9727; ++ *((int *)&__m128_result[2]) = 0xffff9727; ++ *((int *)&__m128_result[1]) = 0xfffffe79; ++ *((int *)&__m128_result[0]) = 0xffffba5f; ++ __m128_out = __lsx_vfsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x7f800000; ++ *((int *)&__m128_result[2]) = 0x7f800000; ++ *((int *)&__m128_result[1]) = 0x7f800000; ++ *((int *)&__m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfrsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0xfff8fff8; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0xfff80000; ++ *((int *)&__m128_result[3]) = 0x7f800000; ++ *((int *)&__m128_result[2]) = 0xfff8fff8; ++ *((int *)&__m128_result[1]) = 0x7f800000; ++ *((int *)&__m128_result[0]) = 0xfff80000; ++ __m128_out = __lsx_vfrsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x7f800000; ++ *((int 
*)&__m128_result[2]) = 0x7f800000; ++ *((int *)&__m128_result[1]) = 0x7f800000; ++ *((int *)&__m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfrsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_result[3]) = 0xffffffff; ++ *((int *)&__m128_result[2]) = 0xffffffff; ++ *((int *)&__m128_result[1]) = 0xffffffff; ++ *((int *)&__m128_result[0]) = 0xffffffff; ++ __m128_out = __lsx_vfrsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x1f1b917c; ++ *((int *)&__m128_op0[0]) = 0x9f3d5e05; ++ *((int *)&__m128_result[3]) = 0x7f800000; ++ *((int *)&__m128_result[2]) = 0x7f800000; ++ *((int *)&__m128_result[1]) = 0x4fa432d6; ++ *((int *)&__m128_result[0]) = 0x7fc00000; ++ __m128_out = __lsx_vfrsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x7f800000; ++ *((int *)&__m128_result[2]) = 0x7f800000; ++ *((int *)&__m128_result[1]) = 0x7f800000; ++ *((int *)&__m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfrsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0x12835580; ++ *((int *)&__m128_op0[0]) = 0xb880eb98; ++ *((int *)&__m128_result[3]) = 0xffffffff; ++ *((int *)&__m128_result[2]) = 0xffffffff; ++ *((int *)&__m128_result[1]) = 0x55fcbad1; ++ *((int *)&__m128_result[0]) = 0x7fc00000; ++ __m128_out = __lsx_vfrsqrt_s (__m128_op0); ++ ASSERTEQ_32 
(__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x06070607; ++ *((int *)&__m128_op0[2]) = 0x00000807; ++ *((int *)&__m128_op0[1]) = 0x0707f8f8; ++ *((int *)&__m128_op0[0]) = 0x03e8157e; ++ *((int *)&__m128_result[3]) = 0x5c303f97; ++ *((int *)&__m128_result[2]) = 0x61ff9049; ++ *((int *)&__m128_result[1]) = 0x5bafa1dd; ++ *((int *)&__m128_result[0]) = 0x5d3e1e1d; ++ __m128_out = __lsx_vfrsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x7f800000; ++ *((int *)&__m128_result[2]) = 0x7f800000; ++ *((int *)&__m128_result[1]) = 0x7f800000; ++ *((int *)&__m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfrsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x7f800000; ++ *((int *)&__m128_result[2]) = 0x7f800000; ++ *((int *)&__m128_result[1]) = 0x7f800000; ++ *((int *)&__m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfrsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x7f800000; ++ *((int *)&__m128_result[2]) = 0x7f800000; ++ *((int *)&__m128_result[1]) = 0x7f800000; ++ *((int *)&__m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfrsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xfff7fffe; ++ *((int *)&__m128_op0[2]) = 0xfffa01ff; ++ *((int *)&__m128_op0[1]) = 0xfffbfffe; ++ *((int *)&__m128_op0[0]) 
= 0xfffe01ff; ++ *((int *)&__m128_result[3]) = 0xfff7fffe; ++ *((int *)&__m128_result[2]) = 0xfffa01ff; ++ *((int *)&__m128_result[1]) = 0xfffbfffe; ++ *((int *)&__m128_result[0]) = 0xfffe01ff; ++ __m128_out = __lsx_vfrsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x7f800000; ++ *((int *)&__m128_result[2]) = 0x7f800000; ++ *((int *)&__m128_result[1]) = 0x7f800000; ++ *((int *)&__m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfrsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x45000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x44000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x3cb504f3; ++ *((int *)&__m128_result[2]) = 0x7f800000; ++ *((int *)&__m128_result[1]) = 0x3d3504f3; ++ *((int *)&__m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfrsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00020001; ++ *((int *)&__m128_op0[0]) = 0x00020002; ++ *((int *)&__m128_result[3]) = 0x7f800000; ++ *((int *)&__m128_result[2]) = 0x7f800000; ++ *((int *)&__m128_result[1]) = 0x607fffc0; ++ *((int *)&__m128_result[0]) = 0x607fff80; ++ __m128_out = __lsx_vfrsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000002; ++ *((int *)&__m128_op0[2]) = 0x00000002; ++ *((int *)&__m128_op0[1]) = 0x00000003; ++ *((int *)&__m128_op0[0]) = 0x00000003; ++ *((int *)&__m128_result[3]) = 0x7f800000; ++ *((int *)&__m128_result[2]) = 0x7f800000; ++ *((int *)&__m128_result[1]) = 0x7f800000; ++ *((int *)&__m128_result[0]) = 0x7f800000; 
++ __m128_out = __lsx_vfrecip_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xf6e91c00; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x51cfd7c0; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x880c91b8; ++ *((int *)&__m128_result[2]) = 0x7f800000; ++ *((int *)&__m128_result[1]) = 0x2d1da85b; ++ *((int *)&__m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfrecip_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x7f800000; ++ *((int *)&__m128_result[2]) = 0x7f800000; ++ *((int *)&__m128_result[1]) = 0x7f800000; ++ *((int *)&__m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfrecip_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x7f800000; ++ *((int *)&__m128_result[2]) = 0x7f800000; ++ *((int *)&__m128_result[1]) = 0x7f800000; ++ *((int *)&__m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfrecip_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xfffffffa; ++ *((int *)&__m128_op0[2]) = 0xfffffffa; ++ *((int *)&__m128_op0[1]) = 0xfffffffa; ++ *((int *)&__m128_op0[0]) = 0xfffffffa; ++ *((int *)&__m128_result[3]) = 0xfffffffa; ++ *((int *)&__m128_result[2]) = 0xfffffffa; ++ *((int *)&__m128_result[1]) = 0xfffffffa; ++ *((int *)&__m128_result[0]) = 0xfffffffa; ++ __m128_out = __lsx_vfrecip_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ 
*((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x7f800000; ++ *((int *)&__m128_result[2]) = 0x7f800000; ++ *((int *)&__m128_result[1]) = 0x7f800000; ++ *((int *)&__m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfrecip_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffff0001; ++ *((int *)&__m128_op0[2]) = 0xffff0001; ++ *((int *)&__m128_op0[1]) = 0xffff0001; ++ *((int *)&__m128_op0[0]) = 0xffff0001; ++ *((int *)&__m128_result[3]) = 0xffff0001; ++ *((int *)&__m128_result[2]) = 0xffff0001; ++ *((int *)&__m128_result[1]) = 0xffff0001; ++ *((int *)&__m128_result[0]) = 0xffff0001; ++ __m128_out = __lsx_vfrecip_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x0a000000; ++ *((int *)&__m128_op0[2]) = 0x0a000000; ++ *((int *)&__m128_op0[1]) = 0x0a000000; ++ *((int *)&__m128_op0[0]) = 0x0a000000; ++ *((int *)&__m128_result[3]) = 0x75000000; ++ *((int *)&__m128_result[2]) = 0x75000000; ++ *((int *)&__m128_result[1]) = 0x75000000; ++ *((int *)&__m128_result[0]) = 0x75000000; ++ __m128_out = __lsx_vfrecip_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x7f800000; ++ *((int *)&__m128_result[2]) = 0x7f800000; ++ *((int *)&__m128_result[1]) = 0x7f800000; ++ *((int *)&__m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfrecip_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ return 0; ++} +-- +2.33.0 + diff --git a/LoongArch-Add-tests-for-SX-vector-floating-point-ins.patch b/LoongArch-Add-tests-for-SX-vector-floating-point-ins.patch new file mode 100644 index 0000000000000000000000000000000000000000..35f73308f6ac107d28704b3602511568c51e0363 --- 
/dev/null +++ b/LoongArch-Add-tests-for-SX-vector-floating-point-ins.patch @@ -0,0 +1,4316 @@ +From f9098b58fe79ba960e41b7ec6a05ba2ea18ca02e Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Mon, 11 Sep 2023 09:42:39 +0800 +Subject: [PATCH 079/124] LoongArch: Add tests for SX vector floating-point + instructions. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lsx/lsx-vfcvt-1.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vfcvt-2.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vffint-1.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vffint-2.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vffint-3.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vfrint_d.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vfrint_s.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vftint-1.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vftint-2.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vftint-3.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vftint-4.c: New test. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lsx/lsx-vfcvt-1.c | 398 +++++++ + .../loongarch/vector/lsx/lsx-vfcvt-2.c | 278 +++++ + .../loongarch/vector/lsx/lsx-vffint-1.c | 161 +++ + .../loongarch/vector/lsx/lsx-vffint-2.c | 264 +++++ + .../loongarch/vector/lsx/lsx-vffint-3.c | 102 ++ + .../loongarch/vector/lsx/lsx-vfrint_d.c | 230 ++++ + .../loongarch/vector/lsx/lsx-vfrint_s.c | 350 ++++++ + .../loongarch/vector/lsx/lsx-vftint-1.c | 349 ++++++ + .../loongarch/vector/lsx/lsx-vftint-2.c | 695 +++++++++++ + .../loongarch/vector/lsx/lsx-vftint-3.c | 1028 +++++++++++++++++ + .../loongarch/vector/lsx/lsx-vftint-4.c | 345 ++++++ + 11 files changed, 4200 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-4.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-1.c +new file mode 100644 +index 000000000..d4a86e262 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-1.c +@@ -0,0 +1,398 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include 
"../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00e0000000e00000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvth_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000002a55005501; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000002a55000001; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x36280000; ++ *((int *)&__m128_result[1]) = 0x42a00000; ++ *((int *)&__m128_result[0]) = 0x42a02000; ++ __m128_out = __lsx_vfcvth_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xf436f3f5; ++ *((int *)&__m128_op0[0]) = 0x2f4ef4a8; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfcvth_d_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffcfb799f1; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0282800002828282; ++ *((int *)&__m128_result[3]) = 0xffffe000; ++ *((int 
*)&__m128_result[2]) = 0xffffe000; ++ *((int *)&__m128_result[1]) = 0xc1f6e000; ++ *((int *)&__m128_result[0]) = 0xbb3e2000; ++ __m128_out = __lsx_vfcvth_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000040004000100; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvth_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvth_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvth_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000006f00001f0a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000958affff995d; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x36de0000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x3be14000; ++ __m128_out = __lsx_vfcvth_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x41dfffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((int 
*)&__m128_result[3]) = 0x403be000; ++ *((int *)&__m128_result[2]) = 0xffffe000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvth_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x63637687; ++ *((int *)&__m128_op0[2]) = 0x636316bb; ++ *((int *)&__m128_op0[1]) = 0x63636363; ++ *((int *)&__m128_op0[0]) = 0x63636363; ++ *((unsigned long *)&__m128d_result[1]) = 0x446c6ed0e0000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x446c62d760000000; ++ __m128d_out = __lsx_vfcvth_d_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfcvth_d_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((int *)&__m128_op0[3]) = 0x000000ff; ++ *((int *)&__m128_op0[2]) = 0x000000ff; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x371fe00000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x371fe00000000000; ++ __m128d_out = __lsx_vfcvth_d_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvth_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffff7fff7ef; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x80808080ffffffff; ++ *((int *)&__m128_result[3]) = 0xffffe000; ++ *((int *)&__m128_result[2]) = 0xffffe000; ++ *((int *)&__m128_result[1]) = 0xc6ffe000; ++ *((int *)&__m128_result[0]) = 0xc6fde000; ++ __m128_out = __lsx_vfcvth_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffe0000000; ++ __m128d_out = __lsx_vfcvth_d_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvth_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvth_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffe1ffc100000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000400000; ++ *((int *)&__m128_result[3]) = 0xfffc2000; ++ *((int *)&__m128_result[2]) = 0xfff82000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvth_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvth_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfcvth_d_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((int *)&__m128_op0[3]) = 0x0000b3a6; ++ *((int *)&__m128_op0[2]) = 0x000067da; ++ *((int *)&__m128_op0[1]) = 0x00004e42; ++ *((int *)&__m128_op0[0]) = 0x0000c26a; ++ *((unsigned long *)&__m128d_result[1]) = 0x379674c000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x3789f68000000000; ++ __m128d_out = __lsx_vfcvth_d_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0xffff0000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffe0000000; ++ __m128d_out = __lsx_vfcvth_d_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvth_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0001001001000080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4195d926d8018000; ++ *((int *)&__m128_result[3]) = 0x33800000; ++ *((int *)&__m128_result[2]) = 0x35800000; ++ *((int *)&__m128_result[1]) = 0x37800000; ++ *((int *)&__m128_result[0]) = 0x37000000; ++ __m128_out = __lsx_vfcvth_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfcvth_d_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((int *)&__m128_result[3]) = 0xffffe000; ++ *((int *)&__m128_result[2]) = 0xffffe000; ++ *((int *)&__m128_result[1]) = 0xffffe000; ++ *((int *)&__m128_result[0]) = 0xffffe000; ++ __m128_out = __lsx_vfcvtl_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfcvtl_d_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000a000a000a000a; ++ *((int *)&__m128_result[3]) = 0x35200000; ++ *((int *)&__m128_result[2]) = 0x35200000; ++ *((int *)&__m128_result[1]) = 0x35200000; ++ *((int *)&__m128_result[0]) = 0x35200000; ++ __m128_out = __lsx_vfcvtl_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int 
*)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfcvtl_d_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000100; ++ *((int *)&__m128_op0[2]) = 0x0f00fe00; ++ *((int *)&__m128_op0[1]) = 0x0000017f; ++ *((int *)&__m128_op0[0]) = 0xff00fe7f; ++ *((unsigned long *)&__m128d_result[1]) = 0x3727f00000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xc7e01fcfe0000000; ++ __m128d_out = __lsx_vfcvtl_d_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfcvtl_d_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000020; ++ *((int *)&__m128_op0[0]) = 0x00000020; ++ *((unsigned long *)&__m128d_result[1]) = 0x36f0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x36f0000000000000; ++ __m128d_out = __lsx_vfcvtl_d_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xbd994889; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x0a092444; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x3941248880000000; ++ __m128d_out = __lsx_vfcvtl_d_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, 
__m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x62cbf96e4acfaf40; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf0bc9a5278285a4a; ++ *((int *)&__m128_result[3]) = 0xc6178000; ++ *((int *)&__m128_result[2]) = 0xbb4a4000; ++ *((int *)&__m128_result[1]) = 0x47050000; ++ *((int *)&__m128_result[0]) = 0x43494000; ++ __m128_out = __lsx_vfcvtl_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00040004; ++ *((int *)&__m128_op0[2]) = 0x00040004; ++ *((int *)&__m128_op0[1]) = 0x00040004; ++ *((int *)&__m128_op0[0]) = 0x00040004; ++ *((unsigned long *)&__m128d_result[1]) = 0x37c0001000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x37c0001000000000; ++ __m128d_out = __lsx_vfcvtl_d_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff00ff00ff00ff00; ++ *((int *)&__m128_result[3]) = 0xffe00000; ++ *((int *)&__m128_result[2]) = 0xffe00000; ++ *((int *)&__m128_result[1]) = 0xffe00000; ++ *((int *)&__m128_result[0]) = 0xffe00000; ++ __m128_out = __lsx_vfcvtl_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvtl_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvtl_s_h (__m128i_op0); ++ 
ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0xffffe000; ++ *((int *)&__m128_result[0]) = 0xffffe000; ++ __m128_out = __lsx_vfcvtl_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfcvtl_d_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfcvtl_d_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x007f7f7f; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x380fdfdfc0000000; ++ __m128d_out = __lsx_vfcvtl_d_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-2.c +new file mode 100644 +index 000000000..e8f4f12b9 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-2.c +@@ -0,0 +1,278 @@ ++/* { dg-do run } */ 
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lsxintrin.h> ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x004200a0; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x004200a0; ++ *((int *)&__m128_op0[0]) = 0x00200001; ++ *((int *)&__m128_op1[3]) = 0x004200a0; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x004200a0; ++ *((int *)&__m128_op1[0]) = 0x00200000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcvt_h_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0x00010001; ++ *((int *)&__m128_op1[2]) = 0x0001007c; ++ *((int *)&__m128_op1[1]) = 0x00010001; ++ *((int *)&__m128_op1[0]) = 0x00010001; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcvt_h_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000;
++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcvt_h_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcvt_h_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x80808080; ++ *((int *)&__m128_op1[2]) = 0x80808080; ++ *((int *)&__m128_op1[1]) = 0x80808080; ++ *((int *)&__m128_op1[0]) = 0x80808080; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000800080008000; ++ __m128i_out = __lsx_vfcvt_h_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffffffff; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcvt_h_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcvt_h_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xfffffffc; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xfffffffc; ++ *((int *)&__m128_op1[3]) = 0x00000001; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000103; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcvt_h_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000800000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvt_s_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvt_s_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvt_s_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00000049000000c0; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00000000ffffff29; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000100000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvt_s_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvt_s_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_32 
(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x7ff0000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x7f800000; ++ *((int *)&__m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfcvt_s_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x000000002c002400; ++ *((unsigned long *)&__m128d_op1[1]) = 0x7ef400ad21fc7081; ++ *((unsigned long *)&__m128d_op1[0]) = 0x28bf0351ec69b5f2; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x7f800000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvt_s_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00000dc300003ffb; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00000dc300003ffb; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000ffff3fbfffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x7fffffff7fffffff; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x7ffffffb; ++ __m128_out = __lsx_vfcvt_s_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xbba0c07b51230d5c; ++ *((unsigned long *)&__m128d_op0[0]) = 0xa15f3f9e8763c2b9; ++ *((unsigned long *)&__m128d_op1[1]) = 0xbba0c07b51230d5c; ++ *((unsigned long *)&__m128d_op1[0]) = 0xa15f3f9e8763c2b9; ++ *((int *)&__m128_result[3]) = 0x9d0603db; ++ *((int *)&__m128_result[2]) = 0x80000000; 
++ *((int *)&__m128_result[1]) = 0x9d0603db; ++ *((int *)&__m128_result[0]) = 0x80000000; ++ __m128_out = __lsx_vfcvt_s_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128d_op1[1]) = 0x8101010181010101; ++ *((unsigned long *)&__m128d_op1[0]) = 0x8101010181010101; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x80000000; ++ *((int *)&__m128_result[0]) = 0x80000000; ++ __m128_out = __lsx_vfcvt_s_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffc00000ff800000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff; ++ *((int *)&__m128_result[3]) = 0xff800000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0xffffffff; ++ *((int *)&__m128_result[0]) = 0xffffffff; ++ __m128_out = __lsx_vfcvt_s_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0xfffdfffe80008000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x000000ff00000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0xffeffff4; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvt_s_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x7f800000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvt_s_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000090; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000090; ++ *((unsigned long *)&__m128d_op1[1]) = 0x004eff6200d2ff76; ++ *((unsigned long *)&__m128d_op1[0]) = 0xff70002800be00a0; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0xff800000; ++ __m128_out = __lsx_vfcvt_s_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-1.c +new file mode 100644 +index 000000000..85db95762 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-1.c +@@ -0,0 +1,161 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lsxintrin.h> ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long
*)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffinth_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffinth_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001f0a; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffinth_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128d_result[1]) = 0x41dfffffffc00000; ++ *((unsigned long *)&__m128d_result[0]) = 0xbff0000000000000; ++ __m128d_out = __lsx_vffinth_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x2); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000003a24; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003dbe88077c78c1; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x40cd120000000000; ++ __m128d_out = __lsx_vffinth_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 
0x0000000000000000; ++ __m128d_out = __lsx_vffinth_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x4050000000000000; ++ __m128d_out = __lsx_vffinth_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0086000000040000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0082000000000007; ++ *((unsigned long *)&__m128d_result[1]) = 0x4160c00000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x4110000000000000; ++ __m128d_out = __lsx_vffinth_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffinth_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff8000010f800000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffinth_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000051649b6; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000003e0000003f; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x41945926d8000000; ++ __m128d_out = __lsx_vffinth_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffintl_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfe82fe0200000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfe82fe0200000000; ++ *((unsigned long *)&__m128d_result[1]) = 0xc177d01fe0000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffintl_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffintl_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000100010001007c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128d_result[1]) = 0x40f0001000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x40f0001000000000; ++ __m128d_out = __lsx_vffintl_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100013fa0; ++ *((unsigned long *)&__m128d_result[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x40f3fa0000000000; ++ __m128d_out = __lsx_vffintl_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fffe0001; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xc0fffff000000000; ++ __m128d_out = 
__lsx_vffintl_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffintl_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffintl_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffintl_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-2.c +new file mode 100644 +index 000000000..f8839cfcd +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-2.c +@@ -0,0 +1,264 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lsxintrin.h> ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++
long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x03ff03ff03ff03ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x438ff81ff81ff820; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffint_d_l (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4f8000004f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4f8000004f800000; ++ *((unsigned long *)&__m128d_result[1]) = 0x43d3e0000013e000; ++ *((unsigned long *)&__m128d_result[0]) = 0x43d3e0000013e000; ++ __m128d_out = __lsx_vffint_d_l (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffint_d_l (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffint_d_l (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[1]) = 0xbff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xbff0000000000000; ++ __m128d_out = __lsx_vffint_d_l (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0674c8868a74fc80; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfdce8003090b0906; ++ *((unsigned long *)&__m128d_result[1]) = 0x4399d3221a29d3f2; ++ *((unsigned long *)&__m128d_result[0]) = 0xc3818bffe7b7a7b8; ++ __m128d_out = __lsx_vffint_d_l (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00ff00ff; ++ *((int *)&__m128_result[3]) = 0x4b7f00ff; ++ *((int *)&__m128_result[2]) = 0x4b7f00ff; ++ *((int *)&__m128_result[1]) = 0x4b7f00ff; ++ *((int *)&__m128_result[0]) = 0x4b7f00ff; ++ __m128_out = __lsx_vffint_s_w (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000401000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000004; ++ *((int *)&__m128_result[3]) = 0x40800000; ++ *((int *)&__m128_result[2]) = 0x4b800000; ++ *((int *)&__m128_result[1]) = 0x47800080; ++ *((int *)&__m128_result[0]) = 0x40800000; ++ __m128_out = __lsx_vffint_s_w (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x47000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vffint_s_w (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x76f424887fffffff; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x3f800000; ++ *((int *)&__m128_result[1]) = 0x4eede849; ++ *((int *)&__m128_result[0]) = 0x4f000000; ++ __m128_out = __lsx_vffint_s_w (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0xd70b30c96ea9f4e8; ++ *((unsigned long *)&__m128i_op0[0]) = 0xa352bfac9269e0aa; ++ *((int *)&__m128_result[3]) = 0xce23d33d; ++ *((int *)&__m128_result[2]) = 0x4edd53ea; ++ *((int *)&__m128_result[1]) = 0xceb95a81; ++ *((int *)&__m128_result[0]) = 0xcedb2c3f; ++ __m128_out = __lsx_vffint_s_w (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x3f800000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vffint_s_w (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vffint_s_w (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vffint_s_w (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000003ff8; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x467fe000; ++ __m128_out = __lsx_vffint_s_w (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff80000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0xbf800000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0xcf000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vffint_s_l (__m128i_op0, __m128i_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vffint_s_l (__m128i_op0, __m128i_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x5eff0000; ++ *((int *)&__m128_result[2]) = 0x5eff0000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vffint_s_l (__m128i_op0, __m128i_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000000e3; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfda9b23a624082fd; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffff0000; ++ *((int *)&__m128_result[3]) = 0x43630000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 
0xdc159371; ++ *((int *)&__m128_result[0]) = 0x4f7fff00; ++ __m128_out = __lsx_vffint_s_l (__m128i_op0, __m128i_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vffint_s_l (__m128i_op0, __m128i_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vffint_s_l (__m128i_op0, __m128i_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000040; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x42800000; ++ *((int *)&__m128_result[0]) = 0x42800000; ++ __m128_out = __lsx_vffint_s_l (__m128i_op0, __m128i_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000100; ++ *((unsigned 
long *)&__m128i_op1[0]) = 0x0000000000000100; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x43800000; ++ *((int *)&__m128_result[0]) = 0x43800000; ++ __m128_out = __lsx_vffint_s_l (__m128i_op0, __m128i_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vffint_s_l (__m128i_op0, __m128i_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x001effae001effae; ++ *((unsigned long *)&__m128i_op1[0]) = 0x001effae001effae; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x59f7fd70; ++ *((int *)&__m128_result[0]) = 0x59f7fd70; ++ __m128_out = __lsx_vffint_s_l (__m128i_op0, __m128i_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000ef0000000003b; ++ *((int *)&__m128_result[3]) = 0x577fff00; ++ *((int *)&__m128_result[2]) = 0x577fff00; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x596f0000; ++ __m128_out = __lsx_vffint_s_l (__m128i_op0, __m128i_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ return 0; ++} +diff --git 
a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-3.c +new file mode 100644 +index 000000000..9150e27ca +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-3.c +@@ -0,0 +1,102 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8493941335f5cc0c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x625a7312befcb21e; ++ *((unsigned long *)&__m128d_result[1]) = 0x43e092728266beba; ++ *((unsigned long *)&__m128d_result[0]) = 0x43d8969cc4afbf2d; ++ __m128d_out = __lsx_vffint_d_lu (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffint_d_lu (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffint_d_lu (__m128i_op0); ++ 
ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffint_d_lu (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vffint_s_wu (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001600000016; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000001600000016; ++ *((int *)&__m128_result[3]) = 0x41b00000; ++ *((int *)&__m128_result[2]) = 0x41b00000; ++ *((int *)&__m128_result[1]) = 0x41b00000; ++ *((int *)&__m128_result[0]) = 0x41b00000; ++ __m128_out = __lsx_vffint_s_wu (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((int *)&__m128_result[3]) = 0x4f800000; ++ *((int *)&__m128_result[2]) = 0x4f800000; ++ *((int *)&__m128_result[1]) = 0x4f800000; ++ *((int *)&__m128_result[0]) = 0x4f800000; ++ __m128_out = __lsx_vffint_s_wu (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000442800007b50; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffff0204; ++ *((int *)&__m128_result[3]) = 0x46885000; ++ *((int *)&__m128_result[2]) = 0x46f6a000; ++ *((int *)&__m128_result[1]) = 0x4f800000; ++ *((int *)&__m128_result[0]) = 0x4f7fff02; ++ __m128_out = __lsx_vffint_s_wu (__m128i_op0); ++ 
ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vffint_s_wu (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vffint_s_wu (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_d.c +new file mode 100644 +index 000000000..c60ff2b46 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_d.c +@@ -0,0 +1,230 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++/* { dg-timeout 500 } */ ++#include "../simd_correctness_check.h" ++#include ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128d_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfrint_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0003000300030003; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0003000700020005; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfrint_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfrint_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x7ff0000000000000; ++ __m128d_out = __lsx_vfrint_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00ff000100ff00fe; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00ff003000ff00a0; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfrint_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xfd200ed2fd370775; ++ *((unsigned long *)&__m128d_op0[0]) = 0x96198318780e32c5; ++ *((unsigned long *)&__m128d_result[1]) = 0xfd200ed2fd370775; ++ *((unsigned long *)&__m128d_result[0]) = 0x8000000000000000; ++ __m128d_out = __lsx_vfrint_d (__m128d_op0); ++ ASSERTEQ_64 
(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfrintrne_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xe0404041e0404041; ++ *((unsigned long *)&__m128d_op0[0]) = 0xe0404041e0404041; ++ *((unsigned long *)&__m128d_result[1]) = 0xe0404041e0404041; ++ *((unsigned long *)&__m128d_result[0]) = 0xe0404041e0404041; ++ __m128d_out = __lsx_vfrintrne_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000080800000808; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000080800000808; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfrintrne_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfrintrne_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000868686868686; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfrintrne_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfrintrp_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000002000; ++ *((unsigned long *)&__m128d_op0[0]) = 0xfffc002000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xfffc002000000000; ++ __m128d_out = __lsx_vfrintrp_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x9c9c9c9c00000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x8000000000000000; ++ __m128d_out = __lsx_vfrintrp_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfrintrp_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000007f00ff00ff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128d_result[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x3ff0000000000000; ++ __m128d_out = __lsx_vfrintrp_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000077af9450; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x3ff0000000000000; ++ __m128d_out = __lsx_vfrintrp_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, 
__m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xff02ff1bff02ff23; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000ffffff02fff4; ++ *((unsigned long *)&__m128d_result[1]) = 0xff02ff1bff02ff23; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfrintrm_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfrintrm_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128d_op0[0]) = 0x6a57a30ff0000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x6a57a30ff0000000; ++ __m128d_out = __lsx_vfrintrm_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000001300000013; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfrintrm_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfrintrm_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffff02000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x1f81e3779b97f4a8; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffff02000000; ++ 
*((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfrintrm_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0001000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfrintrm_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfrintrz_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x98ff98ff220e220d; ++ *((unsigned long *)&__m128d_op0[0]) = 0xa2e1a2601ff01ff0; ++ *((unsigned long *)&__m128d_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x8000000000000000; ++ __m128d_out = __lsx_vfrintrz_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfrintrz_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00000000abba7980; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00000000ccf98000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfrintrz_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 
0xfe3bfb01fe3bfe01; ++ *((unsigned long *)&__m128d_op0[0]) = 0xfe03fe3ffe01fa21; ++ *((unsigned long *)&__m128d_result[1]) = 0xfe3bfb01fe3bfe01; ++ *((unsigned long *)&__m128d_result[0]) = 0xfe03fe3ffe01fa21; ++ __m128d_out = __lsx_vfrintrz_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x5847b72626ce61ef; ++ *((unsigned long *)&__m128d_op0[0]) = 0x110053f401e7cced; ++ *((unsigned long *)&__m128d_result[1]) = 0x5847b72626ce61ef; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfrintrz_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_s.c +new file mode 100644 +index 000000000..12cb02303 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_s.c +@@ -0,0 +1,350 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++/* { dg-timeout 500 } */ ++#include "../simd_correctness_check.h" ++#include ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x00100010; ++ *((int *)&__m128_op0[2]) = 0x00030000; ++ *((int *)&__m128_op0[1]) = 0x00060002; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; 
++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfrint_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfrint_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000001; ++ *((int *)&__m128_op0[2]) = 0xca02f854; ++ *((int *)&__m128_op0[1]) = 0x00000001; ++ *((int *)&__m128_op0[0]) = 0x00013fa0; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0xca02f854; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfrint_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x000000ad; ++ *((int *)&__m128_op0[2]) = 0x00007081; ++ *((int *)&__m128_op0[1]) = 0x00000351; ++ *((int *)&__m128_op0[0]) = 0x0000b5f2; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfrint_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00ff00ef; ++ *((int *)&__m128_op0[2]) = 0x00ff010f; ++ *((int *)&__m128_op0[1]) = 0x00ff00ff; ++ *((int *)&__m128_op0[0]) = 0x00ff010f; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfrint_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int 
*)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfrintrne_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00130013; ++ *((int *)&__m128_op0[2]) = 0x00130013; ++ *((int *)&__m128_op0[1]) = 0x00130013; ++ *((int *)&__m128_op0[0]) = 0x00130013; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfrintrne_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x20202020; ++ *((int *)&__m128_op0[2]) = 0x20202020; ++ *((int *)&__m128_op0[1]) = 0x20202020; ++ *((int *)&__m128_op0[0]) = 0x20207fff; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfrintrne_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x01f50000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfrintrne_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_result[3]) = 0xffffffff; ++ *((int 
*)&__m128_result[2]) = 0xffffffff; ++ *((int *)&__m128_result[1]) = 0xffffffff; ++ *((int *)&__m128_result[0]) = 0xffffffff; ++ __m128_out = __lsx_vfrintrne_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000001; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000001; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfrintrne_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00020004; ++ *((int *)&__m128_op0[0]) = 0x00000001; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfrintrne_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xfffbfffb; ++ *((int *)&__m128_op0[2]) = 0xfffbfffb; ++ *((int *)&__m128_op0[1]) = 0xfffbfffb; ++ *((int *)&__m128_op0[0]) = 0xfffbfffb; ++ *((int *)&__m128_result[3]) = 0xfffbfffb; ++ *((int *)&__m128_result[2]) = 0xfffbfffb; ++ *((int *)&__m128_result[1]) = 0xfffbfffb; ++ *((int *)&__m128_result[0]) = 0xfffbfffb; ++ __m128_out = __lsx_vfrintrne_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x0ff780a1; ++ *((int *)&__m128_op0[2]) = 0x0efc01af; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0xfe7f0000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0xfe7f0000; ++ __m128_out = __lsx_vfrintrne_s (__m128_op0); ++ ASSERTEQ_64 
(__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfrintrp_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0xefffffff; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0xefffffff; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfrintrp_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffff00; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffff00; ++ *((int *)&__m128_result[3]) = 0xffffffff; ++ *((int *)&__m128_result[2]) = 0xffffff00; ++ *((int *)&__m128_result[1]) = 0xffffffff; ++ *((int *)&__m128_result[0]) = 0xffffff00; ++ __m128_out = __lsx_vfrintrp_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffb96b; ++ *((int *)&__m128_op0[2]) = 0xffff57c9; ++ *((int *)&__m128_op0[1]) = 0xffff6080; ++ *((int *)&__m128_op0[0]) = 0xffff4417; ++ *((int *)&__m128_result[3]) = 0xffffb96b; ++ *((int *)&__m128_result[2]) = 0xffff57c9; ++ *((int *)&__m128_result[1]) = 0xffff6080; ++ *((int *)&__m128_result[0]) = 0xffff4417; ++ __m128_out = __lsx_vfrintrp_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00ff00ff; ++ *((int *)&__m128_op0[2]) = 0x00ff00ff; ++ *((int *)&__m128_op0[1]) = 0x62cbf96e; ++ *((int 
*)&__m128_op0[0]) = 0x4acfaf40; ++ *((int *)&__m128_result[3]) = 0x3f800000; ++ *((int *)&__m128_result[2]) = 0x3f800000; ++ *((int *)&__m128_result[1]) = 0x62cbf96e; ++ *((int *)&__m128_result[0]) = 0x4acfaf40; ++ __m128_out = __lsx_vfrintrp_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00002000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x1fe02000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x3f800000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x3f800000; ++ __m128_out = __lsx_vfrintrp_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_result[3]) = 0xffffffff; ++ *((int *)&__m128_result[2]) = 0xffffffff; ++ *((int *)&__m128_result[1]) = 0xffffffff; ++ *((int *)&__m128_result[0]) = 0xffffffff; ++ __m128_out = __lsx_vfrintrp_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x63636363; ++ *((int *)&__m128_op0[2]) = 0x63abdf16; ++ *((int *)&__m128_op0[1]) = 0x41f8e080; ++ *((int *)&__m128_op0[0]) = 0x16161198; ++ *((int *)&__m128_result[3]) = 0x63636363; ++ *((int *)&__m128_result[2]) = 0x63abdf16; ++ *((int *)&__m128_result[1]) = 0x42000000; ++ *((int *)&__m128_result[0]) = 0x3f800000; ++ __m128_out = __lsx_vfrintrp_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int 
*)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfrintrm_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xa5c4c774; ++ *((int *)&__m128_op0[2]) = 0x856ba83b; ++ *((int *)&__m128_op0[1]) = 0x8003caef; ++ *((int *)&__m128_op0[0]) = 0x54691124; ++ *((int *)&__m128_result[3]) = 0xbf800000; ++ *((int *)&__m128_result[2]) = 0xbf800000; ++ *((int *)&__m128_result[1]) = 0xbf800000; ++ *((int *)&__m128_result[0]) = 0x54691124; ++ __m128_out = __lsx_vfrintrm_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00010002; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xff960015; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0xffffffff; ++ *((int *)&__m128_result[0]) = 0xffd60015; ++ __m128_out = __lsx_vfrintrm_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0x3c992b2e; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffff730f; ++ *((int *)&__m128_result[3]) = 0xffffffff; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0xffffffff; ++ *((int *)&__m128_result[0]) = 0xffff730f; ++ __m128_out = __lsx_vfrintrz_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000001; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000016; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfrintrz_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x18171615; ++ *((int 
*)&__m128_op0[2]) = 0x17161514; ++ *((int *)&__m128_op0[1]) = 0x16151413; ++ *((int *)&__m128_op0[0]) = 0x15141312; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfrintrz_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x62cbf96e; ++ *((int *)&__m128_op0[2]) = 0x4acfaf40; ++ *((int *)&__m128_op0[1]) = 0xf0bc9a52; ++ *((int *)&__m128_op0[0]) = 0x78285a4a; ++ *((int *)&__m128_result[3]) = 0x62cbf96e; ++ *((int *)&__m128_result[2]) = 0x4acfaf40; ++ *((int *)&__m128_result[1]) = 0xf0bc9a52; ++ *((int *)&__m128_result[0]) = 0x78285a4a; ++ __m128_out = __lsx_vfrintrz_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfrintrz_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-1.c +new file mode 100644 +index 000000000..8d0d56632 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-1.c +@@ -0,0 +1,349 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ 
int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x000000017fff9000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000210011084; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vftint_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x000000000000040d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0008000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00ff000000ff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x004f1fcfd01f9f9f; ++ *((unsigned long *)&__m128d_op0[0]) = 0x9f4fcfcfcf800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x9c7c266e3faa293c; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m128d_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x000000ffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffff0000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_l_d 
(__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000015d926c7; ++ *((unsigned long *)&__m128d_op0[0]) = 0x000000000000e41b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vftintrp_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrp_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrp_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000777777777777; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffff7777ffff7777; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrp_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000001; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vftintrp_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrp_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000004000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0080000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0xf4b6f3f52f4ef4a8; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000000000000000; ++ __m128i_out = __lsx_vftintrm_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_l_d (__m128d_op0); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xff80ffffffffff80; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000ff80ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00000000b5207f80; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffff007f00000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffff007f00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xbff0000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0xc0f3fa0080000000; ++ *((unsigned long *)&__m128i_result[1]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffec060; ++ __m128i_out = __lsx_vftintrz_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000ebd20000714f; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00012c8a0000a58a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-2.c +new file mode 100644 +index 000000000..5dba807f6 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-2.c +@@ -0,0 +1,695 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, 
int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00d4ccb8; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00124888; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xfff00000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xfff00000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int 
*)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x80000000; ++ *((int *)&__m128_op0[2]) = 0xffffd860; ++ *((int *)&__m128_op0[1]) = 0x7fffffff; ++ *((int *)&__m128_op0[0]) = 0x80000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00008000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00008000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftinth_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xff80ffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0x7ffffffe; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftinth_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 
0x00000000; ++ *((int *)&__m128_op0[1]) = 0x4f804f80; ++ *((int *)&__m128_op0[0]) = 0x4f804f80; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftinth_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x0000007b; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftinth_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftinth_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftinth_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000600; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftinth_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int 
*)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftinth_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x3f800000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vftinth_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x04870ba0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftinth_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00009c7c; ++ *((int *)&__m128_op0[0]) = 0x00007176; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftinth_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftinth_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 
0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftinth_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x0667ae56; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000020; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftinth_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrnel_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrnel_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x887c8beb; ++ *((int *)&__m128_op0[2]) = 0x969e00f2; ++ *((int *)&__m128_op0[1]) = 0x101f8b68; ++ *((int *)&__m128_op0[0]) = 0x0b6f8095; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrnel_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ 
++ *((int *)&__m128_op0[3]) = 0x00020000; ++ *((int *)&__m128_op0[2]) = 0x00020000; ++ *((int *)&__m128_op0[1]) = 0x000001fc; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrnel_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrnel_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrnel_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrnel_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00020000; ++ *((int *)&__m128_op0[0]) = 0xffff0001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrnel_l_s (__m128_op0); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x0a752a55; ++ *((int *)&__m128_op0[1]) = 0x0a753500; ++ *((int *)&__m128_op0[0]) = 0xa9fa0d06; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrnel_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrpl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x7fffffff; ++ *((int *)&__m128_op0[2]) = 0x7fffffff; ++ *((int *)&__m128_op0[1]) = 0x7fffffff; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrpl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x000d0254; ++ *((int *)&__m128_op0[2]) = 0x0000007e; ++ *((int *)&__m128_op0[1]) = 0x00000014; ++ *((int *)&__m128_op0[0]) = 0x00140014; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vftintrpl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vftintrpl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x39412488; ++ *((int *)&__m128_op0[0]) = 0x80000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrpl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrpl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000014; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000014; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vftintrpl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00010001; ++ *((int *)&__m128_op0[2]) = 0x00010001; ++ *((int *)&__m128_op0[1]) = 0x00010001; ++ *((int *)&__m128_op0[0]) = 0x00010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vftintrpl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vftintrpl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrpl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x2e34594c; ++ *((int *)&__m128_op0[0]) = 0x3b000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vftintrpl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrml_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrml_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0x7ffffffe; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrml_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00010001; ++ *((int *)&__m128_op0[2]) = 0x00010001; ++ *((int *)&__m128_op0[1]) = 0x00010001; ++ *((int *)&__m128_op0[0]) = 0x00010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrml_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x7ff000ff; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrml_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrml_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00ff00ff; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrzl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrzl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrzl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrzl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xfffefffe; ++ *((int *)&__m128_op0[2]) = 0xfffeffff; ++ *((int *)&__m128_op0[1]) = 0xfffefffe; ++ *((int *)&__m128_op0[0]) = 0xfffeffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrzl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrzl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x0000033a; ++ *((int *)&__m128_op0[2]) = 0x0bde0853; ++ *((int *)&__m128_op0[1]) = 0x0a960e6b; ++ *((int *)&__m128_op0[0]) = 
0x0a4f0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrzl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0x7ffffffe; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrneh_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xfffffffe; ++ *((int *)&__m128_op0[0]) = 0x00000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrneh_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrneh_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000001; ++ *((int *)&__m128_op0[2]) = 0x7ffeffff; ++ *((int *)&__m128_op0[1]) = 0x00000001; ++ *((int *)&__m128_op0[0]) = 0x7ffeffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrneh_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 
0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrneh_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrneh_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x80808080; ++ *((int *)&__m128_op0[0]) = 0x80638063; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrph_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrph_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000080; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vftintrph_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 
0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrph_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x80000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrmh_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrmh_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00ff00ff; ++ *((int *)&__m128_op0[2]) = 0x00ff00ff; ++ *((int *)&__m128_op0[1]) = 0x62cbf96e; ++ *((int *)&__m128_op0[0]) = 0x4acfaf40; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrmh_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0x0000ac26; ++ *((int *)&__m128_op0[1]) = 0x00ff0000; ++ *((int *)&__m128_op0[0]) = 0x00000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrmh_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x6420e020; 
++ *((int *)&__m128_op0[2]) = 0x8400c4e3; ++ *((int *)&__m128_op0[1]) = 0x20c4e0c4; ++ *((int *)&__m128_op0[0]) = 0xe0da6499; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrzh_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xfbffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0x7bffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrzh_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x18171615; ++ *((int *)&__m128_op0[2]) = 0x17161514; ++ *((int *)&__m128_op0[1]) = 0x16151413; ++ *((int *)&__m128_op0[0]) = 0x15141312; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrzh_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrzh_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x046a09ec; ++ *((int *)&__m128_op0[0]) = 0x009c0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrzh_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int 
*)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrzh_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x4f4f4f4f; ++ *((int *)&__m128_op0[2]) = 0x4f4f4f4f; ++ *((int *)&__m128_op0[1]) = 0x4f4f4f4f; ++ *((int *)&__m128_op0[0]) = 0x4f4f4f4f; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000cf4f4f00; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000cf4f4f00; ++ __m128i_out = __lsx_vftintrzh_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-3.c +new file mode 100644 +index 000000000..7f6d2f4d1 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-3.c +@@ -0,0 +1,1028 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x52527d7d52527d7d; ++ *((unsigned long *)&__m128d_op1[0]) = 0x52527d7d52527d7d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vftint_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x0000ffff; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x0000ffff; ++ *((int *)&__m128_op0[0]) = 0x0000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000001; ++ *((int *)&__m128_op0[2]) = 0xfffffffe; ++ *((int *)&__m128_op0[1]) = 0x00000001; ++ *((int *)&__m128_op0[0]) = 0xfffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00040100; ++ *((int *)&__m128_op0[1]) = 0x00010001; ++ *((int *)&__m128_op0[0]) = 0x00010000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x0000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xfffffffffffff800; ++ *((unsigned long *)&__m128d_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffff00000080; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000001; ++ *((int *)&__m128_op0[2]) = 0xfffffffe; ++ *((int *)&__m128_op0[1]) = 
0x00000001; ++ *((int *)&__m128_op0[0]) = 0xfffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000e0180000e810; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000f0080000f800; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000e0180000e810; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000f0080000f800; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffd30000; ++ *((int *)&__m128_op0[2]) = 0x00130000; ++ *((int *)&__m128_op0[1]) = 0xffd30000; ++ *((int *)&__m128_op0[0]) = 0x00130000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xe1000000; ++ *((int *)&__m128_op0[2]) = 0x4deb2610; ++ *((int *)&__m128_op0[1]) = 0xe101e001; ++ *((int *)&__m128_op0[0]) = 0x4dec4089; ++ *((unsigned long *)&__m128i_result[1]) = 0x800000001d64c200; ++ *((unsigned long *)&__m128i_result[0]) = 0x800000001d881120; ++ __m128i_out = 
__lsx_vftint_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x76f42488; ++ *((int *)&__m128_op0[0]) = 0x80000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff00000000; ++ __m128i_out = __lsx_vftint_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x0000001f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0202f5f80000ff00; ++ *((unsigned long *)&__m128d_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x003fffc0; ++ *((int *)&__m128_op0[2]) = 0xffc0003f; ++ *((int *)&__m128_op0[1]) = 0xffc0ffc0; ++ *((int *)&__m128_op0[0]) = 0x003f003f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffff7fffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffff8000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x42652524; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000003900000000; ++ __m128i_out = __lsx_vftintrne_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xff00ff7f; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0x7f800000; ++ *((int *)&__m128_op0[1]) = 0x2d1da85b; ++ *((int *)&__m128_op0[0]) = 0x7f800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000007fffffff; ++ __m128i_out = __lsx_vftintrne_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x80307028; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0x8040007f; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ 
*((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00000000fefefe6a; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00000000c2bac2c2; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x000000002bfd9461; ++ *((unsigned long *)&__m128d_op1[1]) = 0x000000004fc04f81; ++ *((unsigned long *)&__m128d_op1[0]) = 0x000000004fc04f80; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x000000000000001f; ++ *((unsigned long *)&__m128d_op0[0]) = 0x000000000000001f; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000003a0000003a; ++ *((unsigned long *)&__m128d_op1[1]) = 0x37c0001000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x37c0001000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000068; ++ *((unsigned long *)&__m128d_op1[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128d_op1[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x4429146a7b4c88b2; ++ *((unsigned long *)&__m128d_op0[0]) = 0xe22b3595efa4aa0c; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 
0x7fffffff80000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0001000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000400000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00000000fffffff5; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0xe7e5560400010001; ++ *((unsigned long *)&__m128d_op1[0]) = 0xe7e5dabf00010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000000080000000; ++ __m128i_out = 
__lsx_vftintrne_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x03050302; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x03010302; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000600007fff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00000008ffffa209; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x046a09ec009c0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x000aa822; ++ *((int *)&__m128_op0[2]) = 0xa79308f6; ++ *((int *)&__m128_op0[1]) = 0x03aa355e; ++ *((int *)&__m128_op0[0]) = 0x1d37b5a1; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffff00; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffff0000; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00001802; ++ *((int *)&__m128_op0[0]) = 0x041b0013; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vftintrp_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrp_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrp_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x004200a000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x004200a000200000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrp_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000fe00ff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vftintrp_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0001000101fd01fe; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrp_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000c2f90000bafa; ++ *((unsigned long *)&__m128d_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00000000fffff800; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vftintrp_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xff80ffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0x7ffffffe; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrp_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0101080408040804; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0804080407040804; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0101080408040804; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0804080407040804; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vftintrp_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00010001; ++ *((int *)&__m128_op0[2]) = 0x00010001; ++ *((int *)&__m128_op0[1]) = 0x00010001; ++ *((int *)&__m128_op0[0]) = 0x00010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vftintrp_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrp_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00000003ffda00f3; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00000003ffda00f3; ++ *((unsigned long *)&__m128d_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrp_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xfffffadf; ++ *((int *)&__m128_op0[2]) = 0xfedbfefe; ++ *((int *)&__m128_op0[1]) = 0x5f5f7bfe; ++ *((int *)&__m128_op0[0]) = 0xdefb5ada; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000080000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff80000000; ++ __m128i_out = __lsx_vftintrp_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffa6ff91fdd8ef77; ++ *((unsigned long *)&__m128d_op0[0]) = 0x061202bffb141c38; ++ *((unsigned long *)&__m128d_op1[1]) = 0xfefffffffed08f77; ++ *((unsigned long *)&__m128d_op1[0]) = 0x8160cdd2f365ed0d; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000000000000000; ++ __m128i_out = __lsx_vftintrp_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000001; ++ *((int *)&__m128_op0[2]) = 0x084314a6; ++ *((int *)&__m128_op0[1]) = 0x00000001; ++ *((int *)&__m128_op0[0]) = 0x084314a6; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vftintrp_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x3f413f4100000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x7f801fe000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000017fffffff; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vftintrp_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrp_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrp_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrp_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xfe3bfb01fe3bfe01; ++ *((unsigned long *)&__m128d_op0[0]) = 0xfe03fe3ffe01fa21; ++ *((unsigned long *)&__m128d_op1[1]) = 0xfe3bfb01fe3bfe01; ++ *((unsigned long *)&__m128d_op1[0]) = 0xfe03fe3ffe01fa21; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000000080000000; ++ __m128i_out = __lsx_vftintrp_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x3a800000; ++ *((int *)&__m128_op0[2]) = 0x3a800000; ++ *((int *)&__m128_op0[1]) = 0x000ef000; ++ *((int *)&__m128_op0[0]) = 0x0000003b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vftintrp_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x10404000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x09610001; ++ *((int *)&__m128_op0[0]) = 0x00000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x0000001a; ++ *((int *)&__m128_op0[2]) = 0xfffffff7; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0800080008000800; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0800080008000800; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x000000000202fe02; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffff00fc0000ff02; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00040004; ++ *((int *)&__m128_op0[2]) = 0x00040004; ++ *((int *)&__m128_op0[1]) = 0x00040004; ++ *((int *)&__m128_op0[0]) = 0x00040004; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00ffff00; ++ *((int *)&__m128_op0[2]) = 0xff00ff00; ++ *((int *)&__m128_op0[1]) = 0x00ffff00; ++ *((int *)&__m128_op0[0]) = 0xff00ff00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000080000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000080000000; ++ __m128i_out = __lsx_vftintrm_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x013ec13e; ++ *((int *)&__m128_op0[1]) = 0xc03fc03f; ++ *((int *)&__m128_op0[0]) = 0xc0ff00ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffdfffffff8; ++ __m128i_out = __lsx_vftintrm_w_s 
(__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x7fffffff7ffffffb; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x43800000; ++ *((int *)&__m128_op0[0]) = 0x43800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000010000000100; ++ __m128i_out = __lsx_vftintrm_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000014; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000014; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000017fda829; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xfffffff7; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x80307028ffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x8040007fffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xff84fff4; ++ *((int *)&__m128_op0[2]) = 0xff84fff4; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xfffffff0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 
0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x7fff0007e215b122; ++ *((unsigned long *)&__m128d_op1[0]) = 0x7ffeffff7bfff828; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x07ffc000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffff0000; ++ *((int *)&__m128_op0[0]) = 0x0000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000158; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000158; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vftintrz_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xf039b8c0; ++ *((int *)&__m128_op0[2]) = 0xc61e81ef; ++ *((int 
*)&__m128_op0[1]) = 0x6db7da53; ++ *((int *)&__m128_op0[0]) = 0xfbd2e34b; ++ *((unsigned long *)&__m128i_result[1]) = 0x80000000ffffd860; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff80000000; ++ __m128i_out = __lsx_vftintrz_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00020000; ++ *((int *)&__m128_op0[0]) = 0xffff0001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00027113; ++ *((int *)&__m128_op0[2]) = 0x50a27112; ++ *((int *)&__m128_op0[1]) = 0x00d57017; ++ *((int *)&__m128_op0[0]) = 0x94027113; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xff80ff80; ++ *((int *)&__m128_op0[2]) = 0x7e017f01; ++ *((int *)&__m128_op0[1]) = 0x7f3b7f3f; ++ *((int *)&__m128_op0[0]) = 0x7f3b7f21; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vftintrz_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00000000011ff040; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x00000000047fe2f0; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00000000047fe2f0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-4.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-4.c +new file mode 100644 +index 000000000..9c5bb9131 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-4.c +@@ -0,0 +1,345 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x0000c77c; ++ *((int *)&__m128_op0[2]) = 0x000047cd; ++ *((int *)&__m128_op0[1]) = 0x0000c0f1; ++ *((int *)&__m128_op0[0]) = 0x00006549; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_wu_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) 
= 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_wu_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_wu_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_wu_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xfffffff0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_wu_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0x00000001; ++ *((int *)&__m128_op0[1]) = 0xffffffee; ++ *((int *)&__m128_op0[0]) = 0x00000004; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_wu_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ 
*((int *)&__m128_op0[2]) = 0x0000ffff; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_wu_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_wu_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x63636363; ++ *((int *)&__m128_op0[2]) = 0x63636363; ++ *((int *)&__m128_op0[1]) = 0x63636363; ++ *((int *)&__m128_op0[0]) = 0x63636363; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vftint_wu_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xfffffffe; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_wu_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x02020004; ++ *((int *)&__m128_op0[2]) = 0x02020202; ++ *((int *)&__m128_op0[1]) = 0x00002000; ++ *((int *)&__m128_op0[0]) = 0x00010000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_wu_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int 
*)&__m128_op0[3]) = 0x7fff7fff; ++ *((int *)&__m128_op0[2]) = 0x7fff7fff; ++ *((int *)&__m128_op0[1]) = 0x00000001; ++ *((int *)&__m128_op0[0]) = 0x0000003f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_wu_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x000000ff; ++ *((int *)&__m128_op0[2]) = 0x808000ff; ++ *((int *)&__m128_op0[1]) = 0x000000ff; ++ *((int *)&__m128_op0[0]) = 0x808000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_wu_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x7f7f7f7f; ++ *((int *)&__m128_op0[1]) = 0x00000001; ++ *((int *)&__m128_op0[0]) = 0x00000010; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_wu_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_wu_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00002000; ++ *((int *)&__m128_op0[2]) = 0x00002000; ++ *((int *)&__m128_op0[1]) = 0x10000000; ++ *((int *)&__m128_op0[0]) = 0x10000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_wu_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000001; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_wu_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x67eb85af; ++ *((int *)&__m128_op0[2]) = 0xb2ebb000; ++ *((int *)&__m128_op0[1]) = 0xc8847ef6; ++ *((int *)&__m128_op0[0]) = 0xed3f2000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_wu_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_wu_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000400000004000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000400000007004; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128d_op0[1]) = 0x3c600000ff800000; ++ *((unsigned long *)&__m128d_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x6a57a30ff0000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vftint_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x40f0001000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x40f0001000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000010001; ++ __m128i_out = __lsx_vftint_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000014; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x3a8000003a800000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x000ef0000000003b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x000000009c83e21a; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000022001818; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 
0x0080000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0080000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x800000001d64c200; ++ *((unsigned long *)&__m128d_op0[0]) = 0x800000001d881120; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00000000f0009d3c; ++ *((unsigned long *)&__m128d_op0[0]) = 0x000000016fff9dff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000040a04000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000040a04000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x8000800080008000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x03fc03fc03fc03fc; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +-- +2.33.0 + diff --git a/LoongArch-Add-tests-for-SX-vector-handling-and-shuff.patch b/LoongArch-Add-tests-for-SX-vector-handling-and-shuff.patch new file mode 100644 index 0000000000000000000000000000000000000000..a65a289a028c6f79f65098f011e87c4207178dc8 --- /dev/null +++ b/LoongArch-Add-tests-for-SX-vector-handling-and-shuff.patch @@ -0,0 +1,5411 @@ +From ab7f1db887733fabf41c7a39730c48376e29100c Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Tue, 12 Sep 2023 11:34:56 +0800 +Subject: [PATCH 096/124] LoongArch: Add tests for SX vector handling and + shuffle instructions. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lsx/lsx-vbsll.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vbsrl.c: New test. 
+ * gcc.target/loongarch/vector/lsx/lsx-vextrins.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vilvh.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vilvl.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vinsgr2vr.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vpackev.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vpackod.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vpickev.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vpickod.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vpickve2gr.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vpremi.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vreplgr2vr.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vreplve.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vreplvei.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vshuf.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vshuf4i.c: New test. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lsx/lsx-vbsll.c | 83 +++ + .../loongarch/vector/lsx/lsx-vbsrl.c | 55 ++ + .../loongarch/vector/lsx/lsx-vextrins.c | 479 +++++++++++++++++ + .../loongarch/vector/lsx/lsx-vilvh.c | 353 +++++++++++++ + .../loongarch/vector/lsx/lsx-vilvl.c | 327 ++++++++++++ + .../loongarch/vector/lsx/lsx-vinsgr2vr.c | 278 ++++++++++ + .../loongarch/vector/lsx/lsx-vpackev.c | 452 ++++++++++++++++ + .../loongarch/vector/lsx/lsx-vpackod.c | 461 +++++++++++++++++ + .../loongarch/vector/lsx/lsx-vpickev.c | 362 +++++++++++++ + .../loongarch/vector/lsx/lsx-vpickod.c | 336 ++++++++++++ + .../loongarch/vector/lsx/lsx-vpickve2gr.c | 488 ++++++++++++++++++ + .../loongarch/vector/lsx/lsx-vpremi.c | 20 + + .../loongarch/vector/lsx/lsx-vreplgr2vr.c | 212 ++++++++ + .../loongarch/vector/lsx/lsx-vreplve.c | 300 +++++++++++ + .../loongarch/vector/lsx/lsx-vreplvei.c | 293 +++++++++++ + .../loongarch/vector/lsx/lsx-vshuf.c | 394 ++++++++++++++ + .../loongarch/vector/lsx/lsx-vshuf4i.c | 348 +++++++++++++ + 17 files changed, 
5241 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsll.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsrl.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextrins.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvh.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvl.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vinsgr2vr.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackev.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackod.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickev.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickod.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickve2gr.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpremi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplgr2vr.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplve.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplvei.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf4i.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsll.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsll.c +new file mode 100644 +index 000000000..34246c551 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsll.c +@@ -0,0 +1,83 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d 
__m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ffffff000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffff000000ff00; ++ __m128i_out = __lsx_vbsll_v (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_result[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_result[0]) = 0xff00000000000000; ++ __m128i_out = __lsx_vbsll_v (__m128i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000800; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0008000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbsll_v (__m128i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0a00000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbsll_v (__m128i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0141010101410101; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0141010101410101; ++ *((unsigned long *)&__m128i_result[1]) = 
0x4101010141010100; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbsll_v (__m128i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbsll_v (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbsll_v (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbsll_v (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000158; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000001580000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbsll_v (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsrl.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsrl.c +new file mode 100644 +index 000000000..986b7d566 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsrl.c +@@ -0,0 +1,55 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w 
-fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000401000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000004; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000040100; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010000; ++ __m128i_out = __lsx_vbsrl_v (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000003fffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000003fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x003fffffff000000; ++ __m128i_out = __lsx_vbsrl_v (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0005fe0300010101; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[0]) = 0xfe03000101010000; ++ __m128i_out = __lsx_vbsrl_v (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vbsrl_v (__m128i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xd3259a2984048c23; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf9796558e39953fd; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000d3259a; ++ __m128i_out = __lsx_vbsrl_v (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextrins.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextrins.c +new file mode 100644 +index 000000000..8d4158b57 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextrins.c +@@ -0,0 +1,479 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xbf8000000000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xcf00000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff00000000; ++ __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0x92); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_b (__m128i_op0, __m128i_op1, 0xc2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_h (__m128i_op0, __m128i_op1, 0x3d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0200020002000200; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0200020002000200; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff02000200; ++ __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0c03e17edd781b11; ++ *((unsigned long *)&__m128i_op0[0]) = 0x342caf9be55700b5; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000040400000383; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffe000ffff1fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0c03e17edd781b11; ++ *((unsigned long *)&__m128i_result[0]) = 0x342caf9bffff1fff; ++ __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0xcc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_b (__m128i_op0, __m128i_op1, 0xc6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000a16316b0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000063636363; ++ *((unsigned long *)&__m128i_op1[1]) = 0x16161616a16316b0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000a16316b0; ++ *((unsigned long *)&__m128i_result[0]) = 0x16161616a16316b0; ++ __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0xa7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfff489b693120950; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffc45a851c40c18; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffc45a851c40c18; ++ __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0x48); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0xcc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000005d5d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0x41); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffefefe6a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000c2bac2c2; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000fefefe6a; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000c2bac2c2; ++ __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0x7c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7ffffffeffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4080808080808080; ++ *((unsigned long *)&__m128i_result[1]) = 0xff80ffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x7ffffffeffffffff; ++ __m128i_out = __lsx_vextrins_b (__m128i_op0, __m128i_op1, 0xe6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x000a000a000a000a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000a0000000a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000a00000009; ++ *((unsigned long *)&__m128i_result[1]) = 0x000a000a0000000a; ++ *((unsigned long *)&__m128i_result[0]) = 0x000a000a000a000a; ++ __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0xaf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff80000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0x67); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x004fcfcfd01f9f9f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x9f4fcfcfcf800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x004fcfcfd01f9f9f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x9f4fcfcfcf800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x004f1fcfd01f9f9f; ++ *((unsigned long *)&__m128i_result[0]) = 0x9f4fcfcfcf800000; ++ __m128i_out = __lsx_vextrins_b (__m128i_op0, __m128i_op1, 0xda); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x75b043c4d17db125; ++ *((unsigned long *)&__m128i_op0[0]) = 0xeef8227b596117b1; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_result[1]) = 0x75b043c4d17db125; ++ *((unsigned long *)&__m128i_result[0]) = 0xeef8227b4f8017b1; ++ __m128i_out = __lsx_vextrins_h (__m128i_op0, __m128i_op1, 0x15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x027c027c000027c0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000de32400; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x027c027c000027c0; ++ __m128i_out = __lsx_vextrins_h (__m128i_op0, __m128i_op1, 0x77); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6363797c63996399; ++ *((unsigned long *)&__m128i_op0[0]) = 0x171f0a1f6376441f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x6363797c63990099; ++ *((unsigned long *)&__m128i_result[0]) = 0x171f0a1f6376441f; ++ __m128i_out = __lsx_vextrins_b (__m128i_op0, __m128i_op1, 0x94); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0bd80bd80bdfffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0bd80bd80bd80000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0bd80bd80bd80000; ++ __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0xf9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x41dfbe1f41e0ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffc2ffe000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffc100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x41dfbe1f41e0ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffc100010001; ++ __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0xec); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xe93d0bd19ff0c170; ++ *((unsigned long *)&__m128i_op1[0]) = 0x5237c1bac9eadf55; ++ *((unsigned long *)&__m128i_result[1]) = 0x5237c1baffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0x7d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffbd994889; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000a092444; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000890000000000; ++ __m128i_out = __lsx_vextrins_b (__m128i_op0, __m128i_op1, 0x58); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000fea0000fffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffff8607db959f; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff0cff78ff96ff14; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000fea0000fffe; ++ *((unsigned long *)&__m128i_result[0]) = 0xff0cff78ff96ff14; ++ __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0xc2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x01ef013f01e701f8; ++ *((unsigned long *)&__m128i_op1[0]) = 0x35bb8d32b2625c00; ++ *((unsigned long *)&__m128i_result[1]) = 0x00008d3200000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_h (__m128i_op0, __m128i_op1, 0xea); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x8003000000020000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4040ffffc0400004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x8003000000020000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0x64); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0x74); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffff53d9; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff0001ffff9515; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffff53d9; ++ *((unsigned long *)&__m128i_result[0]) = 0xff000001ffff9515; ++ __m128i_out = __lsx_vextrins_b (__m128i_op0, __m128i_op1, 0x67); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x80808080806b000b; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_b (__m128i_op0, __m128i_op1, 0xf4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_b (__m128i_op0, __m128i_op1, 0xc1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0x71); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0x82); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_b (__m128i_op0, __m128i_op1, 0xd5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0xf3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xbbe5560400010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe7e5dabf00010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0xbbe5560400010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xe7e5dabf00010001; ++ *((unsigned long *)&__m128i_result[1]) = 0xe7e5560400010001; ++ *((unsigned long *)&__m128i_result[0]) = 0xe7e5dabf00010001; ++ __m128i_out = __lsx_vextrins_h (__m128i_op0, __m128i_op1, 0xf3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0x2c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_h (__m128i_op0, __m128i_op1, 0x27); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0x5d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0x24); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000101010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vextrins_b (__m128i_op0, __m128i_op1, 0xb6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x8000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x975ca6046e2e4889; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1748c4f9ed1a5870; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x1748c4f9ed1a5870; ++ __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0x6a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffc606ec5; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000014155445; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_h (__m128i_op0, __m128i_op1, 0x76); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000024170000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000aa822a79308f6; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000084d12ce; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000084d12ce; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000024170000; ++ __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0x32); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000084d12ce; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000024170000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_h (__m128i_op0, __m128i_op1, 0x56); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vextrins_b (__m128i_op0, __m128i_op1, 0xc5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000034; ++ *((unsigned long *)&__m128i_op1[1]) = 0x01017f3c00000148; ++ *((unsigned long *)&__m128i_op1[0]) = 0x117d7f7b093d187f; ++ *((unsigned long *)&__m128i_result[1]) = 0x117d7f7b093d187f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000034; ++ __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0x70); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x01533b5e7489ae24; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe519ab7e71e33848; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x01533b5e7489ae24; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffab7e71e33848; ++ __m128i_out = __lsx_vextrins_h (__m128i_op0, __m128i_op1, 0xbc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff760386bdae46; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc1fc7941bc7e00ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffff7603; ++ __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0xc3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff2356fe165486; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3a8000003a800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000ef0000000003b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000003b0000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff2356fe165486; ++ __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0x70); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0x8a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvh.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvh.c +new file mode 100644 +index 000000000..aa802b295 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvh.c +@@ -0,0 +1,353 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000003; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvh_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vilvh_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvh_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvh_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x007fffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x007fffff00000000; ++ __m128i_out = __lsx_vilvh_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x195f307a5d04acbb; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6a1a3fbb3c90260e; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x195f307a5d04acbb; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvh_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8644000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xaed495f03343a685; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffbe6ed563; ++ *((unsigned long *)&__m128i_result[1]) = 0x8644ffff0000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000fffe; ++ __m128i_out = __lsx_vilvh_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvh_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff0000ffff0000; ++ __m128i_out = __lsx_vilvh_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000001300000013; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000001300000013; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000e13; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000e13; ++ __m128i_out = __lsx_vilvh_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vilvh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000a000a00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000a000a00000000; ++ __m128i_out = __lsx_vilvh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_result[0]) = 0xff00ff00ff00ff00; ++ __m128i_out = __lsx_vilvh_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000004f804f80; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000004f804f80; ++ __m128i_out = __lsx_vilvh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_result[1]) = 0x004f0080004f0080; ++ *((unsigned long *)&__m128i_result[0]) = 0x004f0080004f0080; ++ __m128i_out = __lsx_vilvh_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000ffa7f8ff81; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000003f0080ffc0; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000007fff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000a7f87fffff81; ++ __m128i_out = __lsx_vilvh_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00003f8000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00003f8000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x8000ffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8000ffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000080003f80ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvh_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x202020202020ff20; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x2000200020002000; ++ *((unsigned long *)&__m128i_result[0]) = 0x2000200020002000; ++ __m128i_out = __lsx_vilvh_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0008000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0808ffff0808ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0808ffff0808ffff; ++ __m128i_out = __lsx_vilvh_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvh_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vilvh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00ff00ff; ++ __m128i_out = __lsx_vilvh_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000157; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 
0x0000000000000158; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010058; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010058; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvh_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff; ++ __m128i_out = __lsx_vilvh_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000200; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000200; ++ __m128i_out = __lsx_vilvh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvh_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0002008360500088; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000008; ++ __m128i_out = __lsx_vilvh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvh_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000f3040705; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000f3040705; ++ __m128i_out = __lsx_vilvh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvh_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_result[0]) = 0xff00ff00ff00ff00; ++ __m128i_out = __lsx_vilvh_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vilvh_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvl.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvl.c +new file mode 100644 +index 000000000..88c66f220 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvl.c +@@ -0,0 +1,327 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000b0000000b; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000201000000000b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000201000000000b; ++ __m128i_out = __lsx_vilvl_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vilvl_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffefffefffffffc; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffefffffffeff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffcff; ++ __m128i_out = __lsx_vilvl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7404443064403aec; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000d6eefefc0498; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff7f800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x2d1da85b7f800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x002d001dd6a8ee5b; ++ *((unsigned long *)&__m128i_result[0]) = 0xfe7ffc8004009800; ++ __m128i_out = __lsx_vilvl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000800; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vilvl_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000001000000010; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001000000010; ++ __m128i_out = __lsx_vilvl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000c0000bd49; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000c7fff000c; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000c7fff000c; ++ *((unsigned long *)&__m128i_result[0]) = 0x1000100010001000; ++ __m128i_out = __lsx_vilvl_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ff00ff0000007f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vilvl_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000001e8e1d8; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000e400000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000001e8e1d8; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000e400000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000e4e4; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000101; ++ __m128i_out = __lsx_vilvl_b (__m128i_op0, 
__m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0008000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0008000000000000; ++ __m128i_out = __lsx_vilvl_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvl_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vilvl_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffe0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffe0; ++ __m128i_out = __lsx_vilvl_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xbafebb00ffd500fe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvl_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x80808080806b000b; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff80005613; ++ *((unsigned long *)&__m128i_op1[0]) = 0x007f800000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000807f80808000; ++ *((unsigned long *)&__m128i_result[0]) = 0x80006b0000000b00; ++ __m128i_out = __lsx_vilvl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000080808000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x80808080806b000b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0080008000800080; ++ *((unsigned long *)&__m128i_result[0]) = 0x0080006b0000000b; ++ __m128i_out = __lsx_vilvl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvl_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc0808000c0808000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xc080800000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xc080800000000000; ++ __m128i_out = __lsx_vilvl_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff010300ff0103; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x007ffff001000300; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ff0001000300; ++ __m128i_out = __lsx_vilvl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7ffffffe00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7ffffffe00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x007f00ff00ff00fe; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_out = __lsx_vpickve2gr_b 
(__m128i_op0, 0x8); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvl_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0002000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0014001400140000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000001400000014; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001400000000; ++ __m128i_out = __lsx_vilvl_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00009c7c00007176; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000009c007c00; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000071007600; ++ __m128i_out = __lsx_vilvl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x9c9c9c9c00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000020; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvl_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0000000000060002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000060002; ++ *((unsigned long *)&__m128i_op1[1]) = 0xe4c8b96e2560afe9; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc001a1867fffa207; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000c0010000a186; ++ *((unsigned long *)&__m128i_result[0]) = 0x00067fff0002a207; ++ __m128i_out = __lsx_vilvl_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000014414104505; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1011050040004101; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000014414104505; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1011050040004101; ++ *((unsigned long *)&__m128i_result[1]) = 0x1010111105050000; ++ *((unsigned long *)&__m128i_result[0]) = 0x4040000041410101; ++ __m128i_out = __lsx_vilvl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvl_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffac5cffffac5c; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffac5cffffac5c; ++ *((unsigned long *)&__m128i_op1[1]) = 0x010169d9010169d9; ++ *((unsigned long *)&__m128i_op1[0]) = 0x01010287010146a1; ++ *((unsigned long *)&__m128i_result[1]) = 0xff01ff01ac025c87; ++ *((unsigned long *)&__m128i_result[0]) = 0xff01ff01ac465ca1; ++ __m128i_out = __lsx_vilvl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0xff01ff01ac025c87; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff01ff01ac465ca1; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xff01ff0100000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xac465ca100000000; ++ __m128i_out = __lsx_vilvl_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000eefff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf8e1a03affffe3e2; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000246d9755; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000002427c2ee; ++ *((unsigned long *)&__m128i_result[1]) = 0xf8e10000a03a0000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff2427e3e2c2ee; ++ __m128i_out = __lsx_vilvl_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffe4ffe4ffe4ffe4; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffe4ffe4ffe4ffe4; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000011ff040; ++ *((unsigned long *)&__m128i_result[1]) = 0xff00e400ff00e400; ++ *((unsigned long *)&__m128i_result[0]) = 0xff01e41ffff0e440; ++ __m128i_out = __lsx_vilvl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff00e400ff00e400; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff01e41ffff0ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xff01ffffe41f0000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfff00000ffff0000; ++ __m128i_out = __lsx_vilvl_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git 
a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vinsgr2vr.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vinsgr2vr.c +new file mode 100644 +index 000000000..2b9dcc0b5 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vinsgr2vr.c +@@ -0,0 +1,278 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000007942652524; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x4265252400000000; ++ __m128i_out = __lsx_vinsgr2vr_w (__m128i_op0, int_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vinsgr2vr_b (__m128i_op0, int_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ int_op1 = 0x0000007942652524; ++ *((unsigned long *)&__m128i_result[1]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff2524ffffffff; ++ __m128i_out = __lsx_vinsgr2vr_h (__m128i_op0, int_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000017fff9000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000210011084; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000017fff9000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000200000000; ++ __m128i_out = __lsx_vinsgr2vr_w (__m128i_op0, int_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ long_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vinsgr2vr_d (__m128i_op0, long_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffff0000; ++ __m128i_out = __lsx_vinsgr2vr_h (__m128i_op0, int_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vinsgr2vr_b (__m128i_op0, int_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0080000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0080000000000000; ++ int_op1 = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x0080000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0080000000000000; ++ __m128i_out = __lsx_vinsgr2vr_b (__m128i_op0, int_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5d5d5d5d5d5d5d55; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x5d5d5d005d5d5d55; ++ __m128i_out = __lsx_vinsgr2vr_b (__m128i_op0, int_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x1); ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000080000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000000080000000; ++ __m128i_out = __lsx_vinsgr2vr_h (__m128i_op0, int_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x2); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffff0000; ++ __m128i_out = __lsx_vinsgr2vr_h (__m128i_op0, int_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vinsgr2vr_w (__m128i_op0, int_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x2020202020202020; ++ int_op1 = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m128i_result[0]) = 0x202020202020ff20; ++ __m128i_out = __lsx_vinsgr2vr_b (__m128i_op0, int_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000800000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00fe01fc0005fff4; ++ int_op1 = 0x0000000020202020; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000820202020; ++ *((unsigned long *)&__m128i_result[0]) = 0x00fe01fc0005fff4; ++ __m128i_out = __lsx_vinsgr2vr_w (__m128i_op0, int_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffbfffffffbf; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffbfffffffbf; ++ long_op1 = 0x0000000000003a24; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffbfffffffbf; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000003a24; ++ __m128i_out = __lsx_vinsgr2vr_d (__m128i_op0, long_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7ef8000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7ef8000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7ef8000000000000; ++ __m128i_out = __lsx_vinsgr2vr_w (__m128i_op0, int_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000000; ++ long_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vinsgr2vr_d (__m128i_op0, long_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ int_op1 = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff000000ff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vinsgr2vr_w (__m128i_op0, int_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000001000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001000; ++ int_op1 = 0x000000007ff00000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000001000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000001000; ++ __m128i_out = __lsx_vinsgr2vr_h (__m128i_op0, int_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000020006; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000060000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vinsgr2vr_b (__m128i_op0, int_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vinsgr2vr_w (__m128i_op0, int_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000020006; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000600; ++ __m128i_out = __lsx_vinsgr2vr_b (__m128i_op0, int_op1, 
0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000003; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000003; ++ __m128i_out = __lsx_vinsgr2vr_b (__m128i_op0, int_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000ff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vinsgr2vr_w (__m128i_op0, int_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vinsgr2vr_w (__m128i_op0, int_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vinsgr2vr_h (__m128i_op0, int_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000001f1f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff000000001f1f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vinsgr2vr_b (__m128i_op0, int_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ long_op1 = 0x0000000000000040; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000040; ++ __m128i_out = __lsx_vinsgr2vr_d (__m128i_op0, long_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffff0000; ++ int_op1 = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000ffffff0000; ++ __m128i_out = __lsx_vinsgr2vr_w (__m128i_op0, int_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff000000000000; ++ __m128i_out = __lsx_vinsgr2vr_h (__m128i_op0, int_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x04faf60009f5f092; ++ *((unsigned long *)&__m128i_op0[0]) = 0x04fafa9200000000; ++ int_op1 = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x04faf600fff5f092; ++ *((unsigned long *)&__m128i_result[0]) = 0x04fafa9200000000; ++ __m128i_out = __lsx_vinsgr2vr_b (__m128i_op0, int_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vinsgr2vr_b (__m128i_op0, int_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackev.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackev.c +new file mode 100644 +index 000000000..030e87fd8 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackev.c +@@ -0,0 +1,452 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00001802041b0013; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00001802041b0013; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000ffff0000ffff; ++ __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vpackev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000ff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000ff000000ff; ++ __m128i_out = __lsx_vpackev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ 
__m128i_out = __lsx_vpackev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff80000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff80000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0080000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf4b6f3f52f4ef4a8; ++ *((unsigned long *)&__m128i_result[1]) = 0xff80000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xf4b6f3f52f4ef4a8; ++ __m128i_out = __lsx_vpackev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff; ++ __m128i_out = __lsx_vpackev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x10f917d72d3d01e4; ++ *((unsigned long *)&__m128i_op1[0]) = 0x203e16d116de012b; ++ *((unsigned long *)&__m128i_result[1]) = 0x00f900d7003d00e4; ++ *((unsigned long *)&__m128i_result[0]) = 0x003e00d100de002b; ++ __m128i_out = __lsx_vpackev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000010000000000; ++ __m128i_out = __lsx_vpackev_b 
(__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc2f9bafac2fac2fa; ++ *((unsigned long *)&__m128i_op1[1]) = 0xbdf077eee7e20468; ++ *((unsigned long *)&__m128i_op1[0]) = 0xe3b1cc6953e7db29; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000e7e20468; ++ *((unsigned long *)&__m128i_result[0]) = 0xc2fac2fa53e7db29; ++ __m128i_out = __lsx_vpackev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vpackev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xf8f8e018f8f8e810; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf8f8f008f8f8f800; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000e0180000e810; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000f0080000f800; ++ __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1211100f11100f0e; ++ *((unsigned long *)&__m128i_op0[0]) = 0x100f0e0d0f0e0d0c; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_result[1]) = 0x11000f2010000e20; ++ *((unsigned long *)&__m128i_result[0]) = 0x0f000d200e000c20; ++ __m128i_out = __lsx_vpackev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xe3e3e3e3e3e3e3e3; ++ *((unsigned long *)&__m128i_op1[0]) = 0xe3e3e3e3e3e3e3e3; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xe3e3e3e3e3e3e3e3; ++ __m128i_out = __lsx_vpackev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7ffe7ffe7ffe7ffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00007ffe00007ffe; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000001c00ffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00007f7f00007f7f; ++ *((unsigned long *)&__m128i_result[1]) = 0x000001000f00fe00; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000017fff00fe7f; ++ __m128i_out = __lsx_vpackev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000f0009d3c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000016fff9d3d; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffff000f0008d3c; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffff0016fff8d3d; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff000000003c3c; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff0101ffff3d3d; ++ __m128i_out = __lsx_vpackev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000006f00001f0a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000958affff995d; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000958affff995d; ++ __m128i_out = __lsx_vpackev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vpackev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); 
++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000001ca02f854; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100013fa0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100013fa0; ++ __m128i_out = __lsx_vpackev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffefffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffefffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffefefffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vpackev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0002fffefffd0001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1202120212021202; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1202120212021202; ++ *((unsigned long *)&__m128i_result[1]) = 0x0002000200020002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0202fe02fd020102; ++ __m128i_out = __lsx_vpackev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x5a6f5c53ebed3faa; ++ *((unsigned long *)&__m128i_op0[0]) = 0xa36aca4435b8b8e1; ++ *((unsigned long *)&__m128i_op1[1]) = 0x5a6f5c53ebed3faa; ++ *((unsigned long *)&__m128i_op1[0]) = 0xa36aca4435b8b8e1; ++ *((unsigned long *)&__m128i_result[1]) = 0x5c535c533faa3faa; ++ *((unsigned long *)&__m128i_result[0]) = 0xca44ca44b8e1b8e1; ++ __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0004000400040004; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0004000400040004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x77c0404a4000403a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x77c03fd640003fc6; ++ *((unsigned long *)&__m128i_result[1]) = 0x04c0044a0400043a; ++ *((unsigned long *)&__m128i_result[0]) = 0x04c004d6040004c6; ++ __m128i_out = __lsx_vpackev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000006362ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000d0000000d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000dffff000d; ++ __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000010000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000002002; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000010000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_result[1]) = 0x0002000200020002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0002000200020002; ++ __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000208000002080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000208000002080; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000208000002080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000208000002080; ++ *((unsigned long *)&__m128i_result[1]) = 0x2080208020802080; ++ *((unsigned long *)&__m128i_result[0]) = 0x2080208020802080; ++ __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000000000b; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000000b; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000000b; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000000b; ++ __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000001b; ++ 
*((unsigned long *)&__m128i_op0[0]) = 0x000000000000001b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000001b0000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000001b0000; ++ __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vpackev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000053a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfff9000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffc000400000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffc000400000000; ++ __m128i_out = __lsx_vpackev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000002000000020; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000001f0000001f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000002000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001f00000000; ++ __m128i_out = __lsx_vpackev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe593c8c4e593c8c4; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x8080000080800000; ++ *((unsigned long *)&__m128i_result[0]) = 0x9380c4009380c400; ++ __m128i_out = __lsx_vpackev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffc2007aff230027; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0080005eff600001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x01017f3c00000148; ++ *((unsigned long *)&__m128i_op1[0]) = 0x117d7f7b093d187f; ++ *((unsigned long *)&__m128i_result[1]) = 0xff23002700000148; ++ *((unsigned long *)&__m128i_result[0]) = 0xff600001093d187f; ++ __m128i_out = __lsx_vpackev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff0000ffff0000; ++ __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0002711250a27112; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00d2701294027112; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff7112ffff7112; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff7012ffff7112; ++ __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x30eb020302101b03; ++ *((unsigned long *)&__m128i_op0[0]) = 0x020310d0c0030220; ++ *((unsigned long *)&__m128i_op1[1]) = 0x30eb020302101b03; ++ *((unsigned long *)&__m128i_op1[0]) = 0x020310d0c0030220; ++ *((unsigned long *)&__m128i_result[1]) = 0x020310d0c0030220; ++ *((unsigned long *)&__m128i_result[0]) = 0x020310d0c0030220; ++ __m128i_out = __lsx_vpackev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000eefff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf8e1a03affffe3e2; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000000eefff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf8e1a03affffe3e2; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000efffefff; ++ *((unsigned long *)&__m128i_result[0]) = 0xa03aa03ae3e2e3e2; ++ __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op1[1]) = 0x02b010f881a281a2; ++ *((unsigned long *)&__m128i_op1[0]) = 0x27b169bbb8140001; ++ *((unsigned long *)&__m128i_result[1]) = 0x000010f8000081a2; ++ *((unsigned long *)&__m128i_result[0]) = 0x000069bb00000001; ++ __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackod.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackod.c +new file mode 100644 +index 000000000..783eedae1 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackod.c +@@ -0,0 +1,461 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000201000000000b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000020100; ++ __m128i_out = __lsx_vpackod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffc002000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00003ff000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000fffc00000000; ++ __m128i_out = __lsx_vpackod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x03574e3a62407e03; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000001010000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x03574e3a62407e03; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000001010000; ++ *((unsigned long *)&__m128i_result[1]) = 0x03574e3a03574e3a; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000020; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000001fe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vpackod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000003a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000015; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000800; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff; ++ __m128i_out = __lsx_vpackod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xe0404041e0404041; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe0404041e0404041; ++ *((unsigned long *)&__m128i_op1[1]) = 0x803f800080000000; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0xe0404041c0404040; ++ *((unsigned long *)&__m128i_result[1]) = 0xe0404041e0404041; ++ *((unsigned long *)&__m128i_result[0]) = 0x803f800080000000; ++ __m128i_out = __lsx_vpackod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfe80000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00fe000000000000; ++ __m128i_out = __lsx_vpackod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfe80ffffffffff02; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff80000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfe80ff80ffff0000; ++ __m128i_out = __lsx_vpackod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x11000f2010000e20; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0f000d200e000c20; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x11000f2000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0f000d2000000000; ++ __m128i_out = __lsx_vpackod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4f8000004f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4f8000004f800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4f8000004f800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4f8000004f800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_result[0]) = 0x4f804f804f804f80; ++ __m128i_out = __lsx_vpackod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000006f00001f0a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000c000ffffc000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000006f00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000c00000000000; ++ __m128i_out = __lsx_vpackod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x40f0001000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x40f0001000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m128i_result[1]) = 0x40f0001000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffcfffcfffcfffc; ++ __m128i_out = __lsx_vpackod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vpackod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x2222272011111410; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_op1[0]) = 0x2222272011111410; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000002000000020; ++ __m128i_out = __lsx_vpackod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff0000ff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffef8; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffdfffdfffdffee0; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffffffdfffdf; ++ __m128i_out = __lsx_vpackod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0010100000100000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1000100000101000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000001000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0010001000000010; ++ __m128i_out = __lsx_vpackod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x21201f1e19181716; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x01203f1e3d1c3b1a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3918371635143312; ++ *((unsigned long *)&__m128i_result[1]) = 0x21011f3f193d173b; ++ *((unsigned long *)&__m128i_result[0]) = 0xff39ff37ff35ff33; ++ __m128i_out = __lsx_vpackod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000003fbf3fbf; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fff7fff7fff7ff8; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff3fbfffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vpackod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 
0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vpackod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff80806362; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff00008080; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000000000ff; ++ __m128i_out = __lsx_vpackod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000100000000fc; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000100000000fc; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000010000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000010000000000; ++ __m128i_out = __lsx_vpackod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0404050404040404; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0404050404040404; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000004040504; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000004040504; ++ __m128i_out = __lsx_vpackod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000807f80808000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x80006b0000000b00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x80808080806b000b; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000807f00000000; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x80006b0080808080; ++ __m128i_out = __lsx_vpackod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000400000004000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00004000ffffffff; ++ __m128i_out = __lsx_vpackod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010000; ++ __m128i_out = __lsx_vpackod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000080008; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00ff00ff; ++ __m128i_out = __lsx_vpackod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff000000ff0000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000001400000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000800000000000; ++ __m128i_out = __lsx_vpackod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vpackod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffefffe00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffefffe00000000; ++ __m128i_out = __lsx_vpackod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xcd636363cd636363; ++ *((unsigned long *)&__m128i_op1[0]) = 0xcd636363cd636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x00cd006300cd0063; ++ *((unsigned long *)&__m128i_result[0]) = 0x00cd006300cd0063; ++ __m128i_out = __lsx_vpackod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000aa822a79308f6; ++ *((unsigned long *)&__m128i_op0[0]) = 0x03aa558e1d37b5a1; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000084d12ce; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ff80fd820000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000aa822a79308f6; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000084d12ce; ++ __m128i_out = __lsx_vpackod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x2e34594c3b000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x002e0059003b0000; ++ 
__m128i_out = __lsx_vpackod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x001e001e001e001e; ++ *((unsigned long *)&__m128i_op0[0]) = 0x001e001e001e001e; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffaeffaeffaeffae; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffaeffaeffaeffae; ++ *((unsigned long *)&__m128i_result[1]) = 0x001effae001effae; ++ *((unsigned long *)&__m128i_result[0]) = 0x001effae001effae; ++ __m128i_out = __lsx_vpackod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000440efffff000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000003b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000440efffff000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000003b; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff2356fe165486; ++ *((unsigned long *)&__m128i_op1[0]) = 0x5efeb3165bd7653d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff2356fe165486; ++ __m128i_out = __lsx_vpackod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000cecd00004657; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000c90000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00019d9a00008cae; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000000; ++ __m128i_out = __lsx_vpackod_h 
(__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickev.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickev.c +new file mode 100644 +index 000000000..58591f1bb +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickev.c +@@ -0,0 +1,362 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xc2409edab019323f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x460f3b393ef4be3a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x460f3b393ef4be3a; ++ __m128i_out = __lsx_vpickev_d (__m128i_op0, 
__m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0004007c00fc0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000fc0000; ++ __m128i_out = __lsx_vpickev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vpickev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffefefefe; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vpickev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xedfaedfaedfaedfa; ++ *((unsigned long *)&__m128i_op0[0]) = 0xedfaedfaedfaedfa; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0xedfaedfaedfaedfa; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffff0000; ++ __m128i_out = __lsx_vpickev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vpickev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4811fda96793b23a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8f10624016be82fd; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfda9b23a624082fd; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffff0000; ++ __m128i_out = __lsx_vpickev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0xaaaaffebcfb748e0; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfd293eab528e7ebe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0xffeb48e03eab7ebe; ++ __m128i_out = __lsx_vpickev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffff7; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffff7; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff7; ++ __m128i_out = __lsx_vpickev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned 
long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffff00010000fff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffff00010000fff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffff00010000fff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffff00010000fff; ++ __m128i_out = __lsx_vpickev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vpickev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000120002000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100013fa0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100013fa0; ++ *((unsigned long *)&__m128i_result[1]) = 0x2000200000013fa0; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000013fa0; ++ __m128i_out = __lsx_vpickev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x000f7d1000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x773324887fffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000017161515; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000095141311; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000017fffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x1716151595141311; ++ __m128i_out = __lsx_vpickev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4040404040404040; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4040404040404040; ++ *((unsigned long *)&__m128i_result[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m128i_result[0]) = 0x4040404040404040; ++ __m128i_out = __lsx_vpickev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000dfa6e0c6; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000d46cdc13; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7ef400ad21fc7081; ++ *((unsigned long *)&__m128i_op1[0]) = 0x28bf0351ec69b5f2; ++ *((unsigned long *)&__m128i_result[1]) = 0xdfa6e0c6d46cdc13; ++ *((unsigned long *)&__m128i_result[0]) = 0x21fc7081ec69b5f2; ++ __m128i_out = __lsx_vpickev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x04c0044a0400043a; ++ 
*((unsigned long *)&__m128i_op0[0]) = 0x04c004d6040004c6; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0004000400040004; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0004000400040004; ++ *((unsigned long *)&__m128i_result[1]) = 0x044a043a04d604c6; ++ *((unsigned long *)&__m128i_result[0]) = 0x0004000400040004; ++ __m128i_out = __lsx_vpickev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00001b4a00007808; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00001b4a00007808; ++ *((unsigned long *)&__m128i_result[1]) = 0x00001b4a00007808; ++ *((unsigned long *)&__m128i_result[0]) = 0x00001b4a00007808; ++ __m128i_out = __lsx_vpickev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3fc03fc000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f801fe000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff00010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3fc03fc000000004; ++ *((unsigned long *)&__m128i_result[1]) = 0x7f801fe000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x3fc03fc000000004; ++ __m128i_out = __lsx_vpickev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000010000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000010000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff0000ffffffff; ++ __m128i_out = __lsx_vpickev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vpickev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000001000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffff00; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000103030102ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffff00ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000010102ffff; ++ __m128i_out = __lsx_vpickev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x30eb022002101b20; ++ *((unsigned long *)&__m128i_op0[0]) = 0x020310edc003023d; ++ 
*((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x020310edc003023d; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x01533b5e7489ae24; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffab7e71e33848; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x3b5eae24ab7e3848; ++ __m128i_out = __lsx_vpickev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000009c83e21a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000022001818; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000e21a00001818; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4f4f4f4f4f4f4f4f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4f4f4f4f4f4f4f4f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x4f4f4f4f4f4f4f4f; ++ __m128i_out = __lsx_vpickev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickod.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickod.c 
+new file mode 100644 +index 000000000..74269e319 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickod.c +@@ -0,0 +1,336 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lsxintrin.h> ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000401000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000001000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffe000ffff1fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000401000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000001000001; ++ __m128i_out = __lsx_vpickod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m128i_op1[1]) =
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf436f3f52f4ef4a8; ++ *((unsigned long *)&__m128i_result[1]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0080000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf4b6f3f52f4ef4a8; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0080000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf4b6f3f52f4ef4a8; ++ *((unsigned long *)&__m128i_result[1]) = 0x0080000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0080000000000000; ++ __m128i_out = __lsx_vpickod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffff51cf8da; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffd6040188; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000101fffff8b68; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000b6fffff8095; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffff51cffffd604; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff; ++ __m128i_out = __lsx_vpickod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ unsigned_int_out = __lsx_vpickve2gr_bu (__m128i_op0, 0xa); ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffff0cffffff18; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfefffefffeff6a0c; ++ 
*((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc2f9bafac2fac2fa; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffefefe6a; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000c2bac2c2; ++ __m128i_out = __lsx_vpickod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x11000f2010000e20; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0f000d200e000c20; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x11000f200f000d20; ++ __m128i_out = __lsx_vpickod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000805; ++ *((unsigned long *)&__m128i_op0[0]) = 0x978d95ac768d8784; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000104000800; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000897957687; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000408; ++ __m128i_out = __lsx_vpickod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ff91fffffff5; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff00650001ffb0; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffffffff0001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ca02f854; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ca02f854; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ca0200000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ca0200000000; ++ __m128i_out = __lsx_vpickod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000c6c6ee22; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000c6c62e8a; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x00000000c6c6ee22; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000c6c62e8a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x21201f1e1d001b1a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1918171615141312; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x21201f1e19181716; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vpickod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000ff000000ff; ++ __m128i_out = __lsx_vpickod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0002000000000007; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0006000000040000; ++ __m128i_out = __lsx_vpickod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x6363636363636363; ++ 
*((unsigned long *)&__m128i_op1[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x6363636363636363; ++ __m128i_out = __lsx_vpickod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000020000020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000020000020; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000200000002000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f801fe000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3fc03fc000000004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3fc03fc000000003; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7f7f1fd800000004; ++ *((unsigned long *)&__m128i_result[1]) = 0x7f1f00003f3f0000; ++ *((unsigned long *)&__m128i_result[0]) = 0x3f3f00007f1f0000; ++ __m128i_out = __lsx_vpickod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff9f017f1fa0b199; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1197817fd839ea3e; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000033; ++ *((unsigned long *)&__m128i_result[1]) = 0xff011fb11181d8ea; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x80808080806b000b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000080808000; ++ __m128i_out = __lsx_vpickod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vpickod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000fffefffe; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vpickod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x67eb85afb2ebb000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000003ddc5dac; ++ *((unsigned long *)&__m128i_result[1]) = 0x67ebb2ebc884ed3f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000003ddc; ++ __m128i_out = __lsx_vpickod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000003e2; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000000003e2; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vpickod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ebd20000714f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00012c8a0000a58a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000010000; ++ __m128i_out = __lsx_vpickod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickve2gr.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickve2gr.c +new file mode 100644 +index 000000000..acca2bee9 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickve2gr.c +@@ -0,0 +1,488 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lsxintrin.h> ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ unsigned_int_result = 0x0000000000000000; ++ unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x7); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ int_out = __lsx_vpickve2gr_b (__m128i_op0, 0x4); ++ int_result = 0x0000000000000000; ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ long_int_result = 0x0000000000000000; ++ long_int_out = __lsx_vpickve2gr_d
(__m128i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, long_int_result, long_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ unsigned_int_result = 0x0000000000000000; ++ unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x3); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ff0000ff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x01fc020000fe0100; ++ unsigned_int_result = 0x0000000000000000; ++ unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x7); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ unsigned_int_result = 0x0000000000000000; ++ unsigned_long_int_out = __lsx_vpickve2gr_du (__m128i_op0, 0x1); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000463fd2902d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5ccd54bbfcac806c; ++ unsigned_int_result = 0x00000000000000ac; ++ unsigned_int_out = __lsx_vpickve2gr_bu (__m128i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x697eba2bedfa9c82; ++ *((unsigned long *)&__m128i_op0[0]) = 0xd705c77a7025c899; ++ unsigned_int_result = 0x000000000000edfa; ++ unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x5); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0400040004000400; ++ unsigned_int_result = 0x0000000000000400; ++ unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x5); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0000000000000000; ++ unsigned_int_result = 0x0000000000000000; ++ unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x3); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000007d3ac600; ++ int_result = 0x0000000000000000; ++ int_out = __lsx_vpickve2gr_b (__m128i_op0, 0x7); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_result = 0x0000000000000000; ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1dffbfff00000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0200400000000001; ++ unsigned_int_result = 0x0000000000000001; ++ unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000003fffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000003fffffff; ++ int_result = 0x0000000000000000; ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x1); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000490000004d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000001ffffffff; ++ long_int_result = 0x00000001ffffffff; ++ long_int_out = __lsx_vpickve2gr_d (__m128i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, long_int_result, long_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff84fff4ff84fff4; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00a6ffceffb60052; ++ unsigned_int_result = 0x0000000000000084; ++ unsigned_int_out = __lsx_vpickve2gr_bu (__m128i_op0, 0xa); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ 
long_int_result = 0x0000000000000000; ++ long_int_out = __lsx_vpickve2gr_d (__m128i_op0, 0x1); ++ ASSERTEQ_int (__LINE__, long_int_result, long_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0101010101010101; ++ int_result = 0x00000000ffffffff; ++ int_out = __lsx_vpickve2gr_b (__m128i_op0, 0xc); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ unsigned_int_result = 0x00000000ffffffff; ++ unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ long_int_result = 0xffffffffffffffff; ++ long_int_out = __lsx_vpickve2gr_d (__m128i_op0, 0x1); ++ ASSERTEQ_int (__LINE__, long_int_result, long_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_result = 0x0000000000000000; ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ long_int_result = 0x0000000000000000; ++ long_int_out = __lsx_vpickve2gr_d (__m128i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, long_int_result, long_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ unsigned_int_result = 0x0000000000000000; ++ unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x3); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3f8000003f800000; ++ unsigned_long_int_result = 
0x3f8000003f800000; ++ unsigned_long_int_out = __lsx_vpickve2gr_du (__m128i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0bd80bd80bd80bd8; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0bd80bd80bd80bd8; ++ unsigned_long_int_result = 0x0bd80bd80bd80bd8; ++ unsigned_long_int_out = __lsx_vpickve2gr_du (__m128i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ unsigned_int_result = 0x0000000000000000; ++ unsigned_int_out = __lsx_vpickve2gr_bu (__m128i_op0, 0x8); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000c0000bd49; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000c7fff000c; ++ int_result = 0x0000000000000000; ++ int_out = __lsx_vpickve2gr_b (__m128i_op0, 0xb); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000100c6ffef10c; ++ unsigned_int_result = 0x00000000000000ff; ++ unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x2020202020207f7f; ++ unsigned_int_result = 0x0000000020202020; ++ unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x1); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000003a24; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003dbe88077c78c1; ++ int_result = 0x0000000000003a24; ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0xffffffffffffffff; ++ int_result = 0x00000000ffffffff; ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x1); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ unsigned_int_result = 0x00000000000000ff; ++ unsigned_int_out = __lsx_vpickve2gr_bu (__m128i_op0, 0x9); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0002000200000000; ++ unsigned_int_result = 0x0000000000000000; ++ unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ unsigned_int_result = 0x0000000000000000; ++ unsigned_int_out = __lsx_vpickve2gr_bu (__m128i_op0, 0xb); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_result = 0x0000000000000000; ++ int_out = __lsx_vpickve2gr_h (__m128i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffe080f6efc100f7; ++ *((unsigned long *)&__m128i_op0[0]) = 0xefd32176ffe100f7; ++ int_result = 0x0000000000002176; ++ int_out = __lsx_vpickve2gr_h (__m128i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ long_int_result = 0x0000000000000000; ++ long_int_out = __lsx_vpickve2gr_d (__m128i_op0, 0x1); ++ ASSERTEQ_int (__LINE__, long_int_result, long_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0000000000000000; ++ unsigned_long_int_result = 0x0000000000000000; ++ unsigned_long_int_out = __lsx_vpickve2gr_du (__m128i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000200000002; ++ int_result = 0x0000000000000002; ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ unsigned_long_int_result = 0x0000000000000000; ++ unsigned_long_int_out = __lsx_vpickve2gr_du (__m128i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00ff00ff; ++ int_result = 0x0000000000000000; ++ int_out = __lsx_vpickve2gr_b (__m128i_op0, 0x5); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffc0ff80ff800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005; ++ unsigned_int_result = 0x0000000000000000; ++ unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x3); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x80008000ec82ab51; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000800089e08000; ++ int_result = 0x0000000089e08000; ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ int_result = 0x00000000ffffffff; ++ int_out = __lsx_vpickve2gr_h (__m128i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6453f5e01d6e5000; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x000fdec000000000; ++ int_result = 0x000000001d6e5000; ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6453f5e01d6e5000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000fdec000000000; ++ int_result = 0x0000000001d6e5000; ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00009c7c00007176; ++ int_result = 0x0000000000000000; ++ int_out = __lsx_vpickve2gr_b (__m128i_op0, 0xe); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ int_result = 0x00000000ffffffff; ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x3); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8d78336c83652b86; ++ *((unsigned long *)&__m128i_op0[0]) = 0x39c51f389c0d6112; ++ int_result = 0x000000009c0d6112; ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000ff0000857a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x05fafe0101fe000e; ++ unsigned_int_result = 0x000000000000857a; ++ unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x4); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_result = 0x0000000000000000; ++ int_out = __lsx_vpickve2gr_h (__m128i_op0, 0x1); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x021b7d2449678a35; ++ *((unsigned long *)&__m128i_op0[0]) = 0x030298a621030a49; ++ int_result = 0x00000000ffff8a35; ++ int_out = __lsx_vpickve2gr_h (__m128i_op0, 0x4); ++ ASSERTEQ_int 
(__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x001effae001effae; ++ *((unsigned long *)&__m128i_op0[0]) = 0x001effae001effae; ++ unsigned_int_result = 0x000000000000001e; ++ unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x3); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00009c7c00007176; ++ int_result = 0x0000000000000000; ++ int_out = __lsx_vpickve2gr_b (__m128i_op0, 0xe); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ int_result = 0x00000000ffffffff; ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x3); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8d78336c83652b86; ++ *((unsigned long *)&__m128i_op0[0]) = 0x39c51f389c0d6112; ++ int_result = 0x000000009c0d6112; ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000ff0000857a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x05fafe0101fe000e; ++ unsigned_int_result = 0x000000000000857a; ++ unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x4); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_result = 0x0000000000000000; ++ int_out = __lsx_vpickve2gr_h (__m128i_op0, 0x1); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x021b7d2449678a35; ++ *((unsigned long *)&__m128i_op0[0]) = 0x030298a621030a49; ++ int_result = 0x00000000ffff8a35; ++ int_out = __lsx_vpickve2gr_h (__m128i_op0, 0x4); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x001effae001effae; ++ *((unsigned long *)&__m128i_op0[0]) = 0x001effae001effae; ++ unsigned_int_result = 0x000000000000001e; ++ unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x3); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_result = 0x0000000000000000; ++ int_out = __lsx_vpickve2gr_b (__m128i_op0, 0x8); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ unsigned_long_int_result = 0x0000000000000000; ++ unsigned_long_int_out = __lsx_vpickve2gr_du (__m128i_op0, 0x1); ++ ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ unsigned_int_result = 0x0000000000000000; ++ unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x5); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000003ddc5dac; ++ long_int_result = 0x000000003ddc5dac; ++ long_int_out = __lsx_vpickve2gr_d (__m128i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, long_int_result, long_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6453f5e01d6e5000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000fdec000000000; ++ int_result = 0x000000001d6e5000; ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00009c7c00007176; ++ int_result = 0x0000000000000000; ++ int_out = __lsx_vpickve2gr_b (__m128i_op0, 0xe); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ int_result = 0x00000000ffffffff; ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x3); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8d78336c83652b86; ++ *((unsigned long *)&__m128i_op0[0]) = 0x39c51f389c0d6112; ++ int_result = 0x000000009c0d6112; ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000ff0000857a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x05fafe0101fe000e; ++ unsigned_int_result = 0x000000000000857a; ++ unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x4); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_result = 0x0000000000000000; ++ int_out = __lsx_vpickve2gr_h (__m128i_op0, 0x1); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x021b7d2449678a35; ++ *((unsigned long *)&__m128i_op0[0]) = 0x030298a621030a49; ++ int_result = 0x00000000ffff8a35; ++ int_out = __lsx_vpickve2gr_h (__m128i_op0, 0x4); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x001effae001effae; ++ *((unsigned long *)&__m128i_op0[0]) = 0x001effae001effae; ++ unsigned_int_result = 0x000000000000001e; ++ unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x3); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0674c8868a74fc80; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfdce8003090b0906; ++ int_result = 0x00000000090b0906; ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_result = 0x0000000000000000; 
++ int_out = __lsx_vpickve2gr_h (__m128i_op0, 0x3); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000feff23560000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000fd1654860000; ++ unsigned_int_result = 0x00000000000000ff; ++ unsigned_int_out = __lsx_vpickve2gr_bu (__m128i_op0, 0xc); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ unsigned_long_int_result = 0x0000000000000000; ++ unsigned_long_int_out = __lsx_vpickve2gr_du (__m128i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4f4f00004f4f0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4f4f00004f4f0000; ++ unsigned_int_result = 0x000000004f4f0000; ++ unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000120000000d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000e0000000e; ++ unsigned_long_int_result = 0x0000000e0000000e; ++ unsigned_long_int_out = __lsx_vpickve2gr_du (__m128i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpremi.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpremi.c +new file mode 100644 +index 000000000..ef0ad676e +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpremi.c +@@ -0,0 +1,20 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, 
__m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplgr2vr.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplgr2vr.c +new file mode 100644 +index 000000000..a5f02b1b1 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplgr2vr.c +@@ -0,0 +1,212 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ int_op0 = 0x0000000059815d00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_b (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_b (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000400; ++ *((unsigned long *)&__m128i_result[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m128i_result[0]) = 0x0400040004000400; ++ __m128i_out = __lsx_vreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000400; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_b (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ long_op0 = 0x0000000000000400; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000400; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000400; ++ __m128i_out = __lsx_vreplgr2vr_d (long_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_w (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vreplgr2vr_b (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ long_op0 = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_result[0]) = 0x3f8000003f800000; ++ __m128i_out = __lsx_vreplgr2vr_d (long_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_b (int_op0); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_b (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ long_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_d (long_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000020202020; ++ *((unsigned long *)&__m128i_result[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m128i_result[0]) = 0x2020202020202020; ++ __m128i_out = __lsx_vreplgr2vr_b (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000ff000000ff; ++ __m128i_out = __lsx_vreplgr2vr_w (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_w (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_b (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vreplgr2vr_b (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_w (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ long_op0 = 0x000000007ff00000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000007ff00000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000007ff00000; ++ __m128i_out = __lsx_vreplgr2vr_d (long_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_b (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ long_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_d (long_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_b 
(int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vreplgr2vr_b (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00ff00ff; ++ __m128i_out = __lsx_vreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_w (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x000000000000001e; ++ *((unsigned long *)&__m128i_result[1]) = 0x1e1e1e1e1e1e1e1e; ++ *((unsigned long *)&__m128i_result[0]) = 0x1e1e1e1e1e1e1e1e; ++ __m128i_out = __lsx_vreplgr2vr_b (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_w (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_b (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git 
a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplve.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplve.c +new file mode 100644 +index 000000000..463adb48e +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplve.c +@@ -0,0 +1,300 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x00000045eef14fe8; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_w (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000080000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000080000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_b (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_b (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x00000000000000ac; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_d (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_b (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x78c00000ff000000; ++ int_op1 = 0x0000000000000400; ++ *((unsigned long *)&__m128i_result[1]) = 0xff000000ff000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xff000000ff000000; ++ __m128i_out = __lsx_vreplve_w (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_w (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x803f800080000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe0404041c0404040; ++ int_op1 = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0xe0404041e0404041; ++ *((unsigned long 
*)&__m128i_result[0]) = 0xe0404041e0404041; ++ __m128i_out = __lsx_vreplve_w (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_b (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffffffff0001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_h (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ int_op1 = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vreplve_w (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000020006; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_b (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_h (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffb4ff; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffb4ff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffb4ff; ++ __m128i_out = __lsx_vreplve_d (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000020202020; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_d (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x000000007ff00000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_d (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000020006; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_h (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_w (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffff4; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffff4; ++ int_op1 = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vreplve_b (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_w (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ffff00ff00ff00; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ffff00ff00ff00; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_result[0]) = 0xff00ff00ff00ff00; ++ __m128i_out = __lsx_vreplve_w (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000080000000; ++ int_op1 = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[0]) = 0x8080808080808080; ++ __m128i_out = __lsx_vreplve_b (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7ff0000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_w (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_w (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_w (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000001b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000001b; ++ int_op1 = 0xffffffff89e08000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000001b0000001b; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001b0000001b; ++ __m128i_out = __lsx_vreplve_w (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_h (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_b (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfefefefdbffefdfe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfefefeeffef7fefe; ++ int_op1 = 0xffffffff9c0d6112; ++ *((unsigned long *)&__m128i_result[1]) = 0xbffefdfebffefdfe; ++ *((unsigned long *)&__m128i_result[0]) = 0xbffefdfebffefdfe; ++ __m128i_out = __lsx_vreplve_w (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff800000ff800000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_result[0]) = 0xff800000ff800000; ++ __m128i_out = __lsx_vreplve_w (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffd27db010d20fbf; ++ int_op1 = 0x0000000000000040; ++ *((unsigned long *)&__m128i_result[1]) = 0x0fbf0fbf0fbf0fbf; ++ *((unsigned long *)&__m128i_result[0]) = 0x0fbf0fbf0fbf0fbf; ++ __m128i_out = __lsx_vreplve_h (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x00000000090b0906; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_b (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0xffffffffffff8a35; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_d (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x05dfffc3ffffffc0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000047fe2f0; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000047fe2f0; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000047fe2f0; ++ __m128i_out = __lsx_vreplve_d (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffefffe011df03e; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xf03ef03ef03ef03e; ++ *((unsigned long *)&__m128i_result[0]) = 0xf03ef03ef03ef03e; ++ __m128i_out = __lsx_vreplve_h (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplvei.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplvei.c +new file mode 100644 +index 000000000..a81be76f1 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplvei.c +@@ -0,0 +1,293 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_w (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_w (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000055555501; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000005555555554; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000005555555554; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000005555555554; ++ __m128i_out = __lsx_vreplvei_d (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000036280000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x42a0000042a02000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x697eba2bedfa9c82; ++ *((unsigned long *)&__m128i_op0[0]) = 0xd705c77a7025c899; ++ *((unsigned long *)&__m128i_result[1]) = 0xedfaedfaedfaedfa; ++ *((unsigned long *)&__m128i_result[0]) = 0xedfaedfaedfaedfa; ++ __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000300000003; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000a0a08000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5350a08000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000800080008000; ++ __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vreplvei_w (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x80010009816ac5de; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8001000184000bd8; ++ *((unsigned long *)&__m128i_result[1]) = 0x0bd80bd80bd80bd8; ++ *((unsigned long *)&__m128i_result[0]) = 0x0bd80bd80bd80bd8; ++ __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_b (__m128i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x000100010001007c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_b (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_d (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1149a96eb1a08000; ++ *((unsigned long *)&__m128i_result[1]) = 0xb1a08000b1a08000; ++ *((unsigned 
long *)&__m128i_result[0]) = 0xb1a08000b1a08000; ++ __m128i_out = __lsx_vreplvei_w (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_result[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_result[0]) = 0x0808080808080808; ++ __m128i_out = __lsx_vreplvei_w (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_d (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_w (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_w (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffcc9a989a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_b (__m128i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_w (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000adadadad; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000adadadad; ++ *((unsigned long *)&__m128i_result[1]) = 0xadadadadadadadad; ++ *((unsigned long *)&__m128i_result[0]) = 0xadadadadadadadad; ++ __m128i_out = __lsx_vreplvei_w (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_b (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_d (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_w (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[0]) = 0x8080808080808080; ++ __m128i_out = __lsx_vreplvei_w (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vreplvei_b (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vreplvei_w (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3131313131313131; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_d (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000a752a55; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0a753500a9fa0d06; ++ *((unsigned long *)&__m128i_result[1]) = 0x0d060d060d060d06; ++ *((unsigned long *)&__m128i_result[0]) = 0x0d060d060d060d06; ++ __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_b (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vreplvei_w (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf.c +new file mode 100644 +index 000000000..4e7fcc02b +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf.c +@@ -0,0 +1,394 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000007f00000004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000401000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000100000004; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x00000000007f0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0404040404040404; ++ *((unsigned long *)&__m128i_result[0]) = 0x0404040404000404; ++ __m128i_out = __lsx_vshuf_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000002f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000029; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x000000000000002f; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000029; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffff00; ++ __m128i_out = __lsx_vshuf_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7efefefe82010201; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x418181017dfefdff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffff81; ++ __m128i_out = __lsx_vshuf_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x52525252adadadad; ++ *((unsigned long *)&__m128i_op1[0]) = 0x52525252adadadad; ++ *((unsigned long *)&__m128i_op2[1]) = 0x800000007fffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 
0x800000007fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00adadad00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00adadad00000000; ++ __m128i_out = __lsx_vshuf_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xbfd10d0d7b6b6b73; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc5c534920000c4ed; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xedededededededed; ++ *((unsigned long *)&__m128i_result[0]) = 0xedededededededed; ++ __m128i_out = __lsx_vshuf_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000202020200; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m128i_op1[1]) = 0x04040403fafafafc; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000ff80; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x8080808080808080; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x8080808080808080; ++ __m128i_out = __lsx_vshuf_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000001a0000000b; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000080000000ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff6cffb5ff98ff6e; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffd7ff8dffa4ff7a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x34947b4b11684f92; ++ *((unsigned long *)&__m128i_op1[0]) = 0xee297a731e5c5f86; ++ *((unsigned long *)&__m128i_op2[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffc0000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000868686868686; ++ __m128i_out = __lsx_vshuf_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x000000000000000d; ++ *((unsigned long *)&__m128i_result[1]) = 0x000d000d000d000d; ++ *((unsigned long *)&__m128i_result[0]) = 0x000d000d000d000d; ++ __m128i_out = __lsx_vshuf_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000002bfd9461; ++ *((unsigned long *)&__m128i_op2[1]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000300037ff000ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0003000300a10003; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000300037ff000ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0003000300a10003; ++ *((unsigned long *)&__m128i_op2[1]) = 0x000000007ff000ff; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0909000009090000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0909000009090000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0909000009090000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0909000009090000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x002a05a2f059094a; ++ *((unsigned long *)&__m128i_op2[0]) = 0x05ad3ba576eae048; ++ *((unsigned long *)&__m128i_result[1]) = 0x0909e0480909e048; ++ *((unsigned long *)&__m128i_result[0]) = 0x0909e0480909e048; ++ __m128i_out = __lsx_vshuf_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000000c0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000001ffffff29; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x00000000000000c0; ++ *((unsigned long *)&__m128i_op2[0]) = 0x00000001ffffff29; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffff2900000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vshuf_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1f54e0ab00000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0101010101010101; ++ 
*((unsigned long *)&__m128i_op2[0]) = 0x010101fe0101fe87; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101fe870101fe87; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101fe8700000000; ++ __m128i_out = __lsx_vshuf_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000007fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000020000020; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000020000020; ++ *((unsigned long *)&__m128i_result[1]) = 0x2000002000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x2000002020000020; ++ __m128i_out = __lsx_vshuf_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000004870ba0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000001000000010; ++ *((unsigned long *)&__m128i_op2[1]) = 0x8000000100000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x8000000000000103; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000010300000103; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000010300000000; ++ __m128i_out = __lsx_vshuf_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000ff0000857a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x05fafe0101fe000e; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vshuf_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xada4808924882588; ++ *((unsigned long *)&__m128i_op0[0]) = 0xacad25090caca5a4; ++ *((unsigned long *)&__m128i_op1[1]) = 0x021b7d24c9678a35; ++ *((unsigned long *)&__m128i_op1[0]) = 0x030298a6a1030a49; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_w (__m128i_op0, __m128i_op1, 
__m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xdfa6e0c6d46cdc13; ++ *((unsigned long *)&__m128i_op0[0]) = 0x21fc7081ec69b5f2; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000002c002400; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffb96bffff57c9; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffff6080ffff4417; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffff0015172b; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff0015172b; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff0015172b; ++ __m128i_out = __lsx_vshuf_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000002000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf0003000f0003000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x021b7d2449678a35; ++ *((unsigned long *)&__m128i_op0[0]) = 0x030298a621030a49; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x021b7d24c9678a35; ++ *((unsigned long *)&__m128i_op2[0]) = 0x030298a6a1030a49; ++ *((unsigned long *)&__m128i_result[1]) = 0x021b7d24c9678a35; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f7f00007f7f0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f7f80807f7f8080; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000fffe0000fffe; ++ *((unsigned long *)&__m128i_op2[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffff10000; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf4i.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf4i.c +new file mode 100644 +index 000000000..cd441b841 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf4i.c +@@ -0,0 +1,348 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000003; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000030000; ++ __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0xc9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0004007c00fc0000; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x047c0404fc00fcfc; ++ __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0x8a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x007fffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xff00ff7f00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0x32); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0x85); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffff51cf8da; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffd6040188; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffff8f8dada; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff01018888; ++ __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0x50); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x007d00c50177ac5b; ++ *((unsigned long *)&__m128i_op0[0]) = 0xac82aa88a972a36a; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000c5ac01015b; ++ *((unsigned long *)&__m128i_result[0]) = 0xaaacac88a3a9a96a; ++ __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0x7c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000a0000000a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000a00000009; ++ *((unsigned long *)&__m128i_result[1]) = 0x0a0a0a000a0a0a00; ++ *((unsigned long *)&__m128i_result[0]) = 0x0a0a0a0009090900; ++ __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000001000100; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000001000100; ++ __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0x12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00003f8000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00003f8000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x003f800000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x003f800000000000; ++ __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0xd2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0x6c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0x81); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000dffff000d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000ffffff; ++ __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0x6b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5f675e96e29a5a60; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[1]) = 
0x965f5e9660e25a60; ++ *((unsigned long *)&__m128i_result[0]) = 0xff7f7fffff7f7fff; ++ __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0x34); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x131211101211100f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x11100f0e100f0e0d; ++ *((unsigned long *)&__m128i_result[1]) = 0x13101213120f1112; ++ *((unsigned long *)&__m128i_result[0]) = 0x110e1011100d0f10; ++ __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0xcb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000001000110; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000431f851f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000001011010; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000043431f1f; ++ __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0xf0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xc0b4d1a5f8babad3; ++ *((unsigned long *)&__m128i_op0[0]) = 0xbbc8ecc5f3ced5f3; ++ *((unsigned long *)&__m128i_result[1]) = 0xd1c0c0a5baf8f8d3; ++ *((unsigned long *)&__m128i_result[0]) = 0xecbbbbc5d5f3f3f3; ++ __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0x7c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000454ffff9573; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000454ffff9573; ++ __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0xa4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_h (__m128i_op0, 0xf3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vshuf4i_h (__m128i_op0, 0x2c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_h (__m128i_op0, 0xd2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x003f000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x007c000d00400000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000003f00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000007c00000040; ++ __m128i_out = __lsx_vshuf4i_h (__m128i_op0, 0x31); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff00000000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_h (__m128i_op0, 0xb9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7ffffffe00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7ffffffe00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fff00007fff0000; ++ __m128i_out = __lsx_vshuf4i_h (__m128i_op0, 0xcd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff00000000ffff; ++ __m128i_out = __lsx_vshuf4i_h (__m128i_op0, 0x93); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000007f7f7f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x007f007f00007f7f; ++ __m128i_out = __lsx_vshuf4i_h (__m128i_op0, 0x58); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vshuf4i_w (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_w (__m128i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000010; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000010; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000001000000010; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_w (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000080808000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000080808000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_w (__m128i_op0, 0x8b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffdfffdfffdfffd; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffdfffdfffdfffd; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffdfffdfffdfffd; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffdfffdfffdfffd; ++ __m128i_out = __lsx_vshuf4i_w (__m128i_op0, 0x7e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfefefefdbffefdfe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfefefeeffef7fefe; ++ *((unsigned long *)&__m128i_result[1]) = 0xfef7fefebffefdfe; ++ *((unsigned long *)&__m128i_result[0]) = 0xfefefefdfefefeef; ++ __m128i_out = __lsx_vshuf4i_w (__m128i_op0, 0x2d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x002a001a001a000b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000002a001a; ++ *((unsigned long *)&__m128i_result[0]) = 0x001a000b00000000; ++ __m128i_out = __lsx_vshuf4i_w (__m128i_op0, 0x78); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_w (__m128i_op0, 0x98); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000010f8000081a2; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000069bb00000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000001000010f8; ++ __m128i_out = __lsx_vshuf4i_w (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_d (__m128i_op0, __m128i_op1, 0x44); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000fffff800; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000fffff800; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000fffff800; ++ __m128i_out = __lsx_vshuf4i_d (__m128i_op0, __m128i_op1, 0x8a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000006f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001f0a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000006f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_d (__m128i_op0, __m128i_op1, 0x36); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffda6e; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffe3d6; ++ *((unsigned long *)&__m128i_op1[1]) = 0xeeb1e4f4bc3763f3; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6f5edf5ada6fe3d7; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffe3d6; ++ *((unsigned long *)&__m128i_result[0]) = 0xeeb1e4f4bc3763f3; ++ __m128i_out = __lsx_vshuf4i_d (__m128i_op0, __m128i_op1, 0x23); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100200001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100200001; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x00001fff00001fff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_d (__m128i_op0, __m128i_op1, 0x3a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xce23d33e43d9736c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x63b2ac27aa076aeb; ++ *((unsigned long *)&__m128i_result[1]) = 0x63b2ac27aa076aeb; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_d (__m128i_op0, __m128i_op1, 0xc8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000158; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000158; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_d (__m128i_op0, __m128i_op1, 0xc9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_d (__m128i_op0, __m128i_op1, 0xbf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x801d5de0000559e0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x77eb86788eebaf00; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_d (__m128i_op0, __m128i_op1, 0x2e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x89582bf870006860; ++ *((unsigned long *)&__m128i_op1[0]) = 0x89582bf870006860; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_d (__m128i_op0, __m128i_op1, 0x94); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +-- +2.33.0 + diff --git a/LoongArch-Add-tests-for-SX-vector-subtraction-instru.patch b/LoongArch-Add-tests-for-SX-vector-subtraction-instru.patch new file mode 100644 index 0000000000000000000000000000000000000000..165df9370185196d120453a5145434c84d919ad7 --- /dev/null +++ b/LoongArch-Add-tests-for-SX-vector-subtraction-instru.patch @@ -0,0 +1,4150 @@ +From dc800193eb03dc87e702d4f3aeb886337b6be870 Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Mon, 11 Sep 2023 10:05:37 +0800 +Subject: [PATCH 081/124] LoongArch: Add tests for SX vector subtraction + instructions. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lsx/lsx-vhsubw-1.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vhsubw-2.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vmsub.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vssub-1.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vssub-2.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vsub.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vsubi.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vsubwev-1.c: New test. 
+ * gcc.target/loongarch/vector/lsx/lsx-vsubwev-2.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vsubwod-1.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vsubwod-2.c: New test. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lsx/lsx-vhsubw-1.c | 327 +++++++++++++ + .../loongarch/vector/lsx/lsx-vhsubw-2.c | 353 ++++++++++++++ + .../loongarch/vector/lsx/lsx-vmsub.c | 461 ++++++++++++++++++ + .../loongarch/vector/lsx/lsx-vssub-1.c | 398 +++++++++++++++ + .../loongarch/vector/lsx/lsx-vssub-2.c | 408 ++++++++++++++++ + .../loongarch/vector/lsx/lsx-vsub.c | 381 +++++++++++++++ + .../loongarch/vector/lsx/lsx-vsubi.c | 329 +++++++++++++ + .../loongarch/vector/lsx/lsx-vsubwev-1.c | 326 +++++++++++++ + .../loongarch/vector/lsx/lsx-vsubwev-2.c | 417 ++++++++++++++++ + .../loongarch/vector/lsx/lsx-vsubwod-1.c | 326 +++++++++++++ + .../loongarch/vector/lsx/lsx-vsubwod-2.c | 308 ++++++++++++ + 11 files changed, 4034 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsub.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsub.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-2.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-1.c 
b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-1.c +new file mode 100644 +index 000000000..0b51cb8cf +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-1.c +@@ -0,0 +1,327 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000fffffc00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000010000; ++ __m128i_out = __lsx_vhsubw_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00007f8000007f80; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00007f8000007f80; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000008000000080; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000008000000080; ++ __m128i_out = __lsx_vhsubw_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff07effffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100110002; ++ __m128i_out = __lsx_vhsubw_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x2); ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffff01; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffeff400000df4; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000006f00001f0a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000958affff995d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ff91fffffff5; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff00650001ffb0; ++ __m128i_out = __lsx_vhsubw_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000bfffffffe0f6; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000010001000a; ++ __m128i_out = __lsx_vhsubw_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x41dfffffffc00000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xbff0000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0008000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_result[1]) = 0x0039ffffffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffbeffffffffffff; ++ __m128i_out = __lsx_vhsubw_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x370bdfecffecffec; ++ 
*((unsigned long *)&__m128i_op0[0]) = 0x370bdfecffecffec; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000008140c80; ++ *((unsigned long *)&__m128i_result[1]) = 0x0037ffdfffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0037ffdfffeb007f; ++ __m128i_out = __lsx_vhsubw_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhsubw_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x98147a504d145000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x377b810912c0e000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4e3e133738bb47d2; ++ *((unsigned long *)&__m128i_result[1]) = 0xff98007a004d0050; ++ *((unsigned long *)&__m128i_result[0]) = 0xfff9ff4a0057000e; ++ __m128i_out = __lsx_vhsubw_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000501ffff0005; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000600000001; ++ __m128i_out = __lsx_vhsubw_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x00020000ffff0001; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100000001; ++ __m128i_out = __lsx_vhsubw_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffae001effae; ++ *((unsigned long *)&__m128i_op0[0]) = 0x001effae001effae; ++ *((unsigned long *)&__m128i_op1[1]) = 0x5252525252525252; ++ *((unsigned long *)&__m128i_op1[0]) = 0x5252525252525252; ++ *((unsigned long *)&__m128i_result[1]) = 0xffaeffadffaeffad; ++ *((unsigned long *)&__m128i_result[0]) = 0xffaeffadffaeffad; ++ __m128i_out = __lsx_vhsubw_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000201000000000b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000fc0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000200000000; ++ __m128i_out = __lsx_vhsubw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffff01ff01; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffff02; ++ __m128i_out = __lsx_vhsubw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000000; ++ __m128i_out = __lsx_vhsubw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhsubw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vhsubw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffff01; ++ __m128i_out = __lsx_vhsubw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x9727b8499727b849; ++ *((unsigned long *)&__m128i_op0[0]) = 0x12755900b653f081; ++ 
*((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7d7f13fc7c7ffbf4; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff9727ffff9727; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffe79ffffba5f; ++ __m128i_out = __lsx_vhsubw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000100010; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000100010; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vhsubw_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000006f00001f0a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000958affff995d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000100c6ffef10c; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffff70; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff9001a47e; ++ __m128i_out = __lsx_vhsubw_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000000a6; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffff59; ++ __m128i_out = __lsx_vhsubw_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffc; ++ __m128i_out = __lsx_vhsubw_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vhsubw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000000; ++ __m128i_out = __lsx_vhsubw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhsubw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhsubw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x002affd600000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xcbc2723a4f12a5f8; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x343d8dc5b0ed5a08; ++ __m128i_out = __lsx_vhsubw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhsubw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffff53d9; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff0001ffff9515; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000fffe00006aea; ++ __m128i_out = __lsx_vhsubw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000455555555; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op1[0]) = 0x000003fe0000141e; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffc01ffffebe2; ++ __m128i_out = __lsx_vhsubw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhsubw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x67eb85afb2ebb000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000001f0000001f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x67eb8590b2ebafe1; ++ __m128i_out = __lsx_vhsubw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x56a09e662ab46b31; ++ *((unsigned long *)&__m128i_op1[0]) = 0xb4b8122ef4054bb3; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x4b47edd10bfab44d; ++ __m128i_out = __lsx_vhsubw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-2.c +new file mode 100644 +index 000000000..26b51ee14 +--- 
/dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-2.c +@@ -0,0 +1,353 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffff0000010000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfe00fe00fe00fd01; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfe00fffefe0100f6; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff00ff0000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff0001ffffff0a; ++ __m128i_out = __lsx_vhsubw_hu_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000017161515; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000095141311; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x76f424887fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000170014; ++ *((unsigned long *)&__m128i_result[0]) = 0xff0cff78ff96ff14; ++ __m128i_out = __lsx_vhsubw_hu_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x67eb85afb2ebb000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) 
= 0x67eb85afb2ebb000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xff7cffd6ffc700b0; ++ __m128i_out = __lsx_vhsubw_hu_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhsubw_hu_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00ff00ff; ++ __m128i_out = __lsx_vhsubw_hu_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff; ++ __m128i_out = __lsx_vhsubw_wu_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ffff0000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ffff0000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op1[0]) = 
0x7f80000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000ff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000ff00000000; ++ __m128i_out = __lsx_vhsubw_wu_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000020; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhsubw_wu_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhsubw_wu_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff0001ffff0001; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff0001ffff0001; ++ __m128i_out = __lsx_vhsubw_wu_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; 
++ *((unsigned long *)&__m128i_result[1]) = 0x0000010100000101; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000010100000101; ++ __m128i_out = __lsx_vhsubw_wu_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfefeff00fefeff00; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfefeff00fefeff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[1]) = 0x00007e7e00007e7e; ++ *((unsigned long *)&__m128i_result[0]) = 0x00007e7e00007e7e; ++ __m128i_out = __lsx_vhsubw_wu_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xa2e3a36363636363; ++ *((unsigned long *)&__m128i_op0[0]) = 0xa2e3a36463636363; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000a2e300006363; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000a2e300006363; ++ __m128i_out = __lsx_vhsubw_wu_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhsubw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x52527d7d52527d7d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x52527d7d52527d7d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000052527d7d; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000052527d7d; ++ __m128i_out = __lsx_vhsubw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000002400180004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000024; ++ __m128i_out = __lsx_vhsubw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffff02; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhsubw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffff0000010000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xabff54f1ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xa5f7458b000802ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000fff7fc01; ++ __m128i_out = __lsx_vhsubw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long 
*)&__m128i_result[1]) = 0xffffffff00000002; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000002; ++ __m128i_out = __lsx_vhsubw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhsubw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhsubw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhsubw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000014eb54ab; ++ *((unsigned long *)&__m128i_op0[0]) = 0x14eb6a002a406a00; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff80008a7555aa; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0a7535006af05cf9; ++ *((unsigned long 
*)&__m128i_result[1]) = 0xffffffff758aaa56; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffa9fb0d07; ++ __m128i_out = __lsx_vhsubw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vhsubw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffff01ff01; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0f180000ffe00000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhsubw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vhsubw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000001ca02f854; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100013fa0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000001ca02f854; ++ __m128i_out = __lsx_vhsubw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100013fa0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000004b01; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffb4ff; ++ __m128i_out = __lsx_vhsubw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhsubw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhsubw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00001b4a00007808; ++ *((unsigned long 
*)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffe4b5ffff87f8; ++ __m128i_out = __lsx_vhsubw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3fc03fc000000003; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f7f1fd800000004; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x3fc03fc000000004; ++ __m128i_out = __lsx_vhsubw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhsubw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xc080800000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc080800000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7efefefe82010201; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x418181017dfefdff; ++ __m128i_out = __lsx_vhsubw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x67eb85afb2ebb000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x67eb85afb2ebb000; ++ __m128i_out = __lsx_vhsubw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe593c8c4e593c8c4; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ff8000010f78; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xff7f0080ff7ef088; ++ __m128i_out = __lsx_vhsubw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000155; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000f0000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffff10000; ++ __m128i_out = __lsx_vhsubw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsub.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsub.c +new file mode 100644 +index 000000000..47cf33cfd +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsub.c +@@ -0,0 +1,461 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, 
long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmsub_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfeffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfeffffffffff0002; ++ *((unsigned long *)&__m128i_op2[1]) = 0x54beed87bc3f2be1; ++ *((unsigned long *)&__m128i_op2[0]) = 0x8024d8f6a494afcb; ++ *((unsigned long *)&__m128i_result[1]) = 0xa8beed87bc3f2be1; ++ *((unsigned long *)&__m128i_result[0]) = 0x0024d8f6a494006a; ++ __m128i_out = __lsx_vmsub_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000fc0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmsub_d 
(__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x3); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001ffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001ffff0001ffff; ++ __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0001000100000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmsub_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000200010; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xfffffff0ffe04000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000001fc0000; ++ __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000200010; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000200010; ++ __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m128i_op0[0]) = 0x040004000400040d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m128i_result[0]) = 0x040004000400040d; ++ __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmsub_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xb327b9363c99d32e; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0xa1e7b475d925730f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000003f80b0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ff800000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x00007f8000007f80; ++ *((unsigned long *)&__m128i_op2[0]) = 0x00007f8000007f80; ++ *((unsigned long *)&__m128i_result[1]) = 0xb327b9363c992b2e; ++ *((unsigned long *)&__m128i_result[0]) = 0xa1e7b475d925730f; ++ __m128i_out = __lsx_vmsub_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff000000ff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffff800; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff000000ff0000; ++ __m128i_out = __lsx_vmsub_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0002000200020002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0002000200020002; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_op2[1]) = 0x000000004c7f4c7f; ++ *((unsigned long *)&__m128i_op2[0]) = 0xe0c0c0c0d1c7d1c6; ++ *((unsigned long *)&__m128i_result[1]) = 0x061006100613030c; ++ *((unsigned long *)&__m128i_result[0]) = 0x4d6814ef9c77ce46; ++ __m128i_out = __lsx_vmsub_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7ffe7ffe7ffe7ffe; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmsub_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000002bfd9461; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000f00; ++ *((unsigned long *)&__m128i_op2[0]) = 0x00000000ffffff00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000002bfd9461; ++ __m128i_out = __lsx_vmsub_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3727f00000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc7e01fcfe0000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3727112c00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x39201f7120000040; ++ *((unsigned long *)&__m128i_op2[1]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xe5b9012c00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xc7e01fcfe0000000; ++ __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000004; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffff0204; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000442900007b4c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000e22b0000efa4; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000442800007b50; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffff0204; ++ __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffefffffffe; ++ __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000002f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000029; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000003a24; ++ *((unsigned long *)&__m128i_op2[0]) = 0x003dbe88077c78c1; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000002f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000029; ++ __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0xff0000007f800000; ++ __m128i_out = __lsx_vmsub_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0fff0fff0fff0fff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0fff0fff0fff0fff; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0fff0fff0fff0fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0fff0fff0fff0fff; ++ __m128i_out = __lsx_vmsub_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000003f0000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffc3ffff003e; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000003f0000ffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffc3ffff003e; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000f07f0000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffff177fffff0fc; ++ __m128i_out = __lsx_vmsub_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffbfffefffc9510; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffbfffefffc9510; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0c0b0a090b0a0908; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0a09080709080706; ++ *((unsigned long *)&__m128i_op2[1]) = 0xfffbfffefffc9510; ++ *((unsigned long *)&__m128i_op2[0]) = 0xfffbfffefffc9510; ++ *((unsigned long *)&__m128i_result[1]) = 0x29c251319c3a5c90; ++ *((unsigned long *)&__m128i_result[0]) = 0x62fb9272df7da6b0; ++ __m128i_out = __lsx_vmsub_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmsub_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmsub_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8f8f8f8f8f8f8f8f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8f8f8f8f8f8f8f8f; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x800000007fffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x800000007fffffff; ++ __m128i_out = __lsx_vmsub_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000010000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000010000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000010000000000; ++ __m128i_out = __lsx_vmsub_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001400000014; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000001400000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000053a4f452; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000001400000014; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001400000000; ++ __m128i_out = __lsx_vmsub_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmsub_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00680486ffffffda; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff913bfffffffd; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00680486ffffffda; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff913bfffffffd; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x000000003ddc5dac; ++ *((unsigned long *)&__m128i_result[1]) = 0x00680486ffffffda; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff913bb9951901; ++ __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000002000000020; ++ 
*((unsigned long *)&__m128i_op2[0]) = 0x0000001f0000001f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmsub_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0021b761002c593c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x002584710016cc56; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000001e03; ++ *((unsigned long *)&__m128i_result[1]) = 0x0021b761002c593c; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x002584710016ea59; ++ __m128i_out = __lsx_vmsub_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000290; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000290; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmsub_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0002000400000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0003000500000001; ++ __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001700000017; ++ *((unsigned long *)&__m128i_op0[0]) = 0x59f7fd8759f7fd87; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffae001effae; ++ *((unsigned long *)&__m128i_op1[0]) = 0x001effae001effae; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000001700000017; ++ *((unsigned long *)&__m128i_op2[0]) = 0x59f7fd8759f7fd87; ++ *((unsigned long *)&__m128i_result[1]) = 0xfd200ed2fd370775; ++ *((unsigned long *)&__m128i_result[0]) = 0x96198318780e32c5; ++ __m128i_out = __lsx_vmsub_d (__m128i_op0, __m128i_op1, 
__m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0004000400040004; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0004000400040004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0004000400040004; ++ *((unsigned long *)&__m128i_result[0]) = 0x0004000400040004; ++ __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xfe3bfb01fe3bfe01; ++ *((unsigned long *)&__m128i_op2[0]) = 0xfe03fe3ffe01fa21; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-1.c +new file mode 100644 +index 000000000..fc4cbb4e5 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-1.c +@@ -0,0 +1,398 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, 
__m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00001801f0307f80; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00001801f0307f80; ++ __m128i_out = __lsx_vssub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffff8f8dada; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff01018888; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101010108082626; ++ *((unsigned long *)&__m128i_result[0]) = 0x01010101ffff7878; ++ __m128i_out = __lsx_vssub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00fe000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0xfe80000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x027e0000000000ff; ++ __m128i_out = __lsx_vssub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffb4ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffff98dea; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x40f3fa0000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xc00fffffffffb4ff; ++ *((unsigned long *)&__m128i_result[0]) = 0xbf0c05fffff98dea; ++ __m128i_out = __lsx_vssub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000002f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000029; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m128i_result[1]) = 0x010101010101012f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010101010129; ++ __m128i_out = __lsx_vssub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m128i_result[1]) = 0x1202120212021202; ++ *((unsigned long *)&__m128i_result[0]) = 0x1202120212021202; ++ __m128i_out = __lsx_vssub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[0]) = 
0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00ff00ff; ++ __m128i_out = __lsx_vssub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0fffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x41957fff7fff7fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[0]) = 0xbf6b810181018101; ++ __m128i_out = __lsx_vssub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfff8ffa2fffdffb0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ff800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0108015e01030150; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000017f0000; ++ __m128i_out = __lsx_vssub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x007fffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x007fffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf436f3f52f4ef4a8; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff80000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff80000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0080000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xf4b6f3f52f4ef4a8; ++ __m128i_out = __lsx_vssub_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000fe00ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffff01ff01; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000101fd01fe; ++ __m128i_out = __lsx_vssub_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0040004000400040; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0040004000400040; ++ *((unsigned long *)&__m128i_result[1]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long *)&__m128i_result[0]) = 0xffc0ffc0ffc0ffc0; ++ __m128i_out = __lsx_vssub_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fff010181010102; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff81010102; ++ *((unsigned long *)&__m128i_op1[1]) = 0x003f0000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x003f0000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fc0010181020103; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fc0ffff81020103; ++ __m128i_out = __lsx_vssub_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vssub_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssub_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001e03; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000011e04; ++ __m128i_out = __lsx_vssub_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6363636363abdf16; ++ *((unsigned long *)&__m128i_op0[0]) = 0x41f8e08016161198; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000246d9755; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000002427c2ee; ++ *((unsigned long *)&__m128i_result[1]) = 0x636363633f3e47c1; ++ *((unsigned long *)&__m128i_result[0]) = 0x41f8e080f1ef4eaa; ++ __m128i_out = __lsx_vssub_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001fffe00014b41; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001fffe0001ffde; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff0002ffffb4bf; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff0002ffff0022; ++ __m128i_out = __lsx_vssub_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000001fc0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000002010; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000001fbdff0; ++ __m128i_out = __lsx_vssub_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x03f1e3d28b1a8a1a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000001d5d4; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000150d707009; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000fffe2a2c; ++ *((unsigned long *)&__m128i_result[0]) = 0x03f1e3bd80000000; ++ __m128i_out = __lsx_vssub_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = 
__lsx_vssub_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vssub_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffd5002affffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x343d8dc6b0ed5a08; ++ *((unsigned long *)&__m128i_result[1]) = 0x002affd600000001; ++ *((unsigned long *)&__m128i_result[0]) = 0xcbc2723a4f12a5f8; ++ __m128i_out = __lsx_vssub_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vssub_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffff7ffffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffff7ffffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_w (__m128i_op0, 
__m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffdfffffffe0; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffdfffffffe0; ++ __m128i_out = __lsx_vssub_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000c2f90000bafa; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000c2f90000bafa; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8000c2fa8000c2fa; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff3d06ffff4506; ++ *((unsigned long *)&__m128i_result[0]) = 0x7ffffffe7ffff800; ++ __m128i_out = __lsx_vssub_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffff3fffffff3; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffff3fffffff3; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffff3fffffff4; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffff3fffffff4; ++ __m128i_out = __lsx_vssub_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7ef8000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8108000000000000; ++ __m128i_out = __lsx_vssub_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000063b2ac27; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffaa076aeb; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffff53d9; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff0001ffff9515; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff63b3584e; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000fffdaa07d5d6; ++ __m128i_out = __lsx_vssub_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffff81; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffff7c; ++ __m128i_out = __lsx_vssub_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff7cffd6ffc700b0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x008300290038ff50; ++ __m128i_out = __lsx_vssub_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000c0dec4d1; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff3f213b2f; ++ __m128i_out = __lsx_vssub_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-2.c +new file mode 100644 +index 000000000..0d5987567 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-2.c +@@ -0,0 +1,408 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x7f801fa06451ef11; ++ *((unsigned long *)&__m128i_op0[0]) = 0x68bcf93435ed25ed; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffb64c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000003900; ++ *((unsigned long *)&__m128i_result[0]) = 0x68bcf93435ed25ed; ++ __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x460f3b393ef4be3a; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x04e00060ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x04e00060ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x04e00060ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x04e00060ffffffff; ++ __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x004200a000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x004200a000200001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000000001c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000001c; ++ *((unsigned long *)&__m128i_result[1]) = 0x004200a000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x004200a000200000; ++ __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0c03e17edd781b11; ++ *((unsigned long *)&__m128i_op0[0]) = 0x342caf9be5579ebe; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000f909; ++ *((unsigned long *)&__m128i_result[1]) = 0x0c03e17edd781b11; ++ *((unsigned long *)&__m128i_result[0]) = 0x342caf9be55700b5; ++ __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000200010; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000200010; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x11000f2010000e20; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0f000d200e000c20; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x11000f2010000e20; ++ *((unsigned long *)&__m128i_result[0]) = 0x0f000d200e000c20; ++ __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x001ffff0003ffff0; ++ *((unsigned 
long *)&__m128i_op0[0]) = 0x000fffefffefffef; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x001ffff0003ffff0; ++ *((unsigned long *)&__m128i_result[0]) = 0x000fffefffefffef; ++ __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4e3e13368c17f6e6; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1111311111114111; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1111311111112111; ++ *((unsigned 
long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000007fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x1111311111114111; ++ *((unsigned long *)&__m128i_result[0]) = 0x1111311111110000; ++ __m128i_out = __lsx_vssub_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0002000200020002; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0202fe02fd020102; ++ *((unsigned long *)&__m128i_result[1]) = 0xfefcfefcfefcfefc; ++ *((unsigned long *)&__m128i_result[0]) = 0xfcfc00fc01fcfdfc; ++ __m128i_out = __lsx_vssub_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00004000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xbfd10d0d7b6b6b73; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc5c53492f25acbf2; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000f3040705; ++ *((unsigned long *)&__m128i_result[1]) = 0xbfd10d0d7b6b6b73; ++ *((unsigned long *)&__m128i_result[0]) = 0xc5c534920000c4ed; ++ __m128i_out = __lsx_vssub_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000200; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00ff000000ff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff000000ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff000000ff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff000000000000; ++ __m128i_out = __lsx_vssub_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xc14eef7fc14ea000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000ea000010fa101; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_out = __lsx_vpickve2gr_b (__m128i_op0, 0xb); ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000bd3d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000007fff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000c7fff000c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000006ffef000; ++ __m128i_out = __lsx_vssub_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffc2ffe700000007; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffc100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x41dfffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xbde2ffe800000007; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffc100010001; ++ __m128i_out = __lsx_vssub_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000f3040705; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000f3040705; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xa000308000008002; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0500847b00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vssub_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4f4f4f4f4f4f4f4f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4f4f4f4f4f4f4f4f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned 
long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ff000000ff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3fffff0000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3fffff0000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x006f0efe258ca851; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000006f00001f0a; ++ *((unsigned long *)&__m128i_op1[0]) = 
0x0000958affff995d; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffff90ffffe0f5; ++ *((unsigned long *)&__m128i_result[0]) = 0x006e7973258d0ef4; ++ __m128i_out = __lsx_vssub_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ca02f854; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000d0000000d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x6363635663636356; ++ __m128i_out = __lsx_vssub_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsub.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsub.c +new file mode 100644 +index 000000000..f5c82bc74 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsub.c +@@ -0,0 +1,381 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, 
long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000ff02; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000000001fe; ++ __m128i_out = __lsx_vsub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffe000ffffe000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc6ffe000c6fde000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8080808080808081; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffe000ffffe000; ++ *((unsigned long *)&__m128i_result[0]) = 0x467f6080467d607f; ++ __m128i_out = __lsx_vsub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000010000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00fe00ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000010000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x00ff00fe00fe00ff; ++ __m128i_out = __lsx_vsub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fff00007fff0000; ++ __m128i_out = __lsx_vsub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; 
++ __m128i_out = __lsx_vsub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000c0dec4d1; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000040223c2e; ++ __m128i_out = __lsx_vsub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfd200ed2fd370775; ++ *((unsigned long *)&__m128i_op0[0]) = 0x96198318780e32c5; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffe65ecc1be5bc; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffe65ecc1be5bc; ++ *((unsigned long *)&__m128i_result[1]) = 0xfe212874311c22b9; ++ *((unsigned long *)&__m128i_result[0]) = 0x971a9dbaacf34d09; ++ __m128i_out = __lsx_vsub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x0); ++ *((unsigned long *)&__m128i_op0[1]) = 0x4f4f4f4f4f4f4f4f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4f4f4f4f4f4f4f4f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x4f4f4f4f4f4f4f4f; ++ *((unsigned long *)&__m128i_result[0]) = 
0x4f4f4f4f4f4f4f4f; ++ __m128i_out = __lsx_vsub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0404038383838404; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0404038383838404; ++ *((unsigned long *)&__m128i_op1[1]) = 0xf000e001bf84df83; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfff8e001ff84e703; ++ *((unsigned long *)&__m128i_result[1]) = 0x14042382c3ffa481; ++ *((unsigned long *)&__m128i_result[0]) = 0x040c238283ff9d01; ++ __m128i_out = __lsx_vsub_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0141010101410101; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0141010101410101; ++ *((unsigned long *)&__m128i_result[1]) = 0xfebffefffebffeff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfebffefffebffeff; ++ __m128i_out = __lsx_vsub_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1111111111111111; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1111111111111111; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x1111111111111111; ++ *((unsigned long *)&__m128i_result[0]) = 0x1111111111111111; ++ __m128i_out = __lsx_vsub_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vsub_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000700000004fdff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000300000000fdff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfff7fffefffa01ff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffbfffefffe01ff; ++ __m128i_out = __lsx_vsub_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000000010000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000cd630000cd63; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000329d0000329d; ++ __m128i_out = __lsx_vsub_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x08080807f7f7f7f8; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000202020200; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m128i_result[1]) = 0x08080805f5f5f5f8; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ff00; ++ __m128i_out = __lsx_vsub_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00060eb000000006; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000075c00000cf0; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffaf1500000fffa; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000f8a40000f310; ++ __m128i_out = __lsx_vsub_h (__m128i_op0, __m128i_op1); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffff100fffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffdf100fffc; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsub_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsub_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000007f7f7f7f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffff0; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000007f7f7f7f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000010; ++ __m128i_out = __lsx_vsub_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsub_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000800000000000; ++ __m128i_out = __lsx_vsub_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsub_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00001802041b0013; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x00001802041b0014; ++ __m128i_out = __lsx_vsub_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000f7d1000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x773324887fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfff082efffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x88cbdb7780000001; ++ __m128i_out = __lsx_vsub_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsub_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000001f50000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffe0b0000; ++ __m128i_out = __lsx_vsub_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000fffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0010000000000001; ++ __m128i_out = __lsx_vsub_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsub_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000800080; ++ 
*((unsigned long *)&__m128i_op0[0]) = 0x000000000000000b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000800080; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000000b; ++ __m128i_out = __lsx_vsub_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsub_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffeb; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffeb; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000015; ++ __m128i_out = __lsx_vsub_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vsub_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0007000000050000; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0003000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001fffe0001fefc; ++ *((unsigned long *)&__m128i_result[1]) = 0x0006000100040001; ++ *((unsigned long *)&__m128i_result[0]) = 0x00010002ffff0105; ++ __m128i_out = __lsx_vsub_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000003fffffffd; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000003fffffffd; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000003fffffffe; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000003fffffffd; ++ __m128i_out = __lsx_vsub_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsub_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000feff23560000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000fd1654860000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x6363636363abdf16; ++ *((unsigned long *)&__m128i_op1[0]) = 0x41f8e08016161198; ++ *((unsigned long *)&__m128i_result[1]) = 0x9c9d9b9bbfaa20e9; ++ *((unsigned long *)&__m128i_result[0]) = 0xbe081c963e6fee68; ++ __m128i_out = __lsx_vsub_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubi.c 
b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubi.c +new file mode 100644 +index 000000000..37e0ccf4d +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubi.c +@@ -0,0 +1,329 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfff489b693120950; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffc45a851c40c18; ++ *((unsigned long *)&__m128i_result[1]) = 0xe0d56a9774f3ea31; ++ *((unsigned long *)&__m128i_result[0]) = 0xe0dd268932a5edf9; ++ __m128i_out = __lsx_vsubi_bu (__m128i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffff88; ++ *((unsigned long *)&__m128i_result[1]) = 0xe5e5e5e5e5e5e5e5; ++ *((unsigned long *)&__m128i_result[0]) = 0xe5e5e5e5e4e4e46d; ++ __m128i_out = __lsx_vsubi_bu (__m128i_op0, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000897957687; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000408; ++ *((unsigned long *)&__m128i_result[1]) = 0xf7f7f7ff8e8c6d7e; ++ *((unsigned long *)&__m128i_result[0]) = 0xf7f7f7f7f7f7fbff; ++ __m128i_out = __lsx_vsubi_bu (__m128i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_out = 
__lsx_vpickve2gr_w (__m128i_op0, 0x1); ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xe6e6e6e6e6e6e6e6; ++ *((unsigned long *)&__m128i_result[0]) = 0xe6e6e6e6e6e6e6e6; ++ __m128i_out = __lsx_vsubi_bu (__m128i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xf8f8f8f8f8f8f8f8; ++ *((unsigned long *)&__m128i_result[0]) = 0xf8f8f8f8f8f8f8f8; ++ __m128i_out = __lsx_vsubi_bu (__m128i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x2e34594c3b000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xe9e9e9e9e9e9e9e9; ++ *((unsigned long *)&__m128i_result[0]) = 0x171d423524e9e9e9; ++ __m128i_out = __lsx_vsubi_bu (__m128i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffe2ffe2ffe2ffe2; ++ *((unsigned long *)&__m128i_result[0]) = 0xffe2ffe2ffe2ffe2; ++ __m128i_out = __lsx_vsubi_hu (__m128i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x9795698585057dec; ++ *((unsigned long *)&__m128i_op0[0]) = 0x87f82867431a1d08; ++ *((unsigned long *)&__m128i_result[1]) = 0x9780697084f07dd7; ++ *((unsigned long *)&__m128i_result[0]) = 0x87e3285243051cf3; ++ __m128i_out = __lsx_vsubi_hu (__m128i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffcfffcfffcfffc; ++ __m128i_out = __lsx_vsubi_hu (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000101; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffcfffcfffc00fd; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffcfffcfffcfffc; ++ __m128i_out = __lsx_vsubi_hu (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x371fe00000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x371fe00000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x370bdfecffecffec; ++ *((unsigned long *)&__m128i_result[0]) = 0x370bdfecffecffec; ++ __m128i_out = __lsx_vsubi_hu (__m128i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000040600000406; ++ *((unsigned long *)&__m128i_op0[0]) = 0x020202020202fe02; ++ *((unsigned long *)&__m128i_result[1]) = 0xfff503fbfff503fb; ++ *((unsigned long *)&__m128i_result[0]) = 0x01f701f701f7fdf7; ++ __m128i_out = __lsx_vsubi_hu (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffdfffdfffdfffd; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffdfffdfffdfffd; ++ __m128i_out = __lsx_vsubi_hu (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x803e0000803e0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x803e0000803e0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x803bfffd803bfffd; ++ *((unsigned long *)&__m128i_result[0]) = 0x803bfffd803bfffd; ++ __m128i_out = __lsx_vsubi_hu (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffdfffdfffdfffd; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffdfffdfffdfffd; ++ __m128i_out = __lsx_vsubi_hu (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffedffedffedffed; ++ *((unsigned long *)&__m128i_result[0]) = 0xffedffedffedffed; ++ __m128i_out = __lsx_vsubi_hu (__m128i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffe4ffe4ffe4ffe4; ++ *((unsigned long *)&__m128i_result[0]) = 0xffe4ffe4ffe4ffe4; ++ __m128i_out = __lsx_vsubi_hu (__m128i_op0, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubi_wu (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffefffffffef; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffefffffffef; ++ __m128i_out = __lsx_vsubi_wu (__m128i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m128i_result[1]) = 0xffffffe6ffffffe6; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffe6ffffffe6; ++ __m128i_out = __lsx_vsubi_wu (__m128i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffff1fffffff1; ++ __m128i_out = __lsx_vsubi_wu (__m128i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffff6fffffff6; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffff6fffffff6; ++ __m128i_out = __lsx_vsubi_wu (__m128i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffe4ffffffe4; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffe4ffffffe4; ++ __m128i_out = __lsx_vsubi_wu (__m128i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffe1ffffffe1; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffe1ffffffe1; ++ __m128i_out = __lsx_vsubi_wu (__m128i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffff1fffffff1; ++ __m128i_out = __lsx_vsubi_wu (__m128i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffab7e71e33848; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffe1ffffffe1; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffab5f71e33829; ++ __m128i_out = __lsx_vsubi_wu (__m128i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xa8beed87bc3f2be1; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0024d8f6a494006a; ++ *((unsigned long *)&__m128i_result[1]) = 0xa8beed87bc3f2bd3; ++ *((unsigned long *)&__m128i_result[0]) = 0x0024d8f6a494005c; ++ __m128i_out = __lsx_vsubi_du (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffeb; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffeb; ++ __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffe1; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffe1; ++ __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffff7; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff7; ++ __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m128i_result[1]) = 0xffffffffffffffe5; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffe5; ++ __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xf2f2e5e5e5e5e5e5; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xf2f2e5e5e5e5e5dc; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff7; ++ __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3fffff0000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3fffff0000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x3ffffeffffffffe5; ++ *((unsigned long *)&__m128i_result[0]) = 0x3ffffeffffffffe5; ++ __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000007b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000070; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff5; ++ __m128i_out = __lsx_vsubi_du (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffff0; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff0; ++ __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffe6; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffe6; ++ __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x1a); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x000100010000fffb; ++ *((unsigned long *)&__m128i_result[0]) = 0x000100010000fffb; ++ __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffeb; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffeb; ++ __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffa; ++ __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffdfffe80008000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffe2; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffdfffe80007fe2; ++ __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x001a001a001a001a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x001a001a001a001a; ++ *((unsigned long *)&__m128i_result[1]) = 0x001a001a001a000b; ++ *((unsigned long *)&__m128i_result[0]) = 0x001a001a001a000b; ++ __m128i_out = __lsx_vsubi_du (__m128i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000234545b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000c0dec4d1; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000002345454; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000c0dec4ca; ++ __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0f8d33000f8d3300; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0003b80000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0f8d33000f8d32fd; ++ *((unsigned long *)&__m128i_result[0]) = 0x0003b7fffffffffd; ++ __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-1.c +new file mode 100644 +index 000000000..f0d391a09 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-1.c +@@ -0,0 +1,326 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x43d3e0000013e000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x43d3e0000013e000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffd3000000130000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffd3000000130000; ++ __m128i_out = __lsx_vsubwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsubwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffff53d9; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff0001ffff9515; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000100010000ffda; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000200000016; ++ __m128i_out = __lsx_vsubwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffbfbfbfc0; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffbfbfbfc0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4040404040404040; ++ *((unsigned long *)&__m128i_result[1]) = 0xffbfffbfff7fff80; ++ *((unsigned long *)&__m128i_result[0]) = 0xffbfffbfff7fff80; ++ __m128i_out = __lsx_vsubwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000808000020200; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ff8000020000; ++ __m128i_out = __lsx_vsubwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x413e276583869d79; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f7f017f9d8726d3; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7c7cd2eb63637c52; ++ *((unsigned long *)&__m128i_op1[0]) = 0x82ffd2210127add2; ++ *((unsigned long *)&__m128i_result[1]) = 0xffc2007aff230027; ++ *((unsigned long *)&__m128i_result[0]) = 0x0080005eff600001; ++ __m128i_out = __lsx_vsubwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000011ff040; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010012; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffe1ffc0; ++ __m128i_out = __lsx_vsubwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000004000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffc000000000; ++ __m128i_out = __lsx_vsubwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000000d; ++ __m128i_out = __lsx_vsubwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0000000000ffff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000100c6ffef10c; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffff01; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffeff400000df4; ++ __m128i_out = __lsx_vsubwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000002050320; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000002050320; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000001c88bf0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000320; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000007730; ++ __m128i_out = __lsx_vsubwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005; ++ 
*((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000005; ++ __m128i_out = __lsx_vsubwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000001030103; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000103; ++ __m128i_out = __lsx_vsubwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x33eac9fdca42f660; ++ *((unsigned long *)&__m128i_op0[0]) = 0xaa472d26fe867091; ++ *((unsigned long *)&__m128i_op1[1]) = 0x33eac9fdca42f660; ++ *((unsigned long *)&__m128i_op1[0]) = 0xaa472d26fe867091; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsubwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000208000002080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000208000002080; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000208000002080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000208000002080; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000004; ++ __m128i_out = __lsx_vsubwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000ff0000857a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x05fafe0101fe000e; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffff7a86; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffe01fff2; ++ __m128i_out = __lsx_vsubwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 
0xf654ad7447e59090; ++ *((unsigned long *)&__m128i_op1[0]) = 0x27b1b106b8145f50; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffb81a6f70; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000047eba0b0; ++ __m128i_out = __lsx_vsubwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000c01020d8009; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000003004; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000c01020d5005; ++ __m128i_out = __lsx_vsubwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000fe00ff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffff01ff01; ++ __m128i_out = __lsx_vsubwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000000d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fff7fff7fff7fff; ++ 
*((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x4f804f804f804f80; ++ __m128i_out = __lsx_vsubwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xb9fe3640e4eb1b18; ++ *((unsigned long *)&__m128i_op0[0]) = 0x800000005b4b1b18; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffb9fe00003640; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffe4eb00001b18; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x80001b155b4b0000; ++ __m128i_out = __lsx_vsubwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000008; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100080000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffefff80000; ++ __m128i_out = __lsx_vsubwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3fc03fc000000004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3fc03fc000000003; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7f7f1fd800000004; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xc0411fe800000000; ++ __m128i_out = __lsx_vsubwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff00e400ff00e400; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff01e41ffff0e440; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffefffffffe; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xff01e420fff0e442; ++ __m128i_out = __lsx_vsubwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-2.c +new file mode 100644 +index 000000000..3b18bc13c +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-2.c +@@ -0,0 +1,417 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00ff00ff00000083; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_result[1]) = 0xff01ff010000ff7d; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000fffc; ++ __m128i_out = __lsx_vsubwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff00fc0000ff02; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xff01ff040000fffe; ++ __m128i_out = __lsx_vsubwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffacdb6dbecac; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1f5533a694f902c0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x21011f3f193d173b; ++ *((unsigned long *)&__m128i_op1[0]) 
= 0xff39ff37ff35ff33; ++ *((unsigned long *)&__m128i_result[1]) = 0x00fe008e009e0071; ++ *((unsigned long *)&__m128i_result[0]) = 0x001c006f00c4008d; ++ __m128i_out = __lsx_vsubwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x9c9ca19d509ae734; ++ *((unsigned long *)&__m128i_op0[0]) = 0xd1b09480f2123460; ++ *((unsigned long *)&__m128i_op1[1]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000001fffeff98; ++ *((unsigned long *)&__m128i_result[0]) = 0x0014ffe4ff76ffc4; ++ __m128i_out = __lsx_vsubwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x34947b4b11684f92; ++ *((unsigned long *)&__m128i_op1[0]) = 0xee297a731e5c5f86; ++ *((unsigned long *)&__m128i_result[1]) = 0xff6cffb5ff98ff6e; ++ *((unsigned long *)&__m128i_result[0]) = 0xffd7ff8dffa4ff7a; ++ __m128i_out = __lsx_vsubwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffff8f8dada; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff01018888; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff3ea5016b; ++ *((unsigned long *)&__m128i_op1[0]) = 
0xfffefffe3f6fb04d; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000d96f; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000001ffffd83b; ++ __m128i_out = __lsx_vsubwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000f0009d3c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000016fff9d3d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000bd0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000007f0; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000916c; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000010000954d; ++ __m128i_out = __lsx_vsubwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000100010000fe01; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000050000007b; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000500000005; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffbffffff85; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffc0000fdfc; ++ __m128i_out = __lsx_vsubwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000032; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000032; 
++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff80df00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xa5c4c774856ba837; ++ *((unsigned long *)&__m128i_op1[0]) = 0x2a569f8081c3bbe9; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffb96bffff57c9; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff6080ffff4417; ++ __m128i_out = __lsx_vsubwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000063b2ac27; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffaa076aeb; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffff53d9; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff0001ffff9515; ++ __m128i_out = __lsx_vsubwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff; ++ __m128i_out = __lsx_vsubwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00060fbf00040fbf; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00020fbf00000fbf; ++ *((unsigned long *)&__m128i_op1[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffac5cffffac5c; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffac5cffffac5c; ++ __m128i_out = __lsx_vsubwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffaefffbffaefffb; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffaefffbffaefffb; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff0005ffff0005; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff000500000004; ++ __m128i_out = __lsx_vsubwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000a1630000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000a1630000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000001fffffffe; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vsubwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001fd0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000001fd0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff7ffffef77fffdd; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf77edf9cffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000008800022; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000001; ++ __m128i_out = __lsx_vsubwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffda6f; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffe3d7; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffda6e; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffe3d6; ++ __m128i_out = __lsx_vsubwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x003fffff00000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000807f00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x80006b0080808080; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff00011cf0c569; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc0000002b0995850; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffe30f3a97; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffcfe72830; ++ __m128i_out = __lsx_vsubwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ff9f5c25; ++ *((unsigned long *)&__m128i_op0[0]) = 0x58fa6b4000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ff9f5c25; ++ *((unsigned long *)&__m128i_op1[0]) = 0x58fa6b4000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xcda585aebbb2836a; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000080808080; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffc4cdfd16; ++ __m128i_out = __lsx_vsubwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x2); ++ *((unsigned long *)&__m128i_op0[1]) = 0x801dd5cb0004e058; ++ *((unsigned long *)&__m128i_op0[0]) = 0x77eb15638eeb5fc2; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000200000001b; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000002000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000004e03d; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000008eeb5fc2; ++ __m128i_out = __lsx_vsubwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vsubwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000c0000bd49; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000c7fff000c; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffff00010000fff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000100c6ffef00d; ++ __m128i_out = __lsx_vsubwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000006f00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000c00000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000000006f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000001f0a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000bfffffffe0f6; ++ __m128i_out = __lsx_vsubwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffcfffcfffcfffd; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffcfffdfffcfffd; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffcfffdfffcfffd; ++ __m128i_out = __lsx_vsubwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffff7e00000081; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; 
++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsubwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0a0a0a0a0a0a0a0a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0a0a0a0a0a0a0a0a; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffb96bffff57c9; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff6080ffff4417; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0a0aa9890a0ac5f3; ++ __m128i_out = __lsx_vsubwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x370bdfecffecffec; ++ *((unsigned long *)&__m128i_op0[0]) = 0x370bdfecffecffec; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x36fbdfdcffdcffdc; ++ __m128i_out = __lsx_vsubwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000000; ++ __m128i_out = __lsx_vsubwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001; ++ *((unsigned long 
*)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffefffefffeffff; ++ __m128i_out = __lsx_vsubwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000a752a55; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0a753500a9fa0d06; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xf589caff5605f2fa; ++ __m128i_out = __lsx_vsubwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x087c000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000087c; ++ *((unsigned long *)&__m128i_op1[1]) = 0x10f8000100000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000001000010f8; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffefffff784; ++ __m128i_out = __lsx_vsubwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000000000000; ++ __m128i_out = __lsx_vsubwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-1.c +new file mode 100644 +index 000000000..39ebff154 +--- /dev/null ++++ 
b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-1.c +@@ -0,0 +1,326 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3ff0000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x40f3fa0000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 
0xc485edbcc0000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x003f000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x007c000d00400000; ++ __m128i_out = __lsx_vsubwod_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x841f000fc28f801f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x007c0000003e0080; ++ __m128i_out = __lsx_vsubwod_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vsubwod_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000000; ++ __m128i_out = __lsx_vsubwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000017fff9000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000210011084; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000007fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000001001; ++ __m128i_out = __lsx_vsubwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff80000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff80000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffff8000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffff8000000000; ++ __m128i_out = __lsx_vsubwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffefefe6a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000c2bac2c2; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0xfffffffffffffefe; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffc2ba; ++ __m128i_out = __lsx_vsubwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000027f000000fe; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfe80000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000018000000000; ++ __m128i_out = __lsx_vsubwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000001f0a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffff7a53; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vsubwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000ff0000ff86; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffa6ff91fdd8ef77; ++ *((unsigned long *)&__m128i_op1[0]) = 0x061202bffb141c38; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000005a00000228; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffff9ee000004ec; ++ __m128i_out = __lsx_vsubwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000002000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000001fe02000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000002000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000001fe02000; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x30eb020302101b03; ++ *((unsigned long *)&__m128i_op0[0]) = 0x020310d0c0030220; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000002345454; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000c0dec4ca; ++ *((unsigned long *)&__m128i_result[1]) = 0x000030ebffffffdc; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000203ffffff25; ++ __m128i_out = __lsx_vsubwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x380fdfdfc0000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffc7f100004000; ++ __m128i_out = __lsx_vsubwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00005dcbe7e830c0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000015d926c7; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000e41b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000005dcb; ++ __m128i_out = __lsx_vsubwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00f0008100800080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00f000807000009e; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0003c853c843c87e; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0003c853c843c87e; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000ec382e; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000ec382d; ++ __m128i_out = __lsx_vsubwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfcfcfcfcfcfcfcfd; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfcfcfcfcfcfc0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00009c7c00007176; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffcfcfcfc; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffcfc6080; ++ __m128i_out = __lsx_vsubwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xc0b4d1a5f8babad3; ++ *((unsigned long *)&__m128i_op0[0]) = 0xbbc8ecc5f3ced5f3; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffaefffbffaefffb; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffaefffbffaefffb; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffc105d1aa; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffbc19ecca; ++ __m128i_out = __lsx_vsubwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000101fd01fe; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000fe00ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0001000100010001; ++ __m128i_out = __lsx_vsubwod_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffff0000000ad3d; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffff000fffff000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xefffdffff0009d3d; ++ __m128i_out = __lsx_vsubwod_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ff0000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffff00ffffff01; ++ __m128i_out = __lsx_vsubwod_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000100010001007c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000100010001007c; ++ 
__m128i_out = __lsx_vsubwod_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5f675e96e29a5a60; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00fe000100cf005f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x5e695e95e1cb5a01; ++ __m128i_out = __lsx_vsubwod_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7efefefe82010201; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-2.c +new file mode 100644 +index 000000000..62837f1ac +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-2.c +@@ -0,0 +1,308 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include 
"../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000010000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfe07e5fefefdddfe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00020100fedd0c00; ++ *((unsigned long *)&__m128i_result[1]) = 0xff02ff1bff02ff23; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffffff02fff4; ++ __m128i_out = __lsx_vsubwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_h_bu 
(__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffefff6fff80002; ++ *((unsigned long *)&__m128i_op1[1]) = 0x82c53a0000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc72ef153fc02fdf7; ++ *((unsigned long *)&__m128i_result[1]) = 0x007d00c500ff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0038000e0003ff03; ++ __m128i_out = __lsx_vsubwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000000007b; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_h_bu (__m128i_op0, 
__m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x007f000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x007f000000000000; ++ __m128i_out = __lsx_vsubwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000040000000400; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8000000000000010; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff800000000000; ++ __m128i_out = __lsx_vsubwod_w_hu (__m128i_op0, __m128i_op1); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfc01fd1300000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfe00fd1400010000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfc01fd1300000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfe00fd1400010000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fffff800; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vsubwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfe813f00fe813f00; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfe813f00fe813f00; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff017fffff017f; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff017fffff017f; ++ __m128i_out = __lsx_vsubwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x9c7c266e71768fa4; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00009c7c00007176; ++ __m128i_out = __lsx_vsubwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000000; ++ __m128i_out = __lsx_vsubwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000800000008; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000800000008; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000009; ++ __m128i_out = __lsx_vsubwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000897957687; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000408; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fff0007e215b122; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7ffeffff7bfff828; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff80010001; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff80010001; ++ __m128i_out = __lsx_vsubwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000af555555555; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000af555555555; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000af5; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000af5; ++ __m128i_out = __lsx_vsubwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x2e34594c3b000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000002e34594c; ++ __m128i_out = __lsx_vsubwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsubwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000036280001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x42a0000042a02001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000036280001; ++ __m128i_out = __lsx_vsubwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xd0b1ffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x9d519ee8d2d84f1d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8644ffff0000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffff0000fffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x4a6d0000ffff0000; ++ __m128i_out = __lsx_vsubwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x82c539ffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc72df14afbfafdf9; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7d3ac60000000000; ++ __m128i_out = __lsx_vsubwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000010000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00fe00ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000010000; ++ __m128i_out = __lsx_vsubwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfeffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfeffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000fffffffe000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000102020204000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfefff00000001fff; ++ __m128i_out = __lsx_vsubwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0003000300000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0003000300a10003; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffcfffd00000000; ++ __m128i_out = __lsx_vsubwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0002000200000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x6363636163636363; ++ __m128i_out = __lsx_vsubwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +-- +2.33.0 + diff --git a/LoongArch-Add-tests-for-SX-vector-vabsd-vmskgez-vmsk.patch b/LoongArch-Add-tests-for-SX-vector-vabsd-vmskgez-vmsk.patch new file mode 100644 index 0000000000000000000000000000000000000000..c99d450095b8eed6a0f173683016860ed8cbdcac --- /dev/null +++ b/LoongArch-Add-tests-for-SX-vector-vabsd-vmskgez-vmsk.patch @@ -0,0 +1,1710 @@ +From 7fc7953897e6ff488eebd5ea769447b7a1a7a0ed Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Mon, 11 Sep 2023 18:48:08 +0800 +Subject: [PATCH 087/124] LoongArch: Add tests for SX vector + vabsd/vmskgez/vmskltz/vmsknz/vsigncov instructions. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lsx/lsx-vabsd-1.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vabsd-2.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vmskgez.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vmskltz.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vmsknz.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vsigncov.c: New test. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lsx/lsx-vabsd-1.c | 272 +++++++++++ + .../loongarch/vector/lsx/lsx-vabsd-2.c | 398 ++++++++++++++++ + .../loongarch/vector/lsx/lsx-vmskgez.c | 119 +++++ + .../loongarch/vector/lsx/lsx-vmskltz.c | 321 +++++++++++++ + .../loongarch/vector/lsx/lsx-vmsknz.c | 104 +++++ + .../loongarch/vector/lsx/lsx-vsigncov.c | 425 ++++++++++++++++++ + 6 files changed, 1639 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskgez.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskltz.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsknz.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsigncov.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-1.c +new file mode 100644 +index 000000000..e336581f3 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-1.c +@@ -0,0 +1,272 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfda9b23a624082fd; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x03574e3a62407e03; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000001010000; ++ __m128i_out = __lsx_vabsd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffd000700000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0014fff500000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7f03000780000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f15000a7f010101; ++ __m128i_out = __lsx_vabsd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000060000000e; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000127fffffea; ++ *((unsigned long *)&__m128i_result[1]) = 0x7f0101070101010f; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000127f010116; ++ __m128i_out = __lsx_vabsd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000000b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000000b; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000000b; ++ __m128i_out = __lsx_vabsd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x67eb85af0000b000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long *)&__m128i_result[1]) = 0x67157b5100005000; ++ *((unsigned long *)&__m128i_result[0]) = 0x387c7e0a133f2000; ++ __m128i_out = __lsx_vabsd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfff7fffefffa01ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffbfffefffe01ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfcfcfcfcfcfcfcfd; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfcfcfcfcfcfcfcfd; ++ *((unsigned long *)&__m128i_result[1]) = 0x0305030203020502; ++ *((unsigned long *)&__m128i_result[0]) = 0x0301030203020502; ++ __m128i_out = __lsx_vabsd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4ee376188658d85f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5728dcc85ac760d2; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x4e1d76187a58285f; ++ *((unsigned long *)&__m128i_result[0]) = 0x572824385a39602e; ++ __m128i_out = __lsx_vabsd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xf654ad7447e59090; ++ *((unsigned long *)&__m128i_op1[0]) = 0x27b1b106b8145f50; ++ *((unsigned long *)&__m128i_result[1]) = 0x0a545374471b7070; ++ *((unsigned long *)&__m128i_result[0]) = 0x274f4f0648145f50; ++ __m128i_out = __lsx_vabsd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x21f32eafa486fd38; ++ *((unsigned long *)&__m128i_op0[0]) = 0x407c2ca3d3430357; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x21f32eaf5b7a02c8; ++ *((unsigned long *)&__m128i_result[0]) = 0x407c2ca32cbd0357; ++ __m128i_out = __lsx_vabsd_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000003bfb4000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000003bfb4000; ++ __m128i_out = __lsx_vabsd_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100010001; ++ __m128i_out = __lsx_vabsd_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffdf; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff0000ffff0000; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000021; ++ __m128i_out = __lsx_vabsd_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x003f0000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003f0000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000700000004e000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0003000000012020; ++ *((unsigned long *)&__m128i_result[1]) = 0x0038000000051fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x003c000000022021; ++ __m128i_out = __lsx_vabsd_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x9c9d9b9bbfaa20e9; ++ *((unsigned long *)&__m128i_op0[0]) = 0xbe081c963e6fee68; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000feff23560000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000fd1654860000; ++ *((unsigned long *)&__m128i_result[1]) = 0x6363636463abdf17; ++ *((unsigned long *)&__m128i_result[0]) = 0x41f8e08016161198; ++ __m128i_out = __lsx_vabsd_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vabsd_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000800000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff01fe0400000006; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000005fffa; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000800000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00fe01fc0005fff4; ++ __m128i_out = __lsx_vabsd_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffa; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000005; ++ __m128i_out = __lsx_vabsd_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x010003f00000ff00; ++ *((unsigned long *)&__m128i_op0[0]) = 0x017f03000000ff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x010003f00000ff00; ++ *((unsigned long *)&__m128i_op1[0]) = 0x017f03000000ff00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000001fffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000001ffffffe; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vabsd_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffac0a000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffac0a000000; ++ __m128i_out = __lsx_vabsd_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-2.c +new file mode 100644 +index 000000000..c1af80e14 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-2.c +@@ -0,0 +1,398 @@ 
++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xa8beed87bc3f2be1; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0024d8f6a494006a; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfeffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfeffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x5641127843c0d41e; ++ *((unsigned long *)&__m128i_result[0]) = 0xfedb27095b6bff95; ++ __m128i_out = __lsx_vabsd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000383ffff1fff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ca354688; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000038335ca2777; ++ __m128i_out = __lsx_vabsd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000fff8fff8; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000fff80000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000fff8fff8; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000fff80000; ++ __m128i_out = __lsx_vabsd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffd000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0002ffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001fd0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000001fd0; ++ __m128i_out = __lsx_vabsd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000005; ++ __m128i_out = __lsx_vabsd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000005; ++ __m128i_out = __lsx_vabsd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffff0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ff08ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ff08ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff0; ++ __m128i_out = __lsx_vabsd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000005; ++ __m128i_out 
= __lsx_vabsd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfefff00000001fff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffe1ffc100000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000400000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffe1ffc100000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfefff00000401fff; ++ __m128i_out = __lsx_vabsd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vabsd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff000000ff000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff000000ff000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xff000000ff000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xff000000ff000000; ++ __m128i_out = __lsx_vabsd_hu (__m128i_op0, 
__m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x13f9c5b60028a415; ++ *((unsigned long *)&__m128i_op0[0]) = 0x545cab1d7e57c415; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x13f9c5b60028a415; ++ *((unsigned long *)&__m128i_result[0]) = 0x545cab1d81a83bea; ++ __m128i_out = __lsx_vabsd_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fff010181010102; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff81010102; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fff010181010102; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fffffff81010102; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffcfb799f1; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0282800002828282; ++ *((unsigned long *)&__m128i_op1[1]) = 0x5555001400005111; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffabbeab55110140; ++ *((unsigned long *)&__m128i_result[1]) = 0xaaaaffebcfb748e0; ++ *((unsigned long *)&__m128i_result[0]) = 0xfd293eab528e7ebe; ++ __m128i_out = __lsx_vabsd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7505443065413aed; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0100d6effefd0498; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7505443065413aed; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0100d6effefd0498; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x56a09e662ab46b31; ++ *((unsigned long *)&__m128i_op0[0]) = 0xb4b8122ef4054bb3; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x56a09e662ab46b31; ++ *((unsigned long *)&__m128i_result[0]) = 0xb4b8122ef4054bb3; ++ __m128i_out = __lsx_vabsd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffff01ff01; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3c600000ff800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0xc39fffff007fffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000fe00fd; ++ __m128i_out = __lsx_vabsd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8006000080020000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8004000080020000; ++ *((unsigned long *)&__m128i_result[1]) = 0x8006000080020000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8004000080020000; ++ __m128i_out = __lsx_vabsd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000001ca02f854; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100013fa0; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000001ca02f854; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100013fa0; ++ __m128i_out = __lsx_vabsd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff0015172b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffb00151727; ++ __m128i_out = __lsx_vabsd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned 
long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffbfffffff8; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffbfffffff8; ++ __m128i_out = __lsx_vabsd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffdc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffbffffffd8; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffbfffffff8; ++ __m128i_out = __lsx_vabsd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000003; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000003; ++ __m128i_out = __lsx_vabsd_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000006; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000490000004d; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000001ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000490000004d; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000001fffffff9; ++ __m128i_out = __lsx_vabsd_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4101010141010100; ++ *((unsigned 
long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x64b680a2ae3af8c8; ++ *((unsigned long *)&__m128i_op1[0]) = 0x161c0c363c200824; ++ *((unsigned long *)&__m128i_result[1]) = 0x23b57fa16d39f7c8; ++ *((unsigned long *)&__m128i_result[0]) = 0x161c0c363c200824; ++ __m128i_out = __lsx_vabsd_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3ff0010000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3ff0010000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x3fffff0000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x3fffff0000000000; ++ __m128i_out = __lsx_vabsd_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1ffffffff8001000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf0bd80bd80bd8000; ++ *((unsigned long *)&__m128i_result[1]) = 0x1ffffffff8001000; ++ *((unsigned long *)&__m128i_result[0]) = 0xf0bd80bd80bd8000; ++ __m128i_out = __lsx_vabsd_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0020002000200020; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffef8; ++ *((unsigned long *)&__m128i_result[0]) = 0xffdfffdfffdffee0; ++ __m128i_out = __lsx_vabsd_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[0]) = 0x8080808080808080; ++ __m128i_out = __lsx_vabsd_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000003ddc5dac; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000003ddc5dac; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskgez.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskgez.c +new file mode 100644 +index 000000000..64a950f81 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskgez.c +@@ -0,0 +1,119 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d 
__m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vmskgez_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0403cfcf01c1595e; ++ *((unsigned long *)&__m128i_op0[0]) = 0x837cd5db43fc55d4; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000cb4a; ++ __m128i_out = __lsx_vmskgez_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vmskgez_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vmskgez_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vmskgez_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff7f01ff01; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000000d; ++ __m128i_out = __lsx_vmskgez_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vmskgez_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vmskgez_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vmskgez_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vmskgez_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_out = __lsx_vpickve2gr_h (__m128i_op0, 
0x2); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000010000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vmskgez_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfe813f00fe813f00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000033; ++ __m128i_out = __lsx_vmskgez_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000fffe00006aea; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffce; ++ __m128i_out = __lsx_vmskgez_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskgez_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskltz.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskltz.c +new file mode 100644 +index 000000000..8f743ec2e +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskltz.c +@@ -0,0 +1,321 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, 
__m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000003; ++ __m128i_out = __lsx_vmskltz_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x007fffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x85bd6b0e94d89998; ++ *((unsigned long *)&__m128i_op0[0]) = 0xd83c8081ffff8080; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000000f; ++ __m128i_out = __lsx_vmskltz_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7505443065413aed; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0100d6effefd0498; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000013d; ++ __m128i_out = __lsx_vmskltz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000f0000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000002; ++ __m128i_out = __lsx_vmskltz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000100010001fffd; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vmskltz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000100010001007c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000001007c; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1111113111111141; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1111113111111121; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x9780697084f07dd7; ++ *((unsigned long *)&__m128i_op0[0]) = 0x87e3285243051cf3; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000cdc1; ++ __m128i_out = __lsx_vmskltz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x05d0ae6002e8748e; ++ *((unsigned long *)&__m128i_op0[0]) = 0xcd1de80217374041; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000000065a0; ++ __m128i_out = __lsx_vmskltz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00d3012acc56f9bb; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000000000a0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x0000000000004b01; ++ __m128i_out = __lsx_vmskltz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffbfffffffbf; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffbfffffffbf; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vmskltz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ff00; ++ __m128i_out = __lsx_vmskltz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x21201f1e1d1c1b1a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1918171615141312; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000000f; ++ __m128i_out = __lsx_vmskltz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x00000000ff08ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffff0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vmskltz_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x370bdfecffecffec; ++ *((unsigned long *)&__m128i_op0[0]) = 0x370bdfecffecffec; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000003f3f; ++ __m128i_out = __lsx_vmskltz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000080000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000080000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000022; ++ 
__m128i_out = __lsx_vmskltz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000003; ++ __m128i_out = __lsx_vmskltz_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000008080600; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000007fff0018; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000004; ++ __m128i_out = __lsx_vmskltz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000035697d4e; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000013ecaadf2; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vmskltz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000006de1; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5f9ccf33cf600000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_d (__m128i_op0); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003ffffe00800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000034; ++ __m128i_out = __lsx_vmskltz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4399d3221a29d3f2; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc3818bffe7b7a7b8; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vmskltz_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x63636b6afe486741; ++ *((unsigned long *)&__m128i_op0[0]) = 0x41f8e880ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000027; ++ __m128i_out = __lsx_vmskltz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsknz.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsknz.c +new file mode 
100644 +index 000000000..d547af0d3 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsknz.c +@@ -0,0 +1,104 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000017fff9000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000210011084; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000001e1f; ++ __m128i_out = __lsx_vmsknz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x9c9c9c9c63636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vmsknz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x009500b10113009c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x009500b10113009c; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000005d5d; ++ __m128i_out = __lsx_vmsknz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m128i_op0[0]) = 
0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vmsknz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmsknz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffff000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000000000fe; ++ __m128i_out = __lsx_vmsknz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000fffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0010000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000007f41; ++ __m128i_out = __lsx_vmsknz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0014001400140000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000554; ++ __m128i_out = __lsx_vmsknz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vmsknz_b (__m128i_op0); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x202544f490f2de35; ++ *((unsigned long *)&__m128i_op0[0]) = 0x202544f490f2de35; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vmsknz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000a74aa8a55ab; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6adeb5dfcb000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000003ff8; ++ __m128i_out = __lsx_vmsknz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x317fce80317fce80; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ff00; ++ __m128i_out = __lsx_vmsknz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsigncov.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsigncov.c +new file mode 100644 +index 000000000..0fb1bc18f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsigncov.c +@@ -0,0 +1,425 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int 
unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00003f803f800100; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x52527d7d52527d7d; ++ *((unsigned long *)&__m128i_op1[0]) = 0x52527d7d52527d7d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_b 
(__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x870968c1f56bb3cd; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xf000e001bf84df83; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfff8e001ff84e703; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ca354688; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff35cab978; ++ __m128i_out = __lsx_vsigncov_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6a57a30ff0000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_w (__m128i_op0, __m128i_op1); 
++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfe00fe00fe00fd01; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfe00fffefe0100f6; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffff0000010000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0100010000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0100010000010000; ++ __m128i_out = __lsx_vsigncov_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000020000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000183fffffe5; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000400000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000400000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff3d06ffff4506; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7ffffffe7ffff800; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_result[1]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m128i_result[0]) = 0xfff6fff6fff6fff6; ++ __m128i_out = __lsx_vsigncov_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3fffff0000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3fffff0000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x3f80000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x3f80000000000000; ++ __m128i_out = __lsx_vsigncov_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfff8fff8fff8fff8; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfff8fff8fff8fff8; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x52525252525252cb; ++ *((unsigned long *)&__m128i_op1[0]) = 0x52525252525252cb; ++ *((unsigned long *)&__m128i_result[1]) = 0xaeaeaeaeaeaeae35; ++ *((unsigned long *)&__m128i_result[0]) = 0xaeaeaeaeaeaeae35; ++ __m128i_out = __lsx_vsigncov_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x370bdfecffecffec; ++ *((unsigned long *)&__m128i_op0[0]) = 0x370bdfecffecffec; ++ *((unsigned long *)&__m128i_op1[1]) = 0x370bdfecffecffec; ++ *((unsigned long *)&__m128i_op1[0]) = 0x370bdfecffecffec; ++ *((unsigned long *)&__m128i_result[1]) = 0x370bdfec00130014; ++ *((unsigned long *)&__m128i_result[0]) = 0x370bdfec00130014; ++ __m128i_out = __lsx_vsigncov_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0002020002020200; ++ *((unsigned long *)&__m128i_op0[0]) = 0x021f3b0205150600; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000300400002; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000100010040fffb; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000300400002; ++ *((unsigned long *)&__m128i_result[0]) = 0x000100010040fffb; ++ __m128i_out = __lsx_vsigncov_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ff801c9e; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000810000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0080008000800080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x008003496dea0c61; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0101000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0101030100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000400000004; ++ __m128i_out = __lsx_vsigncov_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000004; ++ __m128i_out = __lsx_vsigncov_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1ab6021f72496458; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7750af4954c29940; ++ *((unsigned long *)&__m128i_result[1]) = 0xe64afee18eb79ca8; ++ *((unsigned long *)&__m128i_result[0]) = 0x89b051b7ac3e67c0; ++ __m128i_out = __lsx_vsigncov_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x441ba9fcffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x181b2541ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fffffff7ffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fffffff7ffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fff010181010102; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff81010102; ++ __m128i_out = __lsx_vsigncov_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000045340a6; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000028404044; ++ *((unsigned long *)&__m128i_op1[1]) = 0x003f0000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x003f0000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vsigncov_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0010001000000010; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_op1[1]) = 0x67eb85afb2ebb000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long *)&__m128i_result[1]) = 0x67eb85af0000b000; ++ *((unsigned long *)&__m128i_result[0]) = 0xc8847ef6ed3f2000; ++ __m128i_out = __lsx_vsigncov_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000000; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000000000000103; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffc; ++ __m128i_out = __lsx_vsigncov_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000034; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x003ffffe00800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x004001be00dc008e; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ffff0100010001; ++ __m128i_out = __lsx_vsigncov_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfff9fffefff9ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x04faf60009f5f092; ++ *((unsigned long *)&__m128i_op1[0]) = 0x04fafa9200000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfc06066e00000000; ++ __m128i_out = __lsx_vsigncov_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fffe0002; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000667ae56; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000020; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000667ae56; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000020; ++ __m128i_out = __lsx_vsigncov_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0002000100020002; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0002000100020002; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0002000100020002; ++ __m128i_out = __lsx_vsigncov_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010012; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffe1ffc0; ++ 
*((unsigned long *)&__m128i_op1[1]) = 0x0001000100010012; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffe1ffc0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010012; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffe1ffc0; ++ __m128i_out = __lsx_vsigncov_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +-- +2.33.0 + diff --git a/LoongArch-Add-tests-for-SX-vector-vand-vandi-vandn-v.patch b/LoongArch-Add-tests-for-SX-vector-vand-vandi-vandn-v.patch new file mode 100644 index 0000000000000000000000000000000000000000..82ff843e32f6147d086cfa8cbbc45398d98cedb3 --- /dev/null +++ b/LoongArch-Add-tests-for-SX-vector-vand-vandi-vandn-v.patch @@ -0,0 +1,1209 @@ +From ea0d56b6569735448905780fe8468c9b3c6aad14 Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Mon, 11 Sep 2023 18:58:17 +0800 +Subject: [PATCH 097/124] LoongArch: Add tests for SX vector + vand/vandi/vandn/vor/vori/vnor/ vnori/vxor/vxori instructions. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lsx/lsx-vand.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vandi.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vandn.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vnor.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vnori.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vor.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vori.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vorn.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vxor.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vxori.c: New test. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lsx/lsx-vand.c | 159 ++++++++++++++++ + .../loongarch/vector/lsx/lsx-vandi.c | 67 +++++++ + .../loongarch/vector/lsx/lsx-vandn.c | 129 +++++++++++++ + .../loongarch/vector/lsx/lsx-vnor.c | 109 +++++++++++ + .../loongarch/vector/lsx/lsx-vnori.c | 91 ++++++++++ + .../gcc.target/loongarch/vector/lsx/lsx-vor.c | 169 ++++++++++++++++++ + .../loongarch/vector/lsx/lsx-vori.c | 123 +++++++++++++ + .../loongarch/vector/lsx/lsx-vorn.c | 109 +++++++++++ + .../loongarch/vector/lsx/lsx-vxor.c | 79 ++++++++ + .../loongarch/vector/lsx/lsx-vxori.c | 67 +++++++ + 10 files changed, 1102 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vand.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandn.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnor.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnori.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vor.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vori.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vorn.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxor.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxori.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vand.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vand.c +new file mode 100644 +index 000000000..1597749b5 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vand.c +@@ -0,0 +1,159 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, 
__m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i=1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x03574e3a62407e03; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000001010000; ++ *((unsigned long*)& __m128i_result[1]) = 0x03574e3a62407e03; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x001fffff001fffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x001fffff001fffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x001fffff001fffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x001fffff001fffff; ++ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000003dffc2; ++ *((unsigned long*)& __m128i_op0[0]) = 
0x00000000003dffc2; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0008000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffff53d9; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffff0001ffff9515; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffff53d9; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffff0001ffff9515; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffff53d9; ++ *((unsigned long*)& __m128i_result[0]) = 0xffff0001ffff9515; ++ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x67eb85af0000b000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x67eb85af0000b000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long*)& __m128i_result[1]) = 0x67eb85af0000b000; ++ *((unsigned long*)& __m128i_result[0]) = 0xc8847ef6ed3f2000; ++ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0313100003131000; ++ 
*((unsigned long*)& __m128i_op1[0]) = 0x0313100003131000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000200000002; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0007000000050000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0003000100010001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00007a8000000480; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000485000004cc; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0a0000000a000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0a0000000a000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandi.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandi.c +new file mode 100644 +index 000000000..906da69ca +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandi.c +@@ -0,0 +1,67 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, 
int_out, int_result, i=1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vandi_b(__m128i_op0,0x36); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000800; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000800; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vandi_b(__m128i_op0,0x39); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vandi_b(__m128i_op0,0x27); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vandi_b(__m128i_op0,0x25); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vandi_b(__m128i_op0,0xbd); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000a95afc60a5c5; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000b6e414157f84; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000204264602444; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000266404046604; ++ __m128i_out = __lsx_vandi_b(__m128i_op0,0x66); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandn.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandn.c +new file mode 100644 +index 000000000..3ae2d7694 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandn.c +@@ -0,0 +1,129 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i=1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000000017fff9000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000210011084; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000017fff9000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000210011084; ++ __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000049000000c0; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000001ffffff29; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000049000000c0; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffff29; ++ __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x010f00000111fffc; ++ *((unsigned long*)& __m128i_op0[0]) = 0x016700dc0176003a; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0003000000010000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0002000000010000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 
0xfffffffffffff000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffff000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x62cbf96e4acfaf40; ++ *((unsigned long*)& __m128i_op1[0]) = 0xf0bc9a5278285a4a; ++ *((unsigned long*)& __m128i_result[1]) = 0x62cbf96e4acfaf40; ++ *((unsigned long*)& __m128i_result[0]) = 0xf0bc9a5278285a4a; ++ __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffe0004fffe0004; ++ *((unsigned 
long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x9c7c266e71768fa4; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x9c7c266e71768fa4; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnor.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnor.c +new file mode 100644 +index 000000000..a7a3acce9 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnor.c +@@ -0,0 +1,109 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i=1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000fff8fff8; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fff80000; ++ *((unsigned long*)& __m128i_op1[1]) = 
0x00000000fff8fff8; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000fff80000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00070007; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff0007ffff; ++ __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xce23d33e43d9736c; ++ *((unsigned long*)& __m128i_op0[0]) = 0x63b2ac27aa076aeb; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x31dc2cc1bc268c93; ++ *((unsigned long*)& __m128i_result[0]) = 0x9c4d53d855f89514; ++ __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000c; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff3; ++ __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000400080003fff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000bc2000007e04; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000400080003fff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000bc2000007e04; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffbfff7fffc000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffff43dfffff81fb; ++ __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x021b7d24c9678a35; ++ *((unsigned long*)& __m128i_op0[0]) = 0x030298a6a1030a49; ++ *((unsigned long*)& __m128i_op1[1]) = 0x5252525252525252; ++ *((unsigned long*)& __m128i_op1[0]) = 0x5252525252525252; ++ *((unsigned long*)& __m128i_result[1]) = 0xada4808924882588; ++ *((unsigned long*)& __m128i_result[0]) = 0xacad25090caca5a4; ++ __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffefffe0000ff18; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffff000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; ++ 
*((unsigned long*)& __m128i_result[1]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001000100000000; ++ __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnori.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnori.c +new file mode 100644 +index 000000000..a07a02ab2 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnori.c +@@ -0,0 +1,91 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i=1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m128i_result[1]) = 0xcccccccc0000cccc; ++ *((unsigned long*)& __m128i_result[0]) = 0xcccccccc0000cccc; ++ __m128i_out = __lsx_vnori_b(__m128i_op0,0x33); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vnori_b(__m128i_op0,0xa6); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3e035e51522f0799; ++ *((unsigned long*)& __m128i_result[1]) = 0x9292929292929292; ++ *((unsigned long*)& __m128i_result[0]) = 0x8090808280909002; ++ __m128i_out = __lsx_vnori_b(__m128i_op0,0x6d); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000ffc2f; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00201df000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x3838383838300010; ++ *((unsigned long*)& __m128i_result[0]) = 0x3818200838383838; ++ __m128i_out = __lsx_vnori_b(__m128i_op0,0xc7); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x2020202020202020; ++ *((unsigned long*)& __m128i_op0[0]) = 0x2020202020207f7f; ++ *((unsigned long*)& __m128i_result[1]) = 0x5d5d5d5d5d5d5d5d; ++ *((unsigned long*)& __m128i_result[0]) = 0x5d5d5d5d5d5d0000; ++ __m128i_out = __lsx_vnori_b(__m128i_op0,0xa2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_result[0]) = 0x8080808080808080; ++ __m128i_out = __lsx_vnori_b(__m128i_op0,0x7f); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_result[1]) = 0x1313131313131313; ++ *((unsigned long*)& __m128i_result[0]) = 0x1313131313131313; ++ __m128i_out = __lsx_vnori_b(__m128i_op0,0xec); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x9d9d9d9d9d9d9d9d; ++ *((unsigned long*)& __m128i_result[0]) = 
0x9d9d9d9d9d9d9d9d; ++ __m128i_out = __lsx_vnori_b(__m128i_op0,0x62); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00f525682ffd27f2; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00365c60317ff930; ++ *((unsigned long*)& __m128i_result[1]) = 0xe500c085c000c005; ++ *((unsigned long*)& __m128i_result[0]) = 0xe5c1a185c48004c5; ++ __m128i_out = __lsx_vnori_b(__m128i_op0,0x1a); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vor.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vor.c +new file mode 100644 +index 000000000..537a1bb3b +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vor.c +@@ -0,0 +1,169 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i=1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7e44bde9b842ff23; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00011e80007edff8; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffc001fffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffc001fffffffff; ++ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000200010; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000200010; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000200010; ++ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x4f804f804f804f80; ++ *((unsigned long*)& 
__m128i_op0[0]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_op1[1]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_op1[0]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_result[1]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_result[0]) = 0x4f804f804f804f80; ++ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3e035e51522f0799; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x3e035e51522f0799; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x3e035e51522f0799; ++ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffff000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffff000000000000; ++ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffb00fdfdf7ffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfff8000000000000; ++ 
*((unsigned long*)& __m128i_op1[1]) = 0xfffb00fdfdf7ffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfff8000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffb00fdfdf7ffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xfff8000000000000; ++ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff80005613; ++ *((unsigned long*)& __m128i_op0[0]) = 0x81000080806b000b; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffff00011cf0c569; ++ *((unsigned long*)& __m128i_op1[0]) = 0xc0000002b0995850; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff9cf0d77b; ++ *((unsigned long*)& __m128i_result[0]) = 0xc1000082b0fb585b; ++ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffbfff8; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x80808080806b000b; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffbfffb; ++ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000101010001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0001ffff0101ffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001ffff0001ffff; ++ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffc105d1aa; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffbc19ecca; ++ *((unsigned long*)& __m128i_op1[1]) = 
0xc0b4d1a5f8babad3; ++ *((unsigned long*)& __m128i_op1[0]) = 0xbbc8ecc5f3ced5f3; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffff9bffbfb; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffdffdfb; ++ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vori.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vori.c +new file mode 100644 +index 000000000..8a6e035c9 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vori.c +@@ -0,0 +1,123 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i=1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x8282828282828282; ++ *((unsigned 
long*)& __m128i_result[0]) = 0x8282828282828282; ++ __m128i_out = __lsx_vori_b(__m128i_op0,0x82); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7505853d654185f5; ++ *((unsigned long*)& __m128i_op0[0]) = 0x01010000fefe0101; ++ *((unsigned long*)& __m128i_result[1]) = 0x7545c57d6541c5f5; ++ *((unsigned long*)& __m128i_result[0]) = 0x41414040fefe4141; ++ __m128i_out = __lsx_vori_b(__m128i_op0,0x40); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000c2f90000bafa; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8000c2fa8000c2fa; ++ *((unsigned long*)& __m128i_result[1]) = 0x7474f6fd7474fefe; ++ *((unsigned long*)& __m128i_result[0]) = 0xf474f6fef474f6fe; ++ __m128i_out = __lsx_vori_b(__m128i_op0,0x74); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long*)& __m128i_result[0]) = 0x3d3d3d3d3d3d3d3d; ++ __m128i_out = __lsx_vori_b(__m128i_op0,0x3d); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffacdb6dbecac; ++ *((unsigned long*)& __m128i_op0[0]) = 0x1f5533a694f902c0; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffffadffedbfefe; ++ *((unsigned long*)& __m128i_result[0]) = 0x5f5f7bfedefb5ada; ++ __m128i_out = __lsx_vori_b(__m128i_op0,0x5a); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vori_b(__m128i_op0,0x38); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 
0x0d1202e19235e2bc; ++ *((unsigned long*)& __m128i_op0[0]) = 0xea38e0f75f6e56d1; ++ *((unsigned long*)& __m128i_result[1]) = 0x2f3626e7b637e6be; ++ *((unsigned long*)& __m128i_result[0]) = 0xee3ee6f77f6e76f7; ++ __m128i_out = __lsx_vori_b(__m128i_op0,0x26); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0003c853c843c87e; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0003c853c843c87e; ++ *((unsigned long*)& __m128i_result[1]) = 0xd6d7ded7ded7defe; ++ *((unsigned long*)& __m128i_result[0]) = 0xd6d7ded7ded7defe; ++ __m128i_out = __lsx_vori_b(__m128i_op0,0xd6); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffe0000fffe0000; ++ *((unsigned long*)& __m128i_result[1]) = 0x7777777777777777; ++ *((unsigned long*)& __m128i_result[0]) = 0xffff7777ffff7777; ++ __m128i_out = __lsx_vori_b(__m128i_op0,0x77); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vori_b(__m128i_op0,0x55); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xd454545454545454; ++ *((unsigned long*)& __m128i_result[0]) = 0xd454545454545454; ++ __m128i_out = __lsx_vori_b(__m128i_op0,0x54); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x4f4f4f4f4f4f4f4f; ++ *((unsigned long*)& __m128i_result[0]) = 
0x4f4f4f4f4f4f4f4f; ++ __m128i_out = __lsx_vori_b(__m128i_op0,0x4f); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x8a8a8a8a8a8a8a8a; ++ *((unsigned long*)& __m128i_result[0]) = 0x8a8a8a8a8a8a8a8a; ++ __m128i_out = __lsx_vori_b(__m128i_op0,0x8a); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vorn.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vorn.c +new file mode 100644 +index 000000000..bb59bc312 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vorn.c +@@ -0,0 +1,109 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i=1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000100010001fffe; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) 
= 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00d3012b015700bb; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00010000ffca0070; ++ *((unsigned long*)& __m128i_result[1]) = 0xff2cfed4fea8ff44; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffeffff0035ff8f; ++ __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00fe00fe00fe00fe; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00fe00fe00fe0045; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x00fe00fe00fe00fe; ++ *((unsigned long*)& __m128i_result[0]) = 0x00fe00fe00fe0045; ++ __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000010000010101; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0101000001000100; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000010000010101; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0101000001000100; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m128i_op0[0]) = 0x0000000000000002; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000002; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x33f5c2d7d975d7fe; ++ *((unsigned long*)& __m128i_op0[0]) = 0xe4423f7b769f8ffe; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x33f5c2d7d975d7fe; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00050eb00000fffa; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000f8a50000f310; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00bbfff7fffffff7; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff008ff820; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010012; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffe1ffc0; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff009ff83f; ++ __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxor.c 
b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxor.c +new file mode 100644 +index 000000000..72fa97174 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxor.c +@@ -0,0 +1,79 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i=1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000f4012ceb; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000f4012ceb; ++ __m128i_out = __lsx_vxor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vxor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vxor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x3ff0000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x40f3fa0000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x3ff0000000000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x40f3fa0000000000; ++ __m128i_out = __lsx_vxor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000080000068; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000080000068; ++ __m128i_out = __lsx_vxor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffff53d9; ++ *((unsigned long*)& __m128i_op0[0]) = 0xff000001ffff9515; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffff0001ffff9514; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff0000ac26; ++ *((unsigned long*)& __m128i_result[0]) = 0x00ff000000000001; ++ __m128i_out = __lsx_vxor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxori.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxori.c +new file mode 
100644 +index 000000000..cc823d4ba +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxori.c +@@ -0,0 +1,67 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i=1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0404040404040404; ++ *((unsigned long*)& __m128i_result[0]) = 0x0404040404040404; ++ __m128i_out = __lsx_vxori_b(__m128i_op0,0x4); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000001000100; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000001000100; ++ *((unsigned long*)& __m128i_result[1]) = 0x5a5a5a5a5b5a5b5a; ++ *((unsigned long*)& __m128i_result[0]) = 0x5a5a5a5a5b5a5b5a; ++ __m128i_out = __lsx_vxori_b(__m128i_op0,0x5a); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xe3e3e3e3e3e3e3e3; ++ *((unsigned long*)& __m128i_result[0]) = 0xe3e3e3e3e3e3e3e3; ++ __m128i_out = __lsx_vxori_b(__m128i_op0,0xe3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m128i_op0[0]) = 0x0020002000200020; ++ *((unsigned long*)& __m128i_result[1]) = 0x9a9a9a9a9a9a9a9a; ++ *((unsigned long*)& __m128i_result[0]) = 0x9aba9aba9aba9aba; ++ __m128i_out = __lsx_vxori_b(__m128i_op0,0x9a); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x9090909090909090; ++ *((unsigned long*)& __m128i_result[0]) = 0x9090909090909090; ++ __m128i_out = __lsx_vxori_b(__m128i_op0,0x90); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000b81c8382; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000077af9450; ++ *((unsigned long*)& __m128i_result[1]) = 0xf1f1f1f149ed7273; ++ *((unsigned long*)& __m128i_result[0]) = 0xf1f1f1f1865e65a1; ++ __m128i_out = __lsx_vxori_b(__m128i_op0,0xf1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ return 0; ++} +-- +2.33.0 + diff --git a/LoongArch-Add-tests-for-SX-vector-vavg-vavgr-instruc.patch b/LoongArch-Add-tests-for-SX-vector-vavg-vavgr-instruc.patch new file mode 100644 index 0000000000000000000000000000000000000000..2d03fdd914af4d755b975e7a91d273df13fa0744 --- /dev/null +++ b/LoongArch-Add-tests-for-SX-vector-vavg-vavgr-instruc.patch @@ -0,0 +1,1375 @@ +From 4fba531ee417a29234e8be84e17cddc7dd9ec343 Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Mon, 11 Sep 2023 18:35:55 +0800 +Subject: [PATCH 084/124] LoongArch: Add tests for SX vector vavg/vavgr + instructions. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lsx/lsx-vavg-1.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vavg-2.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vavgr-1.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vavgr-2.c: New test. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lsx/lsx-vavg-1.c | 398 ++++++++++++++++++ + .../loongarch/vector/lsx/lsx-vavg-2.c | 308 ++++++++++++++ + .../loongarch/vector/lsx/lsx-vavgr-1.c | 299 +++++++++++++ + .../loongarch/vector/lsx/lsx-vavgr-2.c | 317 ++++++++++++++ + 4 files changed, 1322 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-2.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-1.c +new file mode 100644 +index 000000000..2177ca3f6 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-1.c +@@ -0,0 +1,398 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vavg_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfff8fff8fff8fff8; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfff8fff8fff8fff8; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffcfffcfffcfffc; ++ __m128i_out = __lsx_vavg_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4050000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x2028000000000000; ++ __m128i_out = 
__lsx_vavg_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vavg_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x67eb85afb2ebb000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000014155445; ++ *((unsigned long *)&__m128i_result[1]) = 0x33f5c2d7d9f5d800; ++ *((unsigned long *)&__m128i_result[0]) = 0xe4c23ffb002a3a22; ++ __m128i_out = __lsx_vavg_b (__m128i_op0, __m128i_op1); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x007fffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000000f000e; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000ffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x003fffff00070007; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000007ffff; ++ __m128i_out = __lsx_vavg_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000400028000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000020001c020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000022; ++ __m128i_out = __lsx_vavg_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x08080807f5f5f5f8; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000ff00; ++ *((unsigned long *)&__m128i_result[1]) = 0x04040403fafafafc; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ff80; ++ __m128i_out = __lsx_vavg_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x10f8000100000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000001000010f8; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x087c000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000087c; ++ __m128i_out = __lsx_vavg_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x5641127843c0d41e; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfedb27095b6bff95; ++ *((unsigned long *)&__m128i_op1[1]) = 0xa8beed87bc3f2be1; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0024d8f6a494006a; ++ *((unsigned long *)&__m128i_result[1]) = 0xff7fffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xff7fffffffffffff; ++ __m128i_out = __lsx_vavg_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00007fff; ++ __m128i_out = __lsx_vavg_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff80ff0010ff06; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xedfaedfaedfaedfa; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0xf6fd377cf705f680; ++ *((unsigned long *)&__m128i_result[0]) = 0xc0000000bfff8000; ++ __m128i_out = __lsx_vavg_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3ff0000000007fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000002bfd9461; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000f00; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffff00; ++ *((unsigned long *)&__m128i_result[1]) = 0x1ff800000000477f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000015fec9b0; ++ __m128i_out = __lsx_vavg_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000006f; ++ 
*((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000037; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x003fffff00000000; ++ __m128i_out = __lsx_vavg_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000800000008000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000800000008000; ++ __m128i_out = __lsx_vavg_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xd6a09e662ab46b31; ++ *((unsigned long *)&__m128i_op0[0]) = 0x34b8122ef4054bb3; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xeb504f33155a3598; ++ *((unsigned long *)&__m128i_result[0]) = 0x1a5c0917fa02a5d9; ++ __m128i_out = __lsx_vavg_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0177fff0fffffff0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000011ff8bc; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffefffff784; ++ *((unsigned long *)&__m128i_result[1]) = 0x00bbfff7fffffff7; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff008ff820; ++ __m128i_out = __lsx_vavg_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00001802041b0014; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000c01020d8009; ++ __m128i_out = __lsx_vavg_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000201000000000b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000007fff8000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001008100000005; ++ __m128i_out = __lsx_vavg_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfc01fd1300000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfe00fd1400010000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000fff7fc01; ++ *((unsigned long *)&__m128i_result[1]) = 0xfe00fe8980000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xff007e8a7ffc7e00; ++ __m128i_out = __lsx_vavg_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vavg_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffe00000002; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffff46000000ba; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffa30000005c; ++ __m128i_out = __lsx_vavg_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000070007; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000007ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000068; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000038003; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000040033; ++ __m128i_out = __lsx_vavg_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff0000ac26; ++ *((unsigned long *)&__m128i_op1[0]) = 
0x00ff000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff80005613; ++ *((unsigned long *)&__m128i_result[0]) = 0x007f800000000000; ++ __m128i_out = __lsx_vavg_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000040000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000040000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x3fc000005fc00000; ++ *((unsigned long *)&__m128i_result[0]) = 0x3fc000005fc00000; ++ __m128i_out = __lsx_vavg_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000020000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000010000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0002000200000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100000000; ++ __m128i_out = __lsx_vavg_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000fffe0001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff0001fffe; ++ *((unsigned long 
*)&__m128i_result[1]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff0000ffff; ++ __m128i_out = __lsx_vavg_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-2.c +new file mode 100644 +index 000000000..1b0d879e4 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-2.c +@@ -0,0 +1,308 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 
0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f7f7f7f7f7f7f7f; ++ __m128i_out = __lsx_vavg_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f7f7f7f7f7f7f7f; ++ __m128i_out = __lsx_vavg_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f7f7f7f7f7f7f7f; ++ __m128i_out = __lsx_vavg_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000100000001000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x37b951002d81a921; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000400000004c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000047404f4f040d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000082000000826; ++ *((unsigned long *)&__m128i_result[0]) = 0x1b5c4c203e685617; ++ __m128i_out = __lsx_vavg_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00c2758000bccf42; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00a975be00accf03; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00c2758000bccf42; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00a975be00accf03; ++ *((unsigned long *)&__m128i_result[1]) = 0x00c2758000bccf42; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x00a975be00accf03; ++ __m128i_out = __lsx_vavg_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0100000001000100; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0100010000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffff732a; ++ *((unsigned long *)&__m128i_result[1]) = 0x807f7fff807f807f; ++ *((unsigned long *)&__m128i_result[0]) = 0x807f807f7fff3995; ++ __m128i_out = __lsx_vavg_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000007f7f7f7f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffff0; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000003fbf3fbf; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fff7fff7fff7ff8; ++ __m128i_out = __lsx_vavg_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_op0[0]) = 0x353c8cc4b1ec5b09; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff00000000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x8080008000808080; ++ *((unsigned long *)&__m128i_result[0]) = 0x1a9e466258f62d84; ++ __m128i_out = __lsx_vavg_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vavg_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000158; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000000000ac; ++ __m128i_out = __lsx_vavg_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x9c9c9c9c00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000020; ++ *((unsigned long *)&__m128i_result[1]) = 0x4e4e4e4e00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000010; ++ __m128i_out = __lsx_vavg_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000868686868686; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1e1e1e1e1e1e1e1e; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1e1e1e1e1e1e1e1e; ++ *((unsigned long *)&__m128i_result[1]) = 0x0f0f0f0f0f0f0f0f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0f0f525252525252; ++ __m128i_out = 
__lsx_vavg_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000014eb54ab; ++ *((unsigned long *)&__m128i_op0[0]) = 0x14eb6a002a406a00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffdfdc0d; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000a752a55; ++ *((unsigned long *)&__m128i_result[0]) = 0x0a753500950fa306; ++ __m128i_out = __lsx_vavg_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffff00010000fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffff00010000fff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffff00010000fff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffff00010000fff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffff00010000fff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffff00010000fff; ++ __m128i_out = __lsx_vavg_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000002ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000017fffffff; ++ __m128i_out = __lsx_vavg_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0101000100010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0101030100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0080800000008000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0080818000008000; ++ __m128i_out = __lsx_vavg_wu (__m128i_op0, 
__m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000200000002; ++ __m128i_out = __lsx_vavg_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0017004800c400f9; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ed001a00580070; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffff7ffffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x800b7fe38062007b; ++ *((unsigned long *)&__m128i_result[0]) = 0x0076800d802c0037; ++ __m128i_out = __lsx_vavg_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff80ffa2fff0ff74; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff76ffd8ffe6ffaa; ++ *((unsigned long *)&__m128i_op1[1]) = 0xc0b4d1a5f8babad3; ++ *((unsigned long *)&__m128i_op1[0]) = 0xbbc8ecc5f3ced5f3; ++ *((unsigned long *)&__m128i_result[1]) = 0xe01ae8a3fc55dd23; ++ *((unsigned long *)&__m128i_result[0]) = 0xdd9ff64ef9daeace; ++ __m128i_out = __lsx_vavg_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000007fffffff; ++ __m128i_out = __lsx_vavg_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3f80000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3f80000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ff000000ff00; ++ *((unsigned long *)&__m128i_result[1]) = 0x1fc0000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x1fc07f8000007f80; ++ __m128i_out = __lsx_vavg_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000043cf26c7; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000e31d4cae8636; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000021e79364; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000718ea657431b; ++ __m128i_out = __lsx_vavg_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7ff8000000000000; ++ __m128i_out = __lsx_vavg_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffff80ffff7e02; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00feff8000ff80ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf931fd04f832fe02; ++ *((unsigned long *)&__m128i_result[1]) = 0x80007fc000003f00; ++ *((unsigned long *)&__m128i_result[0]) = 0x7d187e427c993f80; ++ __m128i_out = __lsx_vavg_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-1.c +new file mode 100644 +index 000000000..4b7262537 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-1.c +@@ -0,0 +1,299 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op0[0]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0040000000ff00ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0040000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0020000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0020c00000000000; ++ __m128i_out = __lsx_vavgr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xb9fe3640e4eb1b18; ++ *((unsigned long *)&__m128i_op0[0]) = 0x800000005b4b1b18; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffd000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xdcfe1b20f2f60e0c; ++ *((unsigned long *)&__m128i_result[0]) = 0xc00000002e260e0c; ++ __m128i_out = __lsx_vavgr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x111110ff11111141; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1111113111111121; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfbffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7bffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x060808ff08080820; ++ *((unsigned long *)&__m128i_result[0]) = 0x4608081808080810; ++ __m128i_out = __lsx_vavgr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000010; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000010; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000fff0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000ac26; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff80000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000060000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000003000000d613; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000c0000000; ++ __m128i_out = __lsx_vavgr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffe5; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffe5; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffff2; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff2; ++ __m128i_out = __lsx_vavgr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000073; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000002a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000003a; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000015; ++ __m128i_out = __lsx_vavgr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000004000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfff8004000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000002000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffc002000000000; ++ __m128i_out = __lsx_vavgr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000002000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffc002000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000002000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffc002000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000002000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffc002000000000; ++ __m128i_out = __lsx_vavgr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000ff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000ff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000ff00000000; ++ __m128i_out = __lsx_vavgr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000007fff0018; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000003fff800c; ++ __m128i_out = __lsx_vavgr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0280000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7500000075000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7500000075000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x3a8000003a800000; ++ *((unsigned long *)&__m128i_result[0]) = 0x3bc000003a800000; ++ __m128i_out = __lsx_vavgr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00007d1800007c99; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0a0000001e000000; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x0a621b3ebe5e1c02; ++ *((unsigned long *)&__m128i_result[1]) = 0x04ffc0000f000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x05314c2bdf2f4c4e; ++ __m128i_out = __lsx_vavgr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000002000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000002000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000001000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001000000000; ++ __m128i_out = __lsx_vavgr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff80000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x3fc000003fc00000; ++ *((unsigned long *)&__m128i_result[0]) = 0x3fc000003fc00000; ++ __m128i_out = __lsx_vavgr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x3fffffffc0000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_result[1]) = 0xff807f807f807f80; ++ *((unsigned long *)&__m128i_result[0]) = 0xff807f807f807f80; ++ __m128i_out = __lsx_vavgr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000280000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000140001; ++ __m128i_out = __lsx_vavgr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffff46; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00fe00fe00fe00fe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00fe00fe00fe0045; ++ *((unsigned long *)&__m128i_result[1]) = 0x007f007f007f007e; ++ *((unsigned long *)&__m128i_result[0]) = 0x007f007f007effc6; ++ __m128i_out = __lsx_vavgr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x67eb85afb2ebb000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_result[1]) = 0x33f5c2d7d975d7fe; ++ *((unsigned long *)&__m128i_result[0]) = 0xe4423f7b769f8ffe; ++ __m128i_out = __lsx_vavgr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000003ff8; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff9dff9dff9dff9d; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffceffceffcf1fcb; ++ __m128i_out = __lsx_vavgr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3a8000003a800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3bc000003a800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00fe00fe7fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x1d4000001d400000; ++ *((unsigned long *)&__m128i_result[0]) = 0x1e5f007f5d400000; ++ __m128i_out = __lsx_vavgr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000800000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000400000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000007f80; ++ __m128i_out = __lsx_vavgr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-2.c +new file mode 100644 +index 000000000..22908b1ea +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-2.c +@@ -0,0 +1,317 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, 
lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffff8f8dada; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff01018888; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffff8f8dada; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff01018888; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffff8f8dada; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff01018888; ++ __m128i_out = __lsx_vavgr_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x4080808080808080; ++ __m128i_out = __lsx_vavgr_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000010000003f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000010000003f; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000010000003f; ++ __m128i_out = __lsx_vavgr_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffffffff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1817161517161514; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1615141315141312; ++ *((unsigned long *)&__m128i_result[1]) = 0x0c0c8b8a8b8b0b0a; ++ *((unsigned long *)&__m128i_result[0]) = 0x8b8a8a898a8a8909; ++ __m128i_out = __lsx_vavgr_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000208000002080; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000208000002080; ++ __m128i_out = __lsx_vavgr_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffd60015; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x80808080806b000b; ++ __m128i_out = __lsx_vavgr_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fff010181010102; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff81010102; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fff010181010102; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fffffff81010102; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fff010181010102; ++ *((unsigned long *)&__m128i_result[0]) = 
0x7fffffff81010102; ++ __m128i_out = __lsx_vavgr_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xc1bdceee242071db; ++ *((unsigned long *)&__m128i_op1[0]) = 0xe8c7b756d76aa578; ++ *((unsigned long *)&__m128i_result[1]) = 0xe0dee7779210b8ed; ++ *((unsigned long *)&__m128i_result[0]) = 0xf463dbabebb5d2bc; ++ __m128i_out 
= __lsx_vavgr_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff80000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff80000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff80000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff80000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xff80000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xff80000000000000; ++ __m128i_out = __lsx_vavgr_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_hu (__m128i_op0, 
__m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000800000008; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000002bfd9461; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000400400004004; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000015ff4a31; ++ __m128i_out = __lsx_vavgr_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x2a7b7c9260f90ee2; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1b1c6cdfd57f5736; ++ *((unsigned long *)&__m128i_result[1]) = 0x153e3e49307d0771; ++ *((unsigned long *)&__m128i_result[0]) = 0x0d8e36706ac02b9b; ++ __m128i_out = __lsx_vavgr_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xdd6156076967d8c9; ++ *((unsigned long *)&__m128i_op0[0]) = 0x2e3ab5266375e71b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_result[1]) = 0x6eb12b0634b46c67; ++ *((unsigned long *)&__m128i_result[0]) = 0x171d5a9531bb7390; ++ __m128i_out = __lsx_vavgr_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000f50000000900; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000090900000998; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00007a8000000480; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000485000004cc; ++ __m128i_out = __lsx_vavgr_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00003ff000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000fffc00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffc001fffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00001ff800000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x7ffe800e80000000; ++ __m128i_out = __lsx_vavgr_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffff53d9; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff000001ffff9515; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000007fffa9ed; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f8000017fffca8b; ++ __m128i_out = __lsx_vavgr_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffdfffffff8; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7ffffffc; ++ __m128i_out = __lsx_vavgr_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000001fffeff98; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0014ffe4ff76ffc4; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3131313131313131; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000017fff7fcc; ++ *((unsigned long *)&__m128i_result[0]) = 0x18a3188b9854187b; ++ __m128i_out = __lsx_vavgr_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff000000000000; ++ __m128i_out = __lsx_vavgr_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000001c88bf0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000001c88bf0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000001c88bf0; ++ __m128i_out = __lsx_vavgr_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned 
long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x807fffff80800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0002000000000007; ++ *((unsigned long *)&__m128i_result[1]) = 0x8003000000020000; ++ *((unsigned long *)&__m128i_result[0]) = 0x4040ffffc0400004; ++ __m128i_out = __lsx_vavgr_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000010000010101; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0101000001000100; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000008000008080; ++ *((unsigned long *)&__m128i_result[0]) = 0x8080800000800080; ++ __m128i_out = __lsx_vavgr_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +-- +2.33.0 + diff --git a/LoongArch-Add-tests-for-SX-vector-vbitclr-vbitclri-v.patch b/LoongArch-Add-tests-for-SX-vector-vbitclr-vbitclri-v.patch new file mode 100644 index 0000000000000000000000000000000000000000..10f72c8676991859e2fdd4872588609ba99df5ee --- /dev/null +++ b/LoongArch-Add-tests-for-SX-vector-vbitclr-vbitclri-v.patch @@ -0,0 +1,3324 @@ +From 0b75b581703b0eb1eb9ca9e898255de7f4cb51ad Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Tue, 12 Sep 2023 11:20:44 +0800 +Subject: [PATCH 092/124] LoongArch: Add tests for SX vector + vbitclr/vbitclri/vbitrev/vbitrevi/ + vbitsel/vbitseli/vbitset/vbitseti/vclo/vclz/vpcnt instructions. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lsx/lsx-vbitclr.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vbitclri.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vbitrev.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vbitrevi.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vbitsel.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vbitseli.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vbitset.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vbitseti.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vclo.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vclz.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vpcnt.c: New test. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lsx/lsx-vbitclr.c | 461 ++++++++++++++++++ + .../loongarch/vector/lsx/lsx-vbitclri.c | 279 +++++++++++ + .../loongarch/vector/lsx/lsx-vbitrev.c | 407 ++++++++++++++++ + .../loongarch/vector/lsx/lsx-vbitrevi.c | 336 +++++++++++++ + .../loongarch/vector/lsx/lsx-vbitsel.c | 109 +++++ + .../loongarch/vector/lsx/lsx-vbitseli.c | 84 ++++ + .../loongarch/vector/lsx/lsx-vbitset.c | 371 ++++++++++++++ + .../loongarch/vector/lsx/lsx-vbitseti.c | 279 +++++++++++ + .../loongarch/vector/lsx/lsx-vclo.c | 266 ++++++++++ + .../loongarch/vector/lsx/lsx-vclz.c | 265 ++++++++++ + .../loongarch/vector/lsx/lsx-vpcnt.c | 350 +++++++++++++ + 11 files changed, 3207 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclr.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclri.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrev.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrevi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitsel.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseli.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitset.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseti.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclo.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclz.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpcnt.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclr.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclr.c +new file mode 100644 +index 000000000..411dcaa40 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclr.c +@@ -0,0 +1,461 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ 
++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000e0000000e0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00e0000000e00000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000e0000000e0; ++ __m128i_out = __lsx_vbitclr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vbitclr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000004000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfff8004000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x19df307a5d04acbb; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5ed032b06bde1ab6; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0080000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0080000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x19de307a5d04acba; ++ *((unsigned long *)&__m128i_result[0]) = 0x5ed032b06bde1ab6; ++ __m128i_out = __lsx_vbitclr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0018001800180018; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0018001800180018; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_w (__m128i_op0, 
__m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xd8248069ffe78077; ++ *((unsigned long *)&__m128i_op1[1]) = 0x85bd6b0e94d89998; ++ *((unsigned long *)&__m128i_op1[0]) = 0xd83c8081ffff808f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xd82480697f678077; ++ __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000006597cc3d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7505853d654185f5; ++ *((unsigned long *)&__m128i_op1[0]) = 0x01010000fefe0101; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000006595cc1d; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vbitclr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffe0000fffe0000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffe0000fffe0000; ++ __m128i_out = __lsx_vbitclr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x80000000fff7fc01; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000080000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x80000000fff6fc00; ++ __m128i_out = __lsx_vbitclr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fffff800; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000fffefffe; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000fffef800; ++ __m128i_out = __lsx_vbitclr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000001000100; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000001000100; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffefffffffe; ++ __m128i_out = __lsx_vbitclr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4101010141010100; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x4101010141010100; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x23b57fa16d39f7c8; ++ *((unsigned long *)&__m128i_op1[0]) = 0x161c0c363c200824; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000ffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000ffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000fefe00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000fefe00000000; ++ __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1ffffffff8001000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf0bd80bd80bd8000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fff7ffffffefffe; ++ *((unsigned long *)&__m128i_result[0]) = 0xdfffdfffdffffffe; ++ __m128i_out = __lsx_vbitclr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000037; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000001f0a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000036; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned 
long *)&__m128i_op0[1]) = 0x000100010001007c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000100000001007c; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000000010000; ++ __m128i_out = __lsx_vbitclr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfefa000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfefa000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x67b7cf643c9d636a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x39d70e366f547977; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0002ffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x66b34f643c9c626a; ++ *((unsigned long *)&__m128i_result[0]) = 0x38d60e366e547876; ++ __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x2020202020207fff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000007fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m128i_result[0]) = 0x2020202020207f7f; ++ __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7ff8000000000000; ++ 
*((unsigned long *)&__m128i_op0[0]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[1]) = 0x7ef8000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7ef8000000000000; ++ __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000077f97; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffeff7f0000; ++ __m128i_out = __lsx_vbitclr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x14ccc6320176a4d2; ++ *((unsigned long *)&__m128i_op0[0]) = 0x685670d37e80682a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000007fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x14ccc6320076a4d2; ++ *((unsigned long *)&__m128i_result[0]) = 0x685670d27e00682a; ++ __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000001000; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000000000001000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00001b4a00007808; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000001000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000001000; ++ __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0100010001000100; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000001000100; ++ __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5d7f5d807fea807f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x5d7f5d007f6a007f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000fffefffe; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000fffefffe; ++ __m128i_out = __lsx_vbitclr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x207fffff22bd04fb; ++ *((unsigned long *)&__m128i_op0[0]) = 0x207fffff22bd04fb; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000002000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000002000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x207fffff22bd04fa; ++ *((unsigned long *)&__m128i_result[0]) = 0x207fffff22bd04fa; ++ __m128i_out = __lsx_vbitclr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffefffefffefffe; ++ __m128i_out = __lsx_vbitclr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000014; ++ 
*((unsigned long *)&__m128i_op1[1]) = 0x0001000101010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000014; ++ __m128i_out = __lsx_vbitclr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000000; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100000000; ++ __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00007fff7fff8000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000b81c8382; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000077af9450; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00007efe7f7f8000; ++ __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000667ae56; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000020; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x00000000000004ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000667ae56; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000020; ++ __m128i_out = __lsx_vbitclr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclri.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclri.c +new file mode 100644 +index 000000000..5d7d66e06 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclri.c +@@ -0,0 +1,279 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vbitclri_h (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000b0000000b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000201000000000b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000b0000000b; ++ *((unsigned long *)&__m128i_result[0]) = 0x000201000000000b; ++ __m128i_out = __lsx_vbitclri_d (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x004200a000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x004200a000200000; ++ *((unsigned long *)&__m128i_result[1]) = 0x004200a000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x004200a000200000; ++ __m128i_out = __lsx_vbitclri_h (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000efffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclri_w (__m128i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000002ff5; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc2cf2471e9b7d7a4; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000000027f5; ++ *((unsigned long *)&__m128i_result[0]) = 0xc2cf2471e9b7d7a4; ++ __m128i_out = __lsx_vbitclri_w (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7505443065413aed; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0100d6effefd0498; ++ *((unsigned long *)&__m128i_result[1]) = 0x7404443064403aec; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000d6eefefc0498; ++ __m128i_out = __lsx_vbitclri_b (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclri_d (__m128i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclri_d (__m128i_op0, 0x36); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x64b680a2ae3af8ca; ++ *((unsigned long *)&__m128i_op0[0]) = 0x161c0c363c200826; ++ *((unsigned long *)&__m128i_result[1]) = 0x64b680a2ae3af8c8; ++ *((unsigned long *)&__m128i_result[0]) = 0x161c0c363c200824; ++ __m128i_out = __lsx_vbitclri_d (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xbfffbfffbfffbffe; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclri_h (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff807f807f807f80; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff807f807f807f80; ++ *((unsigned long *)&__m128i_result[1]) = 0xfb807b807b807b80; ++ *((unsigned long *)&__m128i_result[0]) = 0xfb807b807b807b80; ++ __m128i_out = __lsx_vbitclri_b (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_result[1]) = 
0x1000100010001000; ++ *((unsigned long *)&__m128i_result[0]) = 0x1000100010001000; ++ __m128i_out = __lsx_vbitclri_w (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclri_w (__m128i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclri_h (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclri_w (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfbffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfbffffffffffffff; ++ __m128i_out = __lsx_vbitclri_d (__m128i_op0, 0x3a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x9941d1d5f4ba9d08; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x9941d155f43a9d08; ++ __m128i_out = __lsx_vbitclri_h (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffbfffffffbf; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffbfffffffbf; ++ __m128i_out = __lsx_vbitclri_w (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x03f1e3d28b1a8a1a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x03f1e3d28b1a8a1a; ++ __m128i_out = __lsx_vbitclri_d (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffda6f; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffe3d7; ++ *((unsigned long *)&__m128i_result[1]) = 0xfefffffffeffda6f; ++ *((unsigned long *)&__m128i_result[0]) = 0xfefffffffeffe3d7; ++ __m128i_out = __lsx_vbitclri_w (__m128i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vbitclri_d (__m128i_op0, 0x26); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclri_d (__m128i_op0, 0x30); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_result[1]) 
= 0x0000000000000040; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000040; ++ __m128i_out = __lsx_vbitclri_b (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8080808080638063; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8080808080638063; ++ __m128i_out = __lsx_vbitclri_h (__m128i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0004000400040004; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0004000400040004; ++ *((unsigned long *)&__m128i_result[1]) = 0x0004000400040004; ++ *((unsigned long *)&__m128i_result[0]) = 0x0004000400040004; ++ __m128i_out = __lsx_vbitclri_h (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclri_w (__m128i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000005; ++ __m128i_out = __lsx_vbitclri_d (__m128i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclri_b (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclri_w (__m128i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclri_w (__m128i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclri_d (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000800000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclri_w (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000200008; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000200000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000200008; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000200000; ++ __m128i_out = __lsx_vbitclri_b (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclri_b (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0002000200000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0002000200000001; ++ __m128i_out = __lsx_vbitclri_b (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xdfdfdfdfdfdfdfdf; ++ *((unsigned long *)&__m128i_result[0]) = 0xdfdfdfdfdfdfdfdf; ++ __m128i_out = __lsx_vbitclri_b (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclri_h (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrev.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrev.c +new file mode 100644 +index 000000000..ba4f4b6dc +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrev.c +@@ -0,0 +1,407 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, 
int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1b71a083b3dec3cd; ++ *((unsigned long *)&__m128i_op1[0]) = 0x373a13323b4cdbc1; ++ *((unsigned long *)&__m128i_result[1]) = 0x0802010808400820; ++ *((unsigned long *)&__m128i_result[0]) = 0x8004080408100802; ++ __m128i_out = __lsx_vbitrev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vbitrev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vbitrev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000800080008000; ++ __m128i_out = __lsx_vbitrev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000501000002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000008; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000040100; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101010400100203; ++ *((unsigned long *)&__m128i_result[0]) = 0x0103010301020109; ++ __m128i_out = __lsx_vbitrev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffbe6ed563; ++ *((unsigned long *)&__m128i_op1[1]) = 0xd0b1ffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x9d519ee8d2d84f1d; ++ *((unsigned long *)&__m128i_result[1]) = 0xfefd7f7f7f7f7f7e; ++ *((unsigned long *)&__m128i_result[0]) = 0xdffdbffeba6f5543; ++ __m128i_out = __lsx_vbitrev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7da9b23a624082fd; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x2002040404010420; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010180800101; ++ __m128i_out = __lsx_vbitrev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000fffe0000ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001ffff0001fffe; ++ __m128i_out = __lsx_vbitrev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0e7ffffc01fffffc; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000003f803f4; ++ *((unsigned long *)&__m128i_result[1]) = 0x1000000010000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100100000; ++ __m128i_out = __lsx_vbitrev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m128i_op0[0]) = 0x040004000400040d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0501050105010501; ++ *((unsigned long *)&__m128i_result[0]) = 0x050105010501050c; ++ __m128i_out = __lsx_vbitrev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vbitrev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8000000080000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x000100010001fffe; ++ __m128i_out = __lsx_vbitrev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000007f00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x7ffffffeffffffff; ++ __m128i_out = __lsx_vbitrev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0040000000400000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0040000000400000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0141010101410101; ++ *((unsigned long *)&__m128i_result[0]) = 0x0141010101410101; ++ __m128i_out = __lsx_vbitrev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x65b780a3ae3bf8cb; ++ *((unsigned long *)&__m128i_op0[0]) = 0x161d0c363c200826; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x65b780a2ae3bf8ca; ++ *((unsigned long *)&__m128i_result[0]) = 0x161d0c373c200827; ++ __m128i_out = __lsx_vbitrev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 
0xfe01fe01fe01fe01; ++ *((unsigned long *)&__m128i_result[0]) = 0xfe01fe01fe01fe01; ++ __m128i_out = __lsx_vbitrev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vbitrev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000003bfb4000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vbitrev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vbitrev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vbitrev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0040004000400040; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0040004000400040; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vbitrev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000021ffffffdf; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000e60; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1ff85ffe2ae5d973; ++ *((unsigned long *)&__m128i_result[1]) = 0x00010020fffeffde; ++ *((unsigned long *)&__m128i_result[0]) = 0x0100400100200e68; ++ __m128i_out = __lsx_vbitrev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00d3012acc56f9bb; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000001021; ++ *((unsigned long *)&__m128i_result[1]) = 0x0108020410400208; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010101010102; ++ __m128i_out = __lsx_vbitrev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000ff0000ff86; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101010101010101; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x010101fe0101fe87; ++ __m128i_out = __lsx_vbitrev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x343d8dc5b0ed5a08; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_result[0]) = 0x353c8cc4b1ec5b09; ++ __m128i_out = __lsx_vbitrev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0037ffc8d7ff2800; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff00ff00ffffff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0038d800ff000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00fffe00fffffe00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0137ffc9d7fe2801; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f00ff017fffff01; ++ __m128i_out = __lsx_vbitrev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000200000002000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001200100012001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vbitrev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m128i_result[0]) = 
0xfffffffe7fffffff; ++ __m128i_out = __lsx_vbitrev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vbitrev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vbitrev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000000010000000; ++ __m128i_out = __lsx_vbitrev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffdfffdfffdfffd; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffdfffdfffdfffd; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffdfffcfffdfffc; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffdfffcfffdfffc; ++ 
__m128i_out = __lsx_vbitrev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vbitrev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001ffff0101ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001ffff0001ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000101010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0103fefd0303fefd; ++ *((unsigned long *)&__m128i_result[0]) = 0x0103fefd0103fefd; ++ __m128i_out = __lsx_vbitrev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vbitrev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x6a5d5b056f2f4978; ++ *((unsigned long *)&__m128i_op1[0]) = 0x17483c07141b5971; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000002001000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000008000020000; ++ __m128i_out = __lsx_vbitrev_w 
(__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffefffefffefffe; ++ __m128i_out = __lsx_vbitrev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000001ce28f9c0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000004e06b0890; ++ *((unsigned long *)&__m128i_result[1]) = 0xfefefefdbffefdfe; ++ *((unsigned long *)&__m128i_result[0]) = 0xfefefeeffef7fefe; ++ __m128i_out = __lsx_vbitrev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x003ffffe00800000; ++ *((unsigned long *)&__m128i_result[1]) = 0xff810001ff810002; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f804000ff810001; ++ __m128i_out = __lsx_vbitrev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff1affff01001fe0; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff1aff6d02834d70; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000034; ++ *((unsigned long *)&__m128i_result[1]) = 0xfe1bfefe00011ee1; ++ *((unsigned long *)&__m128i_result[0]) = 0xfe1bfe6c03824c60; ++ __m128i_out = __lsx_vbitrev_b (__m128i_op0, __m128i_op1); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x41945926d8000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00001e5410082727; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00007f7f00107f7f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001001001000080; ++ *((unsigned long *)&__m128i_result[0]) = 0x4195d926d8018000; ++ __m128i_out = __lsx_vbitrev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7f8100017f810001; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f8100017f810001; ++ __m128i_out = __lsx_vbitrev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x545501550001113a; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xd45501550001113a; ++ __m128i_out = __lsx_vbitrev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrevi.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrevi.c +new file mode 100644 +index 000000000..9739182cd +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrevi.c +@@ -0,0 +1,336 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ 
__m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000003004; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000400000004000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000400000007004; ++ __m128i_out = __lsx_vbitrevi_w (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfeffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfeffffffffffffff; ++ __m128i_out = __lsx_vbitrevi_d (__m128i_op0, 0x38); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vbitrevi_w (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x4000400040004000; ++ *((unsigned long *)&__m128i_result[0]) = 0x4000400040004000; ++ __m128i_out = __lsx_vbitrevi_h (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x000000007fff8000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001008100000005; ++ *((unsigned long *)&__m128i_result[1]) = 0x0800080077ff8800; ++ *((unsigned long *)&__m128i_result[0]) = 0x0801088108000805; ++ __m128i_out = __lsx_vbitrevi_h (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0202020202020202; ++ *((unsigned long *)&__m128i_result[0]) = 0x0202020202020202; ++ __m128i_out = __lsx_vbitrevi_b (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe86ce7eb5e9ce950; ++ *((unsigned long *)&__m128i_result[1]) = 0x0404040404040404; ++ *((unsigned long *)&__m128i_result[0]) = 0xec68e3ef5a98ed54; ++ __m128i_out = __lsx_vbitrevi_b (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000200010; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000400000004000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000400000204010; ++ __m128i_out = __lsx_vbitrevi_w (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m128i_result[0]) = 0x0400040004000400; ++ __m128i_out = __lsx_vbitrevi_h (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffff02; ++ *((unsigned long *)&__m128i_result[1]) = 0x0400040004000400; 
++ *((unsigned long *)&__m128i_result[0]) = 0x04000400fbfffb02; ++ __m128i_out = __lsx_vbitrevi_h (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010000000100000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0010000000100000; ++ __m128i_out = __lsx_vbitrevi_w (__m128i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m128i_result[0]) = 0x040004000400040d; ++ __m128i_out = __lsx_vbitrevi_h (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000004f804f81; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000004f804f80; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000004fc04f81; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000004fc04f80; ++ __m128i_out = __lsx_vbitrevi_d (__m128i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0040004000400040; ++ *((unsigned long *)&__m128i_result[0]) = 0x0040004000400040; ++ __m128i_out = __lsx_vbitrevi_h (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x1010101010101010; ++ *((unsigned long *)&__m128i_result[0]) = 0xefefefefefefefef; ++ __m128i_out = __lsx_vbitrevi_b (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m128i_result[0]) = 0x4040404040404040; ++ __m128i_out = __lsx_vbitrevi_b (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x21201f1e1d1c1b1a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1918171615141312; ++ *((unsigned long *)&__m128i_result[1]) = 0x01203f1e3d1c3b1a; ++ *((unsigned long *)&__m128i_result[0]) = 0x3918371635143312; ++ __m128i_out = __lsx_vbitrevi_h (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x61608654a2d4f6da; ++ *((unsigned long *)&__m128i_result[1]) = 0xfff0800080008000; ++ *((unsigned long *)&__m128i_result[0]) = 0xe160065422d476da; ++ __m128i_out = __lsx_vbitrevi_h (__m128i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x37c0001000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x37c0001000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x77c0401040004000; ++ *((unsigned long *)&__m128i_result[0]) = 0x77c0401040004000; ++ __m128i_out = __lsx_vbitrevi_h (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x77c0404a4000403a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x77c03fd640003fc6; ++ *((unsigned long *)&__m128i_result[1]) = 0x75c0404a4200403a; ++ *((unsigned long *)&__m128i_result[0]) = 0x75c03fd642003fc6; ++ __m128i_out = __lsx_vbitrevi_w (__m128i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0808080808080808; 
++ *((unsigned long *)&__m128i_result[0]) = 0x0808080808080808; ++ __m128i_out = __lsx_vbitrevi_b (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_result[1]) = 0x0808080808280808; ++ *((unsigned long *)&__m128i_result[0]) = 0x0808080808280808; ++ __m128i_out = __lsx_vbitrevi_d (__m128i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffefffffffeff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000100fffffeff; ++ __m128i_out = __lsx_vbitrevi_w (__m128i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000010000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000010000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0404050404040404; ++ *((unsigned long *)&__m128i_result[0]) = 0x0404050404040404; ++ __m128i_out = __lsx_vbitrevi_b (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_result[0]) = 0x1000100010001000; ++ __m128i_out = __lsx_vbitrevi_h (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xbfbfbfbfbfbfbfbf; ++ *((unsigned long *)&__m128i_result[0]) = 0xbfbfbfbfbfbfbfbf; ++ __m128i_out = __lsx_vbitrevi_b (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000040000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000040000000; ++ __m128i_out = __lsx_vbitrevi_d (__m128i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000020000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000020000; ++ __m128i_out = __lsx_vbitrevi_d (__m128i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x2000200020002000; ++ *((unsigned long *)&__m128i_result[0]) = 0x2000200020002000; ++ __m128i_out = __lsx_vbitrevi_h (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x441ba9fcffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x181b2541ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x401fadf8fbfbfbfb; ++ *((unsigned long *)&__m128i_result[0]) = 0x1c1f2145fbfbfbfb; ++ __m128i_out = __lsx_vbitrevi_b (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000100; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000100; ++ __m128i_out = __lsx_vbitrevi_d (__m128i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffefff00001000; 
++ *((unsigned long *)&__m128i_result[0]) = 0xffffefff00001000; ++ __m128i_out = __lsx_vbitrevi_w (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[0]) = 0x8080808080808080; ++ __m128i_out = __lsx_vbitrevi_b (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000200000000; ++ __m128i_out = __lsx_vbitrevi_d (__m128i_op0, 0x21); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000002000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000002000; ++ __m128i_out = __lsx_vbitrevi_d (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000010000000100; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000010000000100; ++ __m128i_out = __lsx_vbitrevi_w (__m128i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xd6a09e662ab46b31; ++ *((unsigned long *)&__m128i_op0[0]) = 0x34b8122ef4054bb3; ++ *((unsigned long *)&__m128i_result[1]) = 0xd6e09e262af46b71; ++ *((unsigned long *)&__m128i_result[0]) = 0x34f8126ef4454bf3; ++ __m128i_out = __lsx_vbitrevi_h (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000200008; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000200000; ++ __m128i_out = __lsx_vbitrevi_d (__m128i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfefefefdbffefdfe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfefefeeffef7feff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfcfcfcffbdfcfffc; ++ *((unsigned long *)&__m128i_result[0]) = 0xfcfcfcedfcf5fcfd; ++ __m128i_out = __lsx_vbitrevi_b (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000555889; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000002580f01; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010000000455889; ++ *((unsigned long *)&__m128i_result[0]) = 0x0010000002480f01; ++ __m128i_out = __lsx_vbitrevi_w (__m128i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00060fbf00040fbf; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00020fbf00000fbf; ++ *((unsigned long *)&__m128i_result[1]) = 0x00060fbf02040fbf; ++ *((unsigned long *)&__m128i_result[0]) = 0x00020fbf02000fbf; ++ __m128i_out = __lsx_vbitrevi_d (__m128i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x400000003fffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x4000000040000000; ++ __m128i_out = __lsx_vbitrevi_w (__m128i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x021b7d24c9678a35; ++ *((unsigned long *)&__m128i_op0[0]) = 0x030298a6a1030a49; ++ *((unsigned long *)&__m128i_result[1]) = 
0x00197f26cb658837; ++ *((unsigned long *)&__m128i_result[0]) = 0x01009aa4a301084b; ++ __m128i_out = __lsx_vbitrevi_b (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_out = __lsx_vpickve2gr_h (__m128i_op0, 0x3); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000c6c60000c6c6; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000c6c58000c6b2; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000c6c40000c6c6; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000c6c78000c6b2; ++ __m128i_out = __lsx_vbitrevi_d (__m128i_op0, 0x21); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffff7fffffff7f; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffff7fffffff7f; ++ __m128i_out = __lsx_vbitrevi_w (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitsel.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitsel.c +new file mode 100644 +index 000000000..52ac9939f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitsel.c +@@ -0,0 +1,109 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000007fff; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitsel_v (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitsel_v (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000005050000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0505000005050505; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000d02540000007e; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000001400140014; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0505050505050505; ++ *((unsigned long *)&__m128i_op2[0]) = 0x03574e38e496cbc9; ++ *((unsigned long *)&__m128i_result[1]) = 0x0005000400000004; ++ *((unsigned long *)&__m128i_result[0]) = 0x0400001001150404; ++ __m128i_out = __lsx_vbitsel_v (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x2); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0080001300000013; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0080001300000013; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0080001300000013; ++ *((unsigned long *)&__m128i_result[0]) = 0x0080001300000013; ++ __m128i_out = __lsx_vbitsel_v (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x43d3e0000013e000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x43d3e0000013e000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitsel_v (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffe0001fffe0001; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffe0001fffe0001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xfffe0001fffe0001; ++ *((unsigned long *)&__m128i_op2[0]) = 0xfffe0001fffe0001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitsel_v (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffacdb6dbecac; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1f5533a694f902c0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x62cbf96e4acfaf40; ++ *((unsigned 
long *)&__m128i_op1[0]) = 0xf0bc9a5278285a4a; ++ *((unsigned long *)&__m128i_op2[1]) = 0xfffffacdb6dbecac; ++ *((unsigned long *)&__m128i_op2[0]) = 0x1f5533a694f902c0; ++ *((unsigned long *)&__m128i_result[1]) = 0x62cbf84c02cbac00; ++ *((unsigned long *)&__m128i_result[0]) = 0x1014120210280240; ++ __m128i_out = __lsx_vbitsel_v (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffff59; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffff59; ++ __m128i_out = __lsx_vbitsel_v (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseli.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseli.c +new file mode 100644 +index 000000000..f2d6fb042 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseli.c +@@ -0,0 +1,84 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int 
unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x6664666466646664; ++ *((unsigned long *)&__m128i_result[0]) = 0x6664666466646664; ++ __m128i_out = __lsx_vbitseli_b (__m128i_op0, __m128i_op1, 0x66); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffff7; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffff0000010000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x5d5d5d5d5d5d5d55; ++ __m128i_out = __lsx_vbitseli_b (__m128i_op0, __m128i_op1, 0x5d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x2); ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_result[1]) = 0x5959595959595959; ++ *((unsigned long *)&__m128i_result[0]) = 0x5959595959595959; ++ __m128i_out = __lsx_vbitseli_b (__m128i_op0, __m128i_op1, 0x59); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffd000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffd000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vbitseli_b (__m128i_op0, __m128i_op1, 0x3a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitseli_b (__m128i_op0, __m128i_op1, 0xaa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0b4c600000000002; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_result[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_result[0]) = 0x0004280808080808; ++ __m128i_out = __lsx_vbitseli_b (__m128i_op0, __m128i_op1, 0xa4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000004000000040; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00007770ffff9411; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000400000004c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00007770ffff941d; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000400000004c; ++ *((unsigned long *)&__m128i_result[0]) = 0x000047404f4f040d; ++ __m128i_out = __lsx_vbitseli_b (__m128i_op0, __m128i_op1, 0x4f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitset.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitset.c +new file mode 100644 +index 000000000..e05af675e +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitset.c +@@ -0,0 +1,371 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include 
"../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vbitset_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vbitset_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffe000ffffe000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffe000ffffe000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffe001ffffe001; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffe001ffffe001; ++ __m128i_out = __lsx_vbitset_w 
(__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000038335ca2777; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000800800000; ++ __m128i_out = __lsx_vbitset_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vbitset_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vbitset_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xf51cf8dad6040188; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0982e2daf234ed87; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xf51df8dbd6050189; ++ *((unsigned long *)&__m128i_result[0]) = 0x0983e2dbf235ed87; ++ __m128i_out = __lsx_vbitset_h (__m128i_op0, __m128i_op1); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfc01fcfefc02fdf7; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfe00fcfffe01fd01; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x5d5d5d5d5d5d5d55; ++ *((unsigned long *)&__m128i_result[1]) = 0xfc01fcfefc02fdf7; ++ *((unsigned long *)&__m128i_result[0]) = 0xfe00fcfffe21fd01; ++ __m128i_out = __lsx_vbitset_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fff7fc01; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x80000000fff7fc01; ++ __m128i_out = __lsx_vbitset_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vbitset_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffe00000004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff01010105; ++ __m128i_out = __lsx_vbitset_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000001c00ffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101010201808040; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010280808040; ++ __m128i_out = __lsx_vbitset_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x3f8000003f800001; ++ *((unsigned long *)&__m128i_result[0]) = 0x3f8000003f800001; ++ __m128i_out = __lsx_vbitset_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000010a000b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000104000800; ++ __m128i_out = __lsx_vbitset_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000897957687; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000408; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000010000000080; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000100; ++ __m128i_out = __lsx_vbitset_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); 
++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffe0001fffe0001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffe0001fffe0001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000200000002; ++ __m128i_out = __lsx_vbitset_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff994cb09c; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffc3639d96; ++ *((unsigned long *)&__m128i_op1[1]) = 0x20de27761210386d; ++ *((unsigned long *)&__m128i_op1[0]) = 0x34632935195a123c; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff994db09c; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffc7639d96; ++ __m128i_out = __lsx_vbitset_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000545cab1d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000081a83bea; ++ *((unsigned long *)&__m128i_op1[1]) = 0x13f9c5b60028a415; ++ *((unsigned long *)&__m128i_op1[0]) = 0x545cab1d81a83bea; ++ *((unsigned long *)&__m128i_result[1]) = 0x00400000547cab1d; ++ *((unsigned long *)&__m128i_result[0]) = 0x2000000081a83fea; ++ __m128i_out = __lsx_vbitset_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000038003; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000040033; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000008; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100080000; ++ __m128i_out = __lsx_vbitset_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_result[1]) = 0x0909090909090909; ++ *((unsigned long *)&__m128i_result[0]) = 0x0909090909090909; ++ __m128i_out = __lsx_vbitset_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00a600e000a600e0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x01500178010000f8; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0100000001000000; ++ __m128i_out = __lsx_vbitset_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vbitset_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfefbff06fffa0004; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfefeff04fffd0004; ++ *((unsigned long *)&__m128i_result[1]) = 0x4008804080040110; ++ *((unsigned long *)&__m128i_result[0]) = 0x4040801080200110; ++ __m128i_out = __lsx_vbitset_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vbitset_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vbitset_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vbitset_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x8101010181010101; ++ *((unsigned long *)&__m128i_result[0]) = 0x8101010181010101; ++ __m128i_out = __lsx_vbitset_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000020000; ++ 
*((unsigned long *)&__m128i_op0[0]) = 0x0000000000020000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101010101030101; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010101030101; ++ __m128i_out = __lsx_vbitset_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vbitset_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000100; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vbitset_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xd78cfd70b5f65d76; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5779108fdedda7e4; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xd78cfd70b5f65d77; ++ *((unsigned long *)&__m128i_result[0]) = 0x5779108fdedda7e5; ++ __m128i_out = __lsx_vbitset_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vbitset_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00004a1e00004a1e; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000100; ++ *((unsigned long *)&__m128i_result[0]) = 0x4000000040000000; ++ __m128i_out = __lsx_vbitset_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0007000000050000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0003000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0080000100200001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0008000200020002; ++ __m128i_out = __lsx_vbitset_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffff80ffff7e02; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00feff8000ff80ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0280000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffff81ffff7f03; ++ *((unsigned long *)&__m128i_result[0]) = 0x04ffff8101ff81ff; ++ __m128i_out = __lsx_vbitset_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4480000044800000; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x45c0000044800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00fe00fe7fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x4481000144810001; ++ *((unsigned long *)&__m128i_result[0]) = 0x45c04000c4808000; ++ __m128i_out = __lsx_vbitset_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3a8000003a800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3bc000003a800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00fe00fe7fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x3a8100013a810001; ++ *((unsigned long *)&__m128i_result[0]) = 0x7bc04000ba808000; ++ __m128i_out = __lsx_vbitset_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000cecd00004657; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000c90000011197; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000200000800000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100800000; ++ __m128i_out = __lsx_vbitset_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7f8000017f800001; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f8000017f800001; ++ __m128i_out = __lsx_vbitset_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseti.c 
b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseti.c +new file mode 100644 +index 000000000..540a724a7 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseti.c +@@ -0,0 +1,279 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0020002000200020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0020002000200020; ++ __m128i_out = __lsx_vbitseti_h (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0040000000ff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0040000000000000; ++ __m128i_out = __lsx_vbitseti_d (__m128i_op0, 0x36); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x54beed87bc3f2be1; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8024d8f6a494afcb; ++ *((unsigned long *)&__m128i_result[1]) = 0x54feed87bc3f2be1; ++ *((unsigned long *)&__m128i_result[0]) = 0x8064d8f6a494afcb; ++ __m128i_out = __lsx_vbitseti_d (__m128i_op0, 0x36); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000c400; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_result[0]) = 0x001000100010c410; ++ __m128i_out = __lsx_vbitseti_h (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x2e2b34ca59fa4c88; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3b2c8aefd44be966; ++ *((unsigned long *)&__m128i_result[1]) = 0x3e2b34ca59fa4c88; ++ *((unsigned long *)&__m128i_result[0]) = 0x3b2c8aefd44be966; ++ __m128i_out = __lsx_vbitseti_w (__m128i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000017fda829; ++ *((unsigned long *)&__m128i_result[1]) = 0x0040004000400040; ++ *((unsigned long *)&__m128i_result[0]) = 0x0040004017fda869; ++ __m128i_out = __lsx_vbitseti_h (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x800000ff000000ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x800000ff080000ff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vbitseti_d (__m128i_op0, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000000010000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000000010000; ++ __m128i_out = __lsx_vbitseti_w (__m128i_op0, 0x10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0004000000040000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0004000000040000; ++ __m128i_out = __lsx_vbitseti_w (__m128i_op0, 0x12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vbitseti_b (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xf51cf8dad6040188; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0982e2daf234ed87; ++ *((unsigned long *)&__m128i_result[1]) = 0xf51cf8dad6040188; ++ *((unsigned long *)&__m128i_result[0]) = 0x0982eadaf234ed87; ++ __m128i_out = __lsx_vbitseti_d (__m128i_op0, 0x2b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0002000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0002000000000000; ++ __m128i_out = __lsx_vbitseti_d (__m128i_op0, 0x31); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000006; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000000080000006; ++ __m128i_out = __lsx_vbitseti_w (__m128i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000080000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000080000000000; ++ __m128i_out = __lsx_vbitseti_d (__m128i_op0, 0x2b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000010000003f; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000030000003f; ++ __m128i_out = __lsx_vbitseti_w (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xe5e5e5e5e5e5e5e5; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe5e5e5e5e4e4e46d; ++ *((unsigned long *)&__m128i_result[1]) = 0xe5e5e5e5e5e5e5e5; ++ *((unsigned long *)&__m128i_result[0]) = 0xe5e5e5e5e4e4e46d; ++ __m128i_out = __lsx_vbitseti_w (__m128i_op0, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vbitseti_b (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_result[0]) = 0x1000100010001000; ++ __m128i_out = __lsx_vbitseti_h (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0800080008000800; ++ *((unsigned long *)&__m128i_result[0]) = 0x0800080008000800; ++ __m128i_out = __lsx_vbitseti_h (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0100000001000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0100000001000000; ++ __m128i_out = __lsx_vbitseti_w (__m128i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000007fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m128i_result[0]) = 0x2020202020207fff; ++ __m128i_out = __lsx_vbitseti_b (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100013fa0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000900000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000900013fa0; ++ __m128i_out = __lsx_vbitseti_d (__m128i_op0, 0x23); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x40f3fa0000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x3ff0008000800080; ++ *((unsigned long *)&__m128i_result[0]) = 0x40f3fa8000800080; ++ __m128i_out = __lsx_vbitseti_h (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000040000000000; ++ __m128i_out = __lsx_vbitseti_d (__m128i_op0, 0x2a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0404040404040404; ++ *((unsigned long *)&__m128i_result[0]) = 0xc404040404040404; ++ __m128i_out = __lsx_vbitseti_b (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000040804000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000040804000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000040a04000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000040a04000; ++ __m128i_out = __lsx_vbitseti_d (__m128i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vbitseti_w (__m128i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1f81e3779b97f4a8; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff02000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x1f81e3779b97f4a8; ++ __m128i_out = __lsx_vbitseti_d (__m128i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000008000000080; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000008000000080; ++ __m128i_out = __lsx_vbitseti_w (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0100010001000101; ++ *((unsigned long *)&__m128i_result[0]) = 0x0100010001000101; ++ __m128i_out = __lsx_vbitseti_h (__m128i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0808080808080808; ++ *((unsigned long *)&__m128i_result[0]) = 0x0808080808080808; ++ __m128i_out = __lsx_vbitseti_b (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000010000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000010000000; ++ __m128i_out = __lsx_vbitseti_d (__m128i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0002711250a27112; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00d2701294027112; ++ *((unsigned long *)&__m128i_result[1]) = 0x080a791a58aa791a; ++ *((unsigned long *)&__m128i_result[0]) = 0x08da781a9c0a791a; ++ __m128i_out = __lsx_vbitseti_b (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0303030303030303; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0303030303030303; ++ *((unsigned long *)&__m128i_result[1]) = 0x1313131313131313; ++ *((unsigned long *)&__m128i_result[0]) = 0x1313131313131313; ++ __m128i_out = __lsx_vbitseti_b (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000000000000; ++ __m128i_out = __lsx_vbitseti_d (__m128i_op0, 0x30); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000002000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000002000000000; ++ __m128i_out = __lsx_vbitseti_d (__m128i_op0, 0x25); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfff0008000000080; ++ *((unsigned long *)&__m128i_result[0]) = 0xfff0008000000080; ++ __m128i_out = __lsx_vbitseti_w (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclo.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclo.c +new file mode 100644 +index 000000000..2c1099a04 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclo.c +@@ -0,0 +1,266 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000005555555554; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000005555555554; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_b 
(__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000040; ++ __m128i_out = __lsx_vclo_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xe2ecd48adedc7c82; ++ *((unsigned long *)&__m128i_op0[0]) = 0x25d666472b01d18d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0303020102020001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0002000000000201; ++ __m128i_out = __lsx_vclo_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000fefefe6a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000c2bac2c2; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000007070700; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000002010202; ++ __m128i_out = __lsx_vclo_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000007e8a60; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000001edde; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000003; ++ __m128i_out = __lsx_vclo_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000002000000020; ++ __m128i_out = __lsx_vclo_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x05d0ae6002e8748e; ++ *((unsigned long *)&__m128i_op0[0]) = 0xcd1de80217374041; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000200000000; ++ __m128i_out = __lsx_vclo_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned 
long *)&__m128i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000001fffff59; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_result[0]) = 0x0010001000100010; ++ __m128i_out = __lsx_vclo_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000aaaa; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vclo_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x2); ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffe500ffffc085; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffc000ffffc005; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000001300000012; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001200000012; ++ __m128i_out = __lsx_vclo_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000001000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001000000000; ++ __m128i_out = __lsx_vclo_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffc0ff80ff800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000a00000009; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vclo_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vclo_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x413e276583869d79; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f7f017f9d8726d3; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vclo_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffbfffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc090380000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000200000000d; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000200000000; ++ __m128i_out = __lsx_vclo_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010012; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fec20704; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffff4; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffff4; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000200000001c; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000200000001c; ++ __m128i_out = __lsx_vclo_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclz.c 
b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclz.c +new file mode 100644 +index 000000000..12df2c670 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclz.c +@@ -0,0 +1,265 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_result[0]) = 0x0010000800100008; ++ __m128i_out = __lsx_vclz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000001fc1a568; ++ *((unsigned long *)&__m128i_op0[0]) = 0x02693fe0e7beb077; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010001000030000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0006000200000000; ++ __m128i_out = __lsx_vclz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000002000000020; ++ __m128i_out = __lsx_vclz_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_result[0]) = 0x0010001000100010; ++ __m128i_out = __lsx_vclz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000002000000020; ++ __m128i_out = __lsx_vclz_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000002000000020; ++ __m128i_out = __lsx_vclz_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000002000000020; ++ __m128i_out = __lsx_vclz_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f7f000b000b000b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000b000b010a000b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101080408040804; ++ *((unsigned long *)&__m128i_result[0]) = 0x0804080407040804; ++ __m128i_out = 
__lsx_vclz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1ffffffff8001000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf0bd80bd80bd8000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclz_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000100010000fe7c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000100010000fe01; ++ *((unsigned long *)&__m128i_result[1]) = 0x000f000f00100000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000f000f00100000; ++ __m128i_out = __lsx_vclz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_result[0]) = 0x0808080808080808; ++ __m128i_out = __lsx_vclz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_result[0]) = 0x0808080808080808; ++ __m128i_out = __lsx_vclz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x41dfffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0100000008080808; ++ *((unsigned long *)&__m128i_result[0]) = 0x0808080808080808; ++ __m128i_out = __lsx_vclz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000040; ++ __m128i_out = __lsx_vclz_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000039; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000039; ++ __m128i_out = __lsx_vclz_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000002000000020; ++ __m128i_out = __lsx_vclz_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_result[0]) = 0x0010001000100010; ++ __m128i_out = __lsx_vclz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ff000100ff00fe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff003000ff00a0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0008000f00080008; ++ *((unsigned long *)&__m128i_result[0]) = 0x0008000a00080008; ++ __m128i_out = __lsx_vclz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfe813f00fe813f00; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfe813f00fe813f00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000200000002; ++ __m128i_out = __lsx_vclz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned 
long *)&__m128i_op0[1]) = 0x00000000bffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000040; ++ __m128i_out = __lsx_vclz_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000c0c00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclz_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vclz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x687a8373f249bc44; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7861145d9241a14a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101030100010001; ++ __m128i_out = __lsx_vclz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000040; ++ __m128i_out = __lsx_vclz_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000002000000020; ++ __m128i_out = __lsx_vclz_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0808080700000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0808080808080808; ++ __m128i_out = __lsx_vclz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_result[0]) = 0x0808080808080808; ++ __m128i_out = __lsx_vclz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000f0000000f; ++ __m128i_out = __lsx_vclz_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000001f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000008000001e; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000200000001b; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000002000000000; ++ __m128i_out = __lsx_vclz_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[1]) = 0x0808080808080805; ++ *((unsigned long *)&__m128i_result[0]) = 0x0808080808080805; ++ __m128i_out = __lsx_vclz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_result[0]) = 0x0010001000100010; ++ __m128i_out = __lsx_vclz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_result[0]) = 0x0808080808080808; ++ __m128i_out = __lsx_vclz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010001000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0010001000000000; ++ __m128i_out = __lsx_vclz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_result[0]) = 0x0010001000100010; ++ __m128i_out = __lsx_vclz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpcnt.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpcnt.c +new file mode 100644 +index 000000000..66982d89f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpcnt.c +@@ -0,0 +1,350 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ 
++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpcnt_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x7); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpcnt_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpcnt_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010001000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0010001000100010; ++ __m128i_out = __lsx_vpcnt_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010001000100010; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0010001000100010; ++ __m128i_out = __lsx_vpcnt_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffefefefe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000003c; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000010; ++ __m128i_out = __lsx_vpcnt_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000800000008; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000800000008; ++ __m128i_out = __lsx_vpcnt_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f80000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0701000007010000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0701000000000000; ++ __m128i_out = __lsx_vpcnt_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x807f7f8000ffff00; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff00feff00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0107070100080800; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000080800070800; ++ __m128i_out = __lsx_vpcnt_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpcnt_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpcnt_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5252525252525252; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5252525252525252; ++ *((unsigned long *)&__m128i_result[1]) = 0x0303030303030303; ++ *((unsigned long *)&__m128i_result[0]) = 0x0303030303030303; ++ __m128i_out = __lsx_vpcnt_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000100010; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000100010; ++ __m128i_out = __lsx_vpcnt_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xe0404041e0404041; ++ *((unsigned long *)&__m128i_op0[0]) = 0x803f800080000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000000e; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000009; ++ __m128i_out = __lsx_vpcnt_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0007000000040000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0003000000010000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0003000000010000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0002000000010000; ++ __m128i_out = __lsx_vpcnt_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1111111111111111; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1111111111111111; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000010; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000010; ++ __m128i_out = __lsx_vpcnt_d (__m128i_op0); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0ba00ba00ba00ba0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0ba00ba00ba011eb; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000a0000000a; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000a0000000d; ++ __m128i_out = __lsx_vpcnt_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfbfbfb17fbfb38ea; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfbfb47fbfbfb0404; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000002f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000029; ++ __m128i_out = __lsx_vpcnt_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffbfc0ffffbfc0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000032; ++ __m128i_out = __lsx_vpcnt_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x21201f1e19181716; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0003000900050007; ++ *((unsigned long *)&__m128i_result[0]) = 0x0010001000100010; ++ __m128i_out = __lsx_vpcnt_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000002; ++ __m128i_out = __lsx_vpcnt_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpcnt_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfff0800080008000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe160065422d476da; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000d00000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000b00000010; ++ __m128i_out = __lsx_vpcnt_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpcnt_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000001000000010; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001000000000; ++ __m128i_out = __lsx_vpcnt_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000040; ++ __m128i_out = __lsx_vpcnt_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000010100000101; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000010100000101; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000200000002; ++ __m128i_out = __lsx_vpcnt_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0808080808080808; ++ __m128i_out = __lsx_vpcnt_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0010001000100010; ++ __m128i_out = __lsx_vpcnt_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpcnt_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000020000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0103000201030002; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000008; ++ __m128i_out = __lsx_vpcnt_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000200000001e; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000200000001e; ++ __m128i_out = __lsx_vpcnt_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xbbe5560400010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe7e5dabf00010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x000b000500010001; ++ *((unsigned long *)&__m128i_result[0]) = 
0x000b000c00010001; ++ __m128i_out = __lsx_vpcnt_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpcnt_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_result[0]) = 0x0010001000100010; ++ __m128i_out = __lsx_vpcnt_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpcnt_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001f0000001f; ++ __m128i_out = __lsx_vpcnt_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000020; ++ __m128i_out = __lsx_vpcnt_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpcnt_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000600007fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000008ffffa209; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000011; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000016; ++ __m128i_out = __lsx_vpcnt_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpcnt_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000467fef81; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000013; ++ __m128i_out = __lsx_vpcnt_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpcnt_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpcnt_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000fe03fe01; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fe01fe01; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000007020701; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000007010701; ++ __m128i_out = __lsx_vpcnt_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f80000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000800000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpcnt_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xf654ad7447e59090; ++ *((unsigned long *)&__m128i_op0[0]) = 0x27b1b106b8145f50; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000120000000d; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000e0000000e; ++ __m128i_out = __lsx_vpcnt_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +-- +2.33.0 + diff --git a/LoongArch-Add-tests-for-SX-vector-vdiv-vmod-instruct.patch b/LoongArch-Add-tests-for-SX-vector-vdiv-vmod-instruct.patch new file mode 100644 index 0000000000000000000000000000000000000000..447db395cf222980e673a2b3e1a1b0014fb4e814 --- /dev/null +++ b/LoongArch-Add-tests-for-SX-vector-vdiv-vmod-instruct.patch @@ -0,0 +1,1114 @@ +From 1a3f6886143b0fd334d1d7530bce0a746b106b27 Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Mon, 11 Sep 2023 18:51:44 +0800 +Subject: [PATCH 088/124] LoongArch: Add tests for SX vector vdiv/vmod + instructions. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lsx/lsx-vdiv-1.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vdiv-2.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vmod-1.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vmod-2.c: New test. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lsx/lsx-vdiv-1.c | 299 ++++++++++++++++++ + .../loongarch/vector/lsx/lsx-vdiv-2.c | 254 +++++++++++++++ + .../loongarch/vector/lsx/lsx-vmod-1.c | 254 +++++++++++++++ + .../loongarch/vector/lsx/lsx-vmod-2.c | 254 +++++++++++++++ + 4 files changed, 1061 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-2.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-1.c +new file mode 100644 +index 000000000..cb4be0475 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-1.c +@@ -0,0 +1,299 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vdiv_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x40f3fa0000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffb4ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffb4ff; ++ *((unsigned long *)&__m128i_result[1]) = 0xc110000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xc00d060000000000; ++ __m128i_out = __lsx_vdiv_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vdiv_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000020000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0101000101010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000fe0000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff00ffffff00ff; ++ __m128i_out = __lsx_vdiv_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101010100000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010100000000; ++ __m128i_out = 
__lsx_vdiv_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xd3259a2984048c23; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf9796558e39953fd; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff9727ffff9727; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffe79ffffba5f; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x010169d9010169d9; ++ *((unsigned long *)&__m128i_result[0]) = 0x01010287010146a1; ++ __m128i_out = __lsx_vdiv_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000897957687; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000408; ++ *((unsigned long *)&__m128i_op1[1]) = 0x80010001b57fc565; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8001000184000be0; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000080001fffe; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_h (__m128i_op0, __m128i_op1); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff9cf0d77b; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc1000082b0fb585b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x33f5c2d7d975d7fe; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ff010000ff01; ++ __m128i_out = __lsx_vdiv_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000feff23560000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000fd1654860000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x6363636363abdf16; ++ *((unsigned long *)&__m128i_op1[0]) = 0x41f8e08016161198; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000030000; ++ __m128i_out = __lsx_vdiv_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4f4f4f4f4f4f4f4f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4f4f4f4f4f4f4f4f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00003ff000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000fffc00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00001ff800000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7ffe800e80000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x195f307a5d04acbb; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6a1a3fbb3c90260e; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xe6a0cf86a2fb5345; ++ *((unsigned long *)&__m128i_result[0]) = 0x95e5c045c36fd9f2; ++ __m128i_out = __lsx_vdiv_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000000; ++ __m128i_out = __lsx_vdiv_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000fffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0010000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x370bdfecffecffec; ++ *((unsigned long *)&__m128i_op1[0]) = 0x370bdfecffecffec; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xa2e3a36363636363; ++ *((unsigned long *)&__m128i_op0[0]) = 0xa2e3a36463636363; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7f80000000000007; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000700000007; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000e32c50e; ++ *((unsigned long *)&__m128i_result[0]) = 0xf2b2ce330e32c50e; ++ __m128i_out = __lsx_vdiv_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ 
*((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000001; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000001; ++ __m128i_out = __lsx_vdiv_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x52527d7d52527d7d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x52527d7d52527d7d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x52527d7d52527d7d; ++ *((unsigned long *)&__m128i_op1[0]) = 0x52527d7d52527d7d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vdiv_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000000080000000; ++ __m128i_out = __lsx_vdiv_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xe93d0bd19ff0c170; ++ *((unsigned long *)&__m128i_op1[0]) = 0x5237c1bac9eadf55; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000003dffc2; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000003dffc2; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000001084314a6; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000001084314a6; ++ __m128i_out = __lsx_vdiv_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000ffef0010000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000010000010101; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0101000001000100; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4280000042800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xbd7fffffbd800000; ++ __m128i_out = __lsx_vdiv_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000000000ff; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x30eb022002101b20; ++ *((unsigned long *)&__m128i_op1[0]) = 0x020310edc003023d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000002000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000002000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000000004ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000667ae56; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000020; ++ __m128i_out = __lsx_vdiv_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-2.c +new file mode 100644 +index 000000000..f2bc7df27 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-2.c +@@ -0,0 +1,254 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 
0xffffffefffffffef; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffefffffffef; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe3e3e3e3e3e3e3e3; ++ *((unsigned long *)&__m128i_op1[1]) = 0xe3e3e3e3e3e3e3e3; ++ *((unsigned long *)&__m128i_op1[0]) = 0xe3e3e3e3e3e3e3e3; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vdiv_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vdiv_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0002000000000007; ++ *((unsigned long *)&__m128i_op1[1]) = 0x31b1777777777776; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6eee282828282829; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vdiv_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000010100000101; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000010100000101; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0effeffefdffa1e0; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe6004c5f64284224; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfeffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfeffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000000010000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x10f917d72d3d01e4; ++ *((unsigned long *)&__m128i_op1[0]) = 0x203e16d116de012b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000073; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000002a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00ffffff00ff00ff; ++ *((unsigned long *)&__m128i_op1[0]) = 
0x00ff00ffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000003f200001e01; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000014bf000019da; ++ *((unsigned long *)&__m128i_op1[1]) = 0x9c9c99aed5b88fcf; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7c3650c5f79a61a3; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0002000200020002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8080808080800008; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffd700; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffff00; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000000; ++ __m128i_out = __lsx_vdiv_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffffffdfffdf; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffbfff8; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0080008000800080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0080006b0000000b; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000001ff1745745c; ++ __m128i_out = __lsx_vdiv_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff14eb54ab; ++ *((unsigned long *)&__m128i_op0[0]) = 0x14ea6a002a406a00; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff80008a7555aa; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0a7535006af05cf9; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000200000000; ++ __m128i_out = __lsx_vdiv_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0141010101410101; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0141010101410101; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfebffefffebffeff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfebffefffebffeff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6363797c63996399; ++ *((unsigned long *)&__m128i_op0[0]) = 0x171f0a1f6376441f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x6363797c63996399; ++ *((unsigned long *)&__m128i_op1[0]) = 0x171f0a1f6376441f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vdiv_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000036de0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000003be14000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000000006f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000001f0a; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000007e8a60; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000001edde; ++ __m128i_out = __lsx_vdiv_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000015d926c7; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000e41b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x77c0404a4000403a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x77c03fd640003fc6; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0042003e0042002f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001fffc0001fffc; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0042003e0042002f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001fffc0001fffc; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000000001; ++ __m128i_out = __lsx_vdiv_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000feff2356; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fd165486; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000007; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000007; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000246d9755; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000002427c2ee; ++ __m128i_out = __lsx_vdiv_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-1.c +new file mode 100644 +index 000000000..5470d40dd +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-1.c +@@ -0,0 +1,254 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x82c539ffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc72df14afbfafdf9; ++ *((unsigned long *)&__m128i_op1[1]) = 0x82c539ffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc72df14afbfafdf9; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ 
__m128i_out = __lsx_vmod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff994cb09c; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffc3639d96; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0003c853c843c844; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0003c853c843c844; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op1[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_b (__m128i_op0, 
__m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001808281820102; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001808201018081; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001008281820102; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001008201010081; ++ __m128i_out = __lsx_vmod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0101010240010202; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000104000800; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0101080408040804; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0804080407040804; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000104000800; ++ __m128i_out = __lsx_vmod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1202120212021202; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1202120212021202; ++ *((unsigned long *)&__m128i_result[1]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_result[0]) = 0x1000100010001000; ++ __m128i_out = __lsx_vmod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000003; ++ __m128i_out = __lsx_vmod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0xf6548a1747e59090; ++ *((unsigned long *)&__m128i_op0[0]) = 0x27b169bbb8145f50; ++ *((unsigned long *)&__m128i_op1[1]) = 0xf6548a1747e59090; ++ *((unsigned long *)&__m128i_op1[0]) = 0x27b169bbb8145f50; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x370bdfecffecffec; ++ *((unsigned long *)&__m128i_op0[0]) = 0x370bdfeca2eb9931; ++ *((unsigned long *)&__m128i_op1[1]) = 0x370bdfecffecffec; ++ *((unsigned long *)&__m128i_op1[0]) = 0x370bdfeca2eb9931; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x805ffffe01001fe0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x9a49e11102834d70; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8144ffff01c820a4; ++ *((unsigned long *)&__m128i_op1[0]) = 0x9b2ee1a4034b4e34; ++ *((unsigned long *)&__m128i_result[1]) = 0xff1affff01001fe0; ++ *((unsigned long *)&__m128i_result[0]) = 0xff1aff6d02834d70; ++ __m128i_out = __lsx_vmod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x001d001d001d001d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x001d001d001d0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x001d001d001d001d; ++ *((unsigned long *)&__m128i_op1[0]) = 0x001d001d001d0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4f8000004f800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4f8000004f800000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ffff0000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ffff000000ff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x03c0000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x03c0038000000380; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ffff0000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ffff000000ff00; ++ __m128i_out = __lsx_vmod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000006f00001f0a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000958affff995d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x80000000307d0771; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0d8e36706ac02b9b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x80000000307d0771; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0d8e36706ac02b9b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x413e276583869d79; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7f7f017f9d8726d3; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op1[1]) = 0x000000000011ffee; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000dfff2; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-2.c +new file mode 100644 +index 000000000..8deb04427 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-2.c +@@ -0,0 +1,254 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x16161616a16316b0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op1[1]) = 0x16161616a16316b0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000001fffffffe; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x5a5a5a5a5b5a5b5a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x5a5a5a5a5b5a5b5a; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000001494b494a; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000001494b494a; ++ __m128i_out = __lsx_vmod_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xd70b30c96ea9f4e8; ++ *((unsigned long *)&__m128i_op1[0]) = 0xa352bfac9269e0aa; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffeb; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffeb; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7f801fa06451ef11; ++ *((unsigned long *)&__m128i_op1[0]) = 0x68bcf93435ed25ed; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000022666621; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffdd9999da; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7f7f7f7f00107f04; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7f0000fd7f0000fd; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000066621; ++ *((unsigned long *)&__m128i_result[0]) = 0x01ff00085e9900ab; ++ __m128i_out = __lsx_vmod_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000bd3d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000007fff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0xefffdffff0009d3d; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000bd3d; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000007fff0000; ++ __m128i_out = __lsx_vmod_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000004870ba0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x478b478b38031779; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6b769e690fa1e119; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000004870ba0; ++ __m128i_out = __lsx_vmod_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x2006454690d3de87; ++ *((unsigned long *)&__m128i_op0[0]) = 0x2006454690d3de87; ++ *((unsigned long *)&__m128i_op1[1]) = 0x2006454690d3de87; ++ *((unsigned long *)&__m128i_op1[0]) = 0x2006454690d3de87; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0002000200020002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0002000200020002; ++ *((unsigned long *)&__m128i_op1[1]) = 0x02b010f881a281a2; ++ *((unsigned long *)&__m128i_op1[0]) = 0x27b169bbb8145f50; ++ *((unsigned long *)&__m128i_result[1]) = 0x0002000200020002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0002000200020002; ++ __m128i_out = __lsx_vmod_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff100000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_op1[0]) = 
0x0010001000100010; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000f000000000000; ++ __m128i_out = __lsx_vmod_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000fffe0000fffe; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffff00ffffff00; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffff00ffffff00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000fffe0000fffe; ++ __m128i_out = __lsx_vmod_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000200; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000101fd01fe; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff80ff80ff80ff80; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff80ff8080008000; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000101fd01fe; ++ __m128i_out = __lsx_vmod_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffcafff8ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000000000a0; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff2cfed4fea8ff44; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffeffff0035ff8f; ++ *((unsigned long *)&__m128i_result[1]) = 0x00d3012acc56f9bb; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000000000a0; ++ __m128i_out = __lsx_vmod_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x37c0001000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x37c0001000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0004000400040004; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0004000400040004; ++ *((unsigned long *)&__m128i_result[1]) = 0x0003c853c843c844; ++ *((unsigned long *)&__m128i_result[0]) = 0x0003c853c843c844; ++ __m128i_out = __lsx_vmod_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000003ddc5dac; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfcfcfcdcfcfcfcdc; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfcfcfcdcfcfcfcdc; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000003ddc5dac; ++ __m128i_out = __lsx_vmod_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffefffff784; ++ *((unsigned long *)&__m128i_op1[1]) = 0x10f8000100000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000001000010f8; ++ *((unsigned long *)&__m128i_result[1]) = 0x0177fff0fffffff0; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x00000000011ff8bc; ++ __m128i_out = __lsx_vmod_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +-- +2.33.0 + diff --git a/LoongArch-Add-tests-for-SX-vector-vexth-vextl-vldi-v.patch b/LoongArch-Add-tests-for-SX-vector-vexth-vextl-vldi-v.patch new file mode 100644 index 0000000000000000000000000000000000000000..17909c03c9ba667ae498b57b73b1502926dc59dc --- /dev/null +++ b/LoongArch-Add-tests-for-SX-vector-vexth-vextl-vldi-v.patch @@ -0,0 +1,1664 @@ +From ed55869f2ae380ac36d09746e7e04ce675e197b0 Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Mon, 11 Sep 2023 18:44:16 +0800 +Subject: [PATCH 086/124] LoongArch: Add tests for SX vector + vexth/vextl/vldi/vneg/vsat instructions. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lsx/lsx-vexth-1.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vexth-2.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vextl-1.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vextl-2.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vldi.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vneg.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vsat-1.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vsat-2.c: New test. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lsx/lsx-vexth-1.c | 342 ++++++++++++++++++ + .../loongarch/vector/lsx/lsx-vexth-2.c | 182 ++++++++++ + .../loongarch/vector/lsx/lsx-vextl-1.c | 83 +++++ + .../loongarch/vector/lsx/lsx-vextl-2.c | 83 +++++ + .../loongarch/vector/lsx/lsx-vldi.c | 61 ++++ + .../loongarch/vector/lsx/lsx-vneg.c | 321 ++++++++++++++++ + .../loongarch/vector/lsx/lsx-vsat-1.c | 231 ++++++++++++ + .../loongarch/vector/lsx/lsx-vsat-2.c | 272 ++++++++++++++ + 8 files changed, 1575 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vldi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vneg.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-2.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-1.c +new file mode 100644 +index 000000000..f6390800d +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-1.c +@@ -0,0 +1,342 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; 
++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_h_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fff000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x007fffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_h_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vexth_h_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_h_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000f909; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_h_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_h_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffff01ff01; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vexth_h_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_h_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vexth_h_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_h_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ 
__m128i_out = __lsx_vexth_h_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_h_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_h_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_h_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_h_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_h_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_h_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1010111105050000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4040000041410101; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010001000110011; ++ *((unsigned long *)&__m128i_result[0]) = 0x0005000500000000; ++ __m128i_out = __lsx_vexth_h_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000003e2; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000003ffe2; ++ __m128i_out = __lsx_vexth_h_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffe; ++ __m128i_out = __lsx_vexth_w_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00003ff000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_w_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x03c0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x03c0038000000380; ++ *((unsigned long *)&__m128i_result[1]) = 0x000003c000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_w_h (__m128i_op0); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_w_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xb9fe3640e4eb1b18; ++ *((unsigned long *)&__m128i_op0[0]) = 0x800000005b4b1b18; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffb9fe00003640; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffe4eb00001b18; ++ __m128i_out = __lsx_vexth_w_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x370bdfec00130014; ++ *((unsigned long *)&__m128i_op0[0]) = 0x370bdfec00130014; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000370bffffdfec; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001300000014; ++ __m128i_out = __lsx_vexth_w_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xe500c085c000c005; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe5c1a185c48004c5; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffe500ffffc085; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffc000ffffc005; ++ __m128i_out = __lsx_vexth_w_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_w_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_w_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000010000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5c9c9c9ce3636363; ++ *((unsigned long *)&__m128i_op0[0]) = 0x63635c9e63692363; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000005c9c9c9c; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffe3636363; ++ __m128i_out = __lsx_vexth_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x63b2ac27aa076aeb; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000063b2ac27; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffaa076aeb; ++ __m128i_out = __lsx_vexth_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x002a001a001a000b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000002a001a; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000001a000b; ++ __m128i_out = __lsx_vexth_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vexth_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0028280000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x012927ffff272800; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0028280000000000; ++ __m128i_out = __lsx_vexth_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ff0000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000ff000000ff; ++ __m128i_out = __lsx_vexth_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000005; ++ __m128i_out = __lsx_vexth_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000020000020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000020000020; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000020000020; ++ __m128i_out = __lsx_vexth_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000fff0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vexth_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffc; ++ __m128i_out = __lsx_vexth_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vexth_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000080; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000080; ++ __m128i_out = __lsx_vexth_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3a8000003a800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000ef0000000003b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x3a8000003a800000; ++ __m128i_out = __lsx_vexth_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-2.c +new file mode 100644 +index 000000000..6ab217e97 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-2.c +@@ -0,0 +1,182 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_hu_bu (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_hu_bu (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_hu_bu (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_result[1]) = 0x004f0080004f0080; ++ *((unsigned long *)&__m128i_result[0]) = 0x004f0080004f0080; ++ __m128i_out = __lsx_vexth_hu_bu (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_hu_bu (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff007f00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff007f00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff00ff0000007f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_hu_bu (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x00ff00ff00ff00ff; ++ __m128i_out = __lsx_vexth_hu_bu (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_out = __lsx_vpickve2gr_b (__m128i_op0, 0x5); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_hu_bu (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x007fffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x002cffacffacffab; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000007f00ff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00ff00ff; ++ __m128i_out = __lsx_vexth_hu_bu (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000082020201; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000820200000201; ++ __m128i_out = __lsx_vexth_wu_hu (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010012; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fec20704; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000012; ++ __m128i_out = __lsx_vexth_wu_hu (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vexth_du_wu (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ 
++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_du_wu (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_du_wu (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000001000100; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_du_wu (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000001000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000001000001; ++ __m128i_out = __lsx_vexth_qu_du (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000b5207f80; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_qu_du (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fff7fc01; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_qu_du (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0002000000020000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000001fc00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0002000000020000; ++ __m128i_out = __lsx_vexth_qu_du (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000b4a00008808; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0808080800000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000b4a00008808; ++ __m128i_out = __lsx_vexth_qu_du (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_qu_du (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000400080003fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000bc2000007e10; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000400080003fff; ++ __m128i_out = __lsx_vexth_qu_du (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_qu_du (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git 
a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-1.c +new file mode 100644 +index 000000000..99854dbd8 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-1.c +@@ -0,0 +1,83 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextl_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextl_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000170014; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff0cff78ff96ff14; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xff0cff78ff96ff14; ++ __m128i_out = __lsx_vextl_q_d (__m128i_op0); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffe500ffffc085; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffc000ffffc005; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffc000ffffc005; ++ __m128i_out = __lsx_vextl_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextl_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextl_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000101010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vextl_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000800000000000; ++ __m128i_out = __lsx_vextl_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3131313131313131; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3131313131313131; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x3131313131313131; ++ __m128i_out = __lsx_vextl_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-2.c +new file mode 100644 +index 000000000..73bb530c9 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-2.c +@@ -0,0 +1,83 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000101fffff8b68; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000b6fffff8095; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000b6fffff8095; ++ __m128i_out = __lsx_vextl_qu_du (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000104000800; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000104000800; ++ __m128i_out = __lsx_vextl_qu_du (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000100010000fe7c; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x000100010000fe01; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000100010000fe01; ++ __m128i_out = __lsx_vextl_qu_du (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextl_qu_du (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextl_qu_du (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x67eb85afb2ebb000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xc8847ef6ed3f2000; ++ __m128i_out = __lsx_vextl_qu_du (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x33f5c2d7d975d7fe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vextl_qu_du (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000d82; ++ *((unsigned long *)&__m128i_op0[0]) = 0x046a09ec009c0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x046a09ec009c0000; ++ __m128i_out = __lsx_vextl_qu_du 
(__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextl_qu_du (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vldi.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vldi.c +new file mode 100644 +index 000000000..089500ea9 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vldi.c +@@ -0,0 +1,61 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_result[1]) = 0x00a300a300a300a3; ++ *((unsigned long *)&__m128i_result[0]) = 0x00a300a300a300a3; ++ __m128i_out = __lsx_vldi (1187); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffe15; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffe15; ++ __m128i_out = __lsx_vldi (3605); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_result[1]) = 0xecececececececec; ++ *((unsigned long *)&__m128i_result[0]) = 0xecececececececec; ++ 
__m128i_out = __lsx_vldi (1004); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_result[1]) = 0x00ffff00ff00ff00; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ffff00ff00ff00; ++ __m128i_out = __lsx_vldi (-1686); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x3); ++ *((unsigned long *)&__m128i_result[1]) = 0x004d004d004d004d; ++ *((unsigned long *)&__m128i_result[0]) = 0x004d004d004d004d; ++ __m128i_out = __lsx_vldi (1101); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_result[1]) = 0x0a0000000a000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0a0000000a000000; ++ __m128i_out = __lsx_vldi (-3318); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_result[1]) = 0x00ffff00ff00ff00; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ffff00ff00ff00; ++ __m128i_out = __lsx_vldi (-1686); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_result[1]) = 0x0a0000000a000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0a0000000a000000; ++ __m128i_out = __lsx_vldi (-3318); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vneg.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vneg.c +new file mode 100644 +index 000000000..9441ba50e +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vneg.c +@@ -0,0 +1,321 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, 
int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vneg_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffeffffffff; ++ __m128i_out = __lsx_vneg_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000004; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffefffefffffffc; ++ __m128i_out = __lsx_vneg_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffff00ffffff01; ++ __m128i_out = __lsx_vneg_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000fff3; ++ __m128i_out = __lsx_vneg_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ff00ff0000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff0001ffffff0a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100000101; ++ *((unsigned long *)&__m128i_result[0]) = 0x000100ff010101f6; ++ __m128i_out = __lsx_vneg_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101010101010101; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vneg_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff000000ff00ff00; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff00ff0000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0100000001000100; ++ *((unsigned long *)&__m128i_result[0]) = 0x0100010000000000; ++ __m128i_out = __lsx_vneg_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ff00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000100; ++ __m128i_out = __lsx_vneg_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000101; ++ 
*((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000fffffeff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffbff8888080a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x080803ff807ff7f9; ++ *((unsigned long *)&__m128i_result[1]) = 0x010105017878f8f6; ++ *((unsigned long *)&__m128i_result[0]) = 0xf8f8fd0180810907; ++ __m128i_out = __lsx_vneg_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000080000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000080000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000080000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000080000000; ++ __m128i_out = __lsx_vneg_b 
(__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000300000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffdffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffeffff; ++ __m128i_out = __lsx_vneg_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vneg_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x441ba9fcffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x181b2541ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xbbe5560400010001; ++ *((unsigned long *)&__m128i_result[0]) = 0xe7e5dabf00010001; ++ __m128i_out = __lsx_vneg_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000060a3db; ++ *((unsigned long *)&__m128i_op0[0]) = 0xa70594c000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x00000000ff9f5c25; ++ *((unsigned long *)&__m128i_result[0]) = 0x58fa6b4000000000; ++ __m128i_out = __lsx_vneg_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vneg_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000010000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffff0000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned 
long *)&__m128i_op0[1]) = 0x000000000000001f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000008000001e; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffe1; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff7fffffe2; ++ __m128i_out = __lsx_vneg_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x67eb85afb2ebb000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long *)&__m128i_result[1]) = 0x98147a504d145000; ++ *((unsigned long *)&__m128i_result[0]) = 0x377b810912c0e000; ++ __m128i_out = __lsx_vneg_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffc00001ff800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x003ffffe00800000; ++ __m128i_out = __lsx_vneg_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100000000; ++ __m128i_out = __lsx_vneg_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000100000001; ++ __m128i_out = __lsx_vneg_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x087c000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000087c; ++ *((unsigned long *)&__m128i_result[1]) = 0xf784000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffff784; ++ __m128i_out = __lsx_vneg_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-1.c +new file mode 100644 +index 000000000..cd8eefb47 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-1.c +@@ -0,0 +1,231 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ 
__m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_b (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_b (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xf000000000000000; ++ __m128i_out = __lsx_vsat_b (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsat_b (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x7fff010181010102; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff81010102; ++ *((unsigned long *)&__m128i_result[1]) = 0x03ff0101fc010102; ++ *((unsigned long *)&__m128i_result[0]) = 0x03fffffffc010102; ++ __m128i_out = __lsx_vsat_b (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsat_b (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_h (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000040400000383; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff8383ffff7d0d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000040400000383; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffe000ffff1fff; ++ __m128i_out = __lsx_vsat_h (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_h (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffff8f8da00; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff01018888; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffff00ff00; ++ *((unsigned long 
*)&__m128i_result[0]) = 0xffffffff00ffff00; ++ __m128i_out = __lsx_vsat_h (__m128i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3f8000003f800001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3f8000003f800001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000000010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000000010001; ++ __m128i_out = __lsx_vsat_h (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000006f00001f0a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000958affff995d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000006f00001f0a; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000c000ffffc000; ++ __m128i_out = __lsx_vsat_h (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vsat_h (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0038d800ff000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00fffe00fffffe00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0038f000ff000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00fffe00fffffe00; ++ __m128i_out = __lsx_vsat_h (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_h (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_h (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x003f0000003f0000; ++ *((unsigned long *)&__m128i_result[0]) = 0x003f0000003f0000; ++ __m128i_out = __lsx_vsat_h (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0674c886fcba4e98; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfdce8003090b0906; ++ *((unsigned long *)&__m128i_result[1]) = 0x003fffc0ffc0003f; ++ *((unsigned long *)&__m128i_result[0]) = 0xffc0ffc0003f003f; ++ __m128i_out = __lsx_vsat_h (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_h (__m128i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x04e00060ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x04e00060ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x007fffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x007fffffffffffff; ++ __m128i_out = __lsx_vsat_w (__m128i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000017f0a82; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 
0x000000000000003f; ++ __m128i_out = __lsx_vsat_w (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_w (__m128i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x3); ++ *((unsigned long *)&__m128i_op0[1]) = 0x8006000080020000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8004000080020000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffff8fffffff8; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffff8fffffff8; ++ __m128i_out = __lsx_vsat_w (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_w (__m128i_op0, 0x12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsat_w (__m128i_op0, 0x10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_w (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_w (__m128i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffd27db010d20fbf; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffff00000000f; ++ __m128i_out = __lsx_vsat_w (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000003ff8; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000003ff8; ++ __m128i_out = __lsx_vsat_w (__m128i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_d (__m128i_op0, 0x35); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-2.c +new file mode 100644 +index 000000000..31e3919bf +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-2.c +@@ -0,0 +1,272 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, 
__m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff1739ffff48aa; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff2896ffff5b88; ++ *((unsigned long *)&__m128i_result[1]) = 0x3f3f17393f3f3f3f; ++ *((unsigned long *)&__m128i_result[0]) = 0x3f3f283f3f3f3f3f; ++ __m128i_out = __lsx_vsat_bu (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0002000000020000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000001fc00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000000010000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000010100000000; ++ __m128i_out = __lsx_vsat_bu (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffcc000b000b000b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000b000b010a000b; ++ *((unsigned long *)&__m128i_result[1]) = 0x7f7f000b000b000b; ++ *((unsigned long *)&__m128i_result[0]) = 0x000b000b010a000b; ++ __m128i_out = __lsx_vsat_bu (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000068; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000001f; ++ __m128i_out = __lsx_vsat_bu (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; 
++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_bu (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffcd63ffffcd63; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffd765ffffd765; ++ *((unsigned long *)&__m128i_result[1]) = 0x1f1f1f1f1f1f1f1f; ++ *((unsigned long *)&__m128i_result[0]) = 0x1f1f1f1f1f1f1f1f; ++ __m128i_out = __lsx_vsat_bu (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_bu (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000120000000d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vsat_bu (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xbf8000000000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xcf00000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x003f00000000003f; ++ *((unsigned long *)&__m128i_result[0]) = 0x003f000000000000; ++ __m128i_out = __lsx_vsat_hu (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000007f8; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000000007f8; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000000000ff; ++ __m128i_out = __lsx_vsat_hu (__m128i_op0, 0x7); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_hu (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_hu (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000006de1; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5f9ccf33cf600000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000007; ++ *((unsigned long *)&__m128i_result[0]) = 0x0007000700070000; ++ __m128i_out = __lsx_vsat_hu (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_wu (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fff7fc01; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000000f; ++ __m128i_out = __lsx_vsat_wu (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_wu (__m128i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_wu (__m128i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_wu (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000bd3d00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000bd3d00000000; ++ __m128i_out = __lsx_vsat_wu (__m128i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_wu (__m128i_op0, 0x12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_wu (__m128i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0007000000050000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0003000000010000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00003fff00003fff; ++ __m128i_out = __lsx_vsat_wu (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x001a323b5430048c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x008f792cab1cb915; ++ *((unsigned long *)&__m128i_result[1]) = 0x001a323b00ffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x008f792c00ffffff; ++ __m128i_out = __lsx_vsat_wu (__m128i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_wu (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_du (__m128i_op0, 0x20); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_du (__m128i_op0, 0x25); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) 
= 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffffffffffff; ++ __m128i_out = __lsx_vsat_du (__m128i_op0, 0x3e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6363636389038903; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6363636389038903; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000001ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000001ffff; ++ __m128i_out = __lsx_vsat_du (__m128i_op0, 0x10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_du (__m128i_op0, 0x22); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_du (__m128i_op0, 0x36); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000001fffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_du (__m128i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_du (__m128i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); 
++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_du (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000101010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000101010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vsat_du (__m128i_op0, 0x34); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000202020200; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000001fffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000100; ++ __m128i_out = __lsx_vsat_du (__m128i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xa8a74bff9e9e0070; ++ *((unsigned long *)&__m128i_op0[0]) = 0x9e9e72ff9e9ff9ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffffffffffff; ++ __m128i_out = __lsx_vsat_du (__m128i_op0, 0x2f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +-- +2.33.0 + diff --git a/LoongArch-Add-tests-for-SX-vector-vfcmp-instructions.patch b/LoongArch-Add-tests-for-SX-vector-vfcmp-instructions.patch new file mode 100644 index 0000000000000000000000000000000000000000..4a64bb81151984addc33a3ab1d359450fa8f0453 --- /dev/null +++ b/LoongArch-Add-tests-for-SX-vector-vfcmp-instructions.patch @@ -0,0 +1,5295 @@ +From 8cea23eb3f7e7aee77d0cf87581754c017691b91 Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Tue, 12 Sep 2023 11:31:16 +0800 +Subject: [PATCH 095/124] LoongArch: Add tests 
for SX vector vfcmp + instructions. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lsx/lsx-vfcmp_caf.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vfcmp_ceq.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vfcmp_cle.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vfcmp_clt.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vfcmp_cne.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vfcmp_cor.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vfcmp_cun.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vfcmp_saf.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vfcmp_seq.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vfcmp_sle.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vfcmp_slt.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vfcmp_sne.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vfcmp_sor.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vfcmp_sun.c: New test. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lsx/lsx-vfcmp_caf.c | 244 ++++++++ + .../loongarch/vector/lsx/lsx-vfcmp_ceq.c | 516 +++++++++++++++++ + .../loongarch/vector/lsx/lsx-vfcmp_cle.c | 530 ++++++++++++++++++ + .../loongarch/vector/lsx/lsx-vfcmp_clt.c | 476 ++++++++++++++++ + .../loongarch/vector/lsx/lsx-vfcmp_cne.c | 378 +++++++++++++ + .../loongarch/vector/lsx/lsx-vfcmp_cor.c | 170 ++++++ + .../loongarch/vector/lsx/lsx-vfcmp_cun.c | 253 +++++++++ + .../loongarch/vector/lsx/lsx-vfcmp_saf.c | 214 +++++++ + .../loongarch/vector/lsx/lsx-vfcmp_seq.c | 450 +++++++++++++++ + .../loongarch/vector/lsx/lsx-vfcmp_sle.c | 407 ++++++++++++++ + .../loongarch/vector/lsx/lsx-vfcmp_slt.c | 512 +++++++++++++++++ + .../loongarch/vector/lsx/lsx-vfcmp_sne.c | 398 +++++++++++++ + .../loongarch/vector/lsx/lsx-vfcmp_sor.c | 269 +++++++++ + .../loongarch/vector/lsx/lsx-vfcmp_sun.c | 335 +++++++++++ + 14 files changed, 5152 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_caf.c 
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_ceq.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cle.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_clt.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cne.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cor.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cun.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_saf.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_seq.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sle.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_slt.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sne.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sor.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sun.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_caf.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_caf.c +new file mode 100644 +index 000000000..b448c2076 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_caf.c +@@ -0,0 +1,244 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ 
unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x01010101; ++ *((int *)&__m128_op0[0]) = 0x01010101; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x7ef400ad; ++ *((int *)&__m128_op0[2]) = 0x21fc7081; ++ *((int *)&__m128_op0[1]) = 0x28bf0351; ++ *((int *)&__m128_op0[0]) = 0xec69b5f2; ++ *((int *)&__m128_op1[3]) = 0xff800000; ++ *((int *)&__m128_op1[2]) = 0xff800000; ++ *((int *)&__m128_op1[1]) = 0xff800000; ++ *((int *)&__m128_op1[0]) = 0x7fc00000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x01000100; ++ *((int *)&__m128_op0[0]) = 0x01000100; ++ *((int 
*)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0x64e464e4; ++ *((int *)&__m128_op1[0]) = 0x64e464e4; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xffc0ff80; ++ *((int *)&__m128_op1[2]) = 0xff800000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000005; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xc0800000; ++ *((int *)&__m128_op1[3]) = 0x0000001b; ++ *((int *)&__m128_op1[2]) = 0x0000001b; ++ *((int *)&__m128_op1[1]) = 0x0000001b; ++ *((int *)&__m128_op1[0]) = 0x0000001b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000002; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000002; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x34500292; ++ *((int *)&__m128_op1[0]) = 0x0f3017d6; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00830029; ++ *((int *)&__m128_op0[0]) = 0x0038ff50; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xff7fff80; ++ *((int *)&__m128_op0[2]) = 0xff800001; ++ *((int *)&__m128_op0[1]) = 0xe593d844; ++ *((int *)&__m128_op0[0]) = 0xe593c8c4; ++ *((int *)&__m128_op1[3]) = 0xff800000; ++ *((int *)&__m128_op1[2]) = 0xff800000; ++ *((int *)&__m128_op1[1]) = 0xe593c8c4; ++ *((int *)&__m128_op1[0]) = 0xe593c8c4; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; 
++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x8a8a8a8a; ++ *((int *)&__m128_op1[2]) = 0x8a8a8a8a; ++ *((int *)&__m128_op1[1]) = 0x8a8a8a8a; ++ *((int *)&__m128_op1[0]) = 0x8a8a8a8a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffff01ff01; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x66b34f643c9c626a; ++ *((unsigned long *)&__m128d_op0[0]) = 0x38d60e366e547876; ++ *((unsigned long *)&__m128d_op1[1]) = 0x66b34f643c9c626a; ++ *((unsigned long *)&__m128d_op1[0]) = 0x38d60e366e547876; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned 
long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xfcfcfcdcfcfcfcdc; ++ *((unsigned long *)&__m128d_op0[0]) = 0xfcfcfcdcfcfcfcdc; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128d_op1[1]) = 0x000700000004fdff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x000300000000fdff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) 
= 0xf2f444429d96dbe1; ++ *((unsigned long *)&__m128d_op0[0]) = 0xddd76c75f2f44442; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128d_op1[0]) = 0xc1f03e1042208410; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffbfff7fffc000; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffff43dfffff81fb; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_ceq.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_ceq.c +new file mode 100644 +index 000000000..98941b47d +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_ceq.c +@@ -0,0 +1,516 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ 
*((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00007f00; ++ *((int *)&__m128_op1[1]) = 
0x00000000; ++ *((int *)&__m128_op1[0]) = 0x01000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x08fdc221; ++ *((int *)&__m128_op0[2]) = 0xbfdb1927; ++ *((int *)&__m128_op0[1]) = 0x4303c67e; ++ *((int *)&__m128_op0[0]) = 0x9b7fb213; ++ *((int *)&__m128_op1[3]) = 0x0000800c; ++ *((int *)&__m128_op1[2]) = 0x0004300c; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000001; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000800; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00007fff; ++ *((int *)&__m128_op0[2]) = 0x00007fff; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x2bfd9461; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x2bfd9461; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int 
*)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x01000000; ++ *((int *)&__m128_op0[0]) = 0x01000000; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xcd636363; ++ *((int *)&__m128_op1[2]) = 0xcd636363; ++ *((int *)&__m128_op1[1]) = 0xcd636363; ++ *((int *)&__m128_op1[0]) = 0xcd636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x007fffff; ++ *((int *)&__m128_op0[1]) = 0x007fffff; ++ *((int *)&__m128_op0[0]) = 0xff800000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x0000cecd; ++ *((int *)&__m128_op1[2]) = 0x00004657; ++ *((int *)&__m128_op1[1]) = 0x0000c900; ++ *((int *)&__m128_op1[0]) = 0x00011197; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xf51df8db; ++ *((int *)&__m128_op0[2]) = 0xd6050189; ++ *((int *)&__m128_op0[1]) = 0x0983e2db; ++ *((int *)&__m128_op0[0]) = 0xf235ed87; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0x3ea5016b; ++ *((int *)&__m128_op1[1]) = 0xfffefffe; ++ *((int *)&__m128_op1[0]) = 0x3f6fb04d; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vfcmp_cueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 
0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0xffa8ff9f; ++ *((int *)&__m128_op1[1]) = 0x0000ffff; ++ *((int *)&__m128_op1[0]) = 0xffabff99; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vfcmp_cueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x0000ff00; ++ *((int *)&__m128_op1[3]) = 0x40404040; ++ *((int *)&__m128_op1[2]) = 0x40404040; ++ *((int *)&__m128_op1[1]) = 0x40404040; ++ *((int *)&__m128_op1[0]) = 0x40404040; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x3bcc5098; ++ *((int *)&__m128_op1[2]) = 0x703fa5f0; ++ *((int *)&__m128_op1[1]) = 0xab7b3134; ++ *((int 
*)&__m128_op1[0]) = 0x9703f605; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x000000ff; ++ *((int *)&__m128_op0[0]) = 0xfe01fd02; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x0001fe01; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x000000ff; ++ *((int *)&__m128_op0[0]) = 0xfe01fd02; ++ *((int *)&__m128_op1[3]) = 0x00000001; ++ *((int *)&__m128_op1[2]) = 0x00000100; ++ *((int *)&__m128_op1[1]) = 0x00000001; ++ *((int *)&__m128_op1[0]) = 0x00000100; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00800000; ++ *((int *)&__m128_op0[0]) = 0x00800000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00800000; ++ *((int *)&__m128_op1[0]) = 0x00800000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xc2409edab019323f; ++ *((unsigned long *)&__m128d_op0[0]) = 0x460f3b393ef4be3a; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0100000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_ceq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x000000ff00000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x000000ff00000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x00ffff0000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00ffff0000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_ceq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ 
__m128i_out = __lsx_vfcmp_ceq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffbfffffffbf; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffbfffffffbf; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x000000000000ff00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_ceq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xfffdfffdfffdfffd; ++ *((unsigned long *)&__m128d_op1[0]) = 0xfffdfffdfffdfffd; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_ceq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000014eb54ab; ++ *((unsigned long *)&__m128d_op0[0]) = 0x14eb6a002a406a00; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000014eb54ab; ++ *((unsigned long *)&__m128d_op1[0]) = 0x14eb6a002a406a00; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_ceq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xfffffffffffffff4; ++ *((unsigned long *)&__m128d_op1[0]) = 0xfffffffffffffff4; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vfcmp_ceq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cueq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cueq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cueq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0100000001000100; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0100010000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cueq_d 
(__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000006f00001f0a; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000c000ffffc000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cueq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x00000000003dffc2; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00000000003dffc2; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cueq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffe000ffdf; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cueq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000010100000101; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000010100000101; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cueq_d (__m128d_op0, 
__m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x000000000000000b; ++ *((unsigned long *)&__m128d_op0[0]) = 0x000000000000000b; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cueq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cueq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xd78cfd70b5f65d77; ++ *((unsigned long *)&__m128d_op1[0]) = 0x5779108fdedda7e5; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cueq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x000000ff0000ffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cueq_d (__m128d_op0, __m128d_op1); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cle.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cle.c +new file mode 100644 +index 000000000..409bce0ec +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cle.c +@@ -0,0 +1,530 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00001802; ++ *((int *)&__m128_op0[0]) = 0x041b0013; ++ *((int *)&__m128_op1[3]) = 0xff800000; ++ *((int *)&__m128_op1[2]) = 0xff800000; ++ *((int *)&__m128_op1[1]) = 0xff800000; ++ *((int *)&__m128_op1[0]) = 0xc3080000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x17fda829; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000404; ++ *((int *)&__m128_op1[2]) = 0x00000383; ++ *((int *)&__m128_op1[1]) = 0xffffe000; ++ *((int *)&__m128_op1[0]) = 0xffff1fff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x000000fe; ++ *((int *)&__m128_op0[2]) = 0x808000ff; ++ *((int *)&__m128_op0[1]) = 0x000000fe; ++ *((int *)&__m128_op0[0]) = 0x808000fe; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int 
*)&__m128_op0[3]) = 0x00000020; ++ *((int *)&__m128_op0[2]) = 0x00000020; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x0000ffc1; ++ *((int *)&__m128_op1[0]) = 0x00010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000004; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xe0000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x01010001; ++ *((int *)&__m128_op1[2]) = 0x00010001; ++ *((int *)&__m128_op1[1]) = 0x01010301; ++ *((int *)&__m128_op1[0]) = 0x00010001; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000001; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ 
*((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0xffffff00; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0x00000001; ++ *((int *)&__m128_op1[2]) = 0x00000001; ++ *((int *)&__m128_op1[1]) = 0x00000001; ++ *((int *)&__m128_op1[0]) = 0x00000001; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vfcmp_cle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000001; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00010001; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00060fbf; ++ *((int *)&__m128_op1[2]) = 0x02040fbf; ++ *((int *)&__m128_op1[1]) = 0x00020fbf; ++ *((int *)&__m128_op1[0]) = 0x02000fbf; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cle_s 
(__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x0a752a55; ++ *((int *)&__m128_op0[1]) = 0x0a753500; ++ *((int *)&__m128_op0[0]) = 0x950fa306; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x380fdfdf; ++ *((int *)&__m128_op1[0]) = 0xc0000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vfcmp_cle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000001; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000007fff800000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000080007f80800; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000001000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000080800000808; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000080800000808; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffff80800001; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffff80800001; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x5f675e96a8d359f5; ++ *((unsigned long *)&__m128d_op0[0]) = 0x46387f95d9a68001; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x131211101211100f; ++ *((unsigned long *)&__m128d_op0[0]) = 0x11100f0e100f0e0d; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x000000000002a000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffff7f8c; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x98147a504d145000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x377b810912c0e000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00009c7c00007176; ++ *((unsigned long *)&__m128d_op1[1]) = 0xfcfcfcfcfcfcfcfd; ++ *((unsigned long *)&__m128d_op1[0]) = 0xfcfcfcfcfcfc0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xfffffffff359f358; ++ *((unsigned long *)&__m128d_op1[0]) = 0xfffffffff359f358; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000003ff8; ++ *((unsigned long *)&__m128d_op1[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128d_op1[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000001; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000001; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000001; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000001; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cule_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 
0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cule_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cule_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000001; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x4f804f81; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x4f804f80; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cule_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x7fff0007; ++ *((int *)&__m128_op0[2]) = 0xe215b122; ++ *((int *)&__m128_op0[1]) = 0x7ffeffff; ++ *((int *)&__m128_op0[0]) = 0x7bfff828; ++ *((int *)&__m128_op1[3]) = 0x80010009; ++ *((int *)&__m128_op1[2]) = 0x816ac5de; ++ *((int *)&__m128_op1[1]) = 0x80010001; ++ *((int *)&__m128_op1[0]) = 0x84000bd8; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = 
__lsx_vfcmp_cule_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xfefa0000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cule_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x9c9c9c9c; ++ *((int *)&__m128_op1[2]) = 0x9c9c9c9c; ++ *((int *)&__m128_op1[1]) = 0x9c9c9c9c; ++ *((int *)&__m128_op1[0]) = 0x9c9c9c9c; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cule_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x0c0b0a09; ++ *((int *)&__m128_op0[2]) = 0x0b0a0908; ++ *((int *)&__m128_op0[1]) = 0x0a090807; ++ *((int *)&__m128_op0[0]) = 0x09080706; ++ *((int *)&__m128_op1[3]) = 0x0c0b0a09; ++ *((int *)&__m128_op1[2]) = 0x0b0a0908; ++ *((int *)&__m128_op1[1]) = 0x0a090807; ++ *((int *)&__m128_op1[0]) = 0x09080706; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cule_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int 
*)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000020; ++ *((int *)&__m128_op1[2]) = 0x00000020; ++ *((int *)&__m128_op1[1]) = 0x0000001f; ++ *((int *)&__m128_op1[0]) = 0x0000001f; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cule_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x7ff80000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x7ff80000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cule_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffff80ff0010ff06; ++ *((unsigned long *)&__m128d_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xedfaedfaedfaedfa; ++ *((unsigned long *)&__m128d_op1[0]) = 0xedfaedfaedfaedfa; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cule_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cule_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cule_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cule_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cule_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cule_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_clt.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_clt.c +new file mode 100644 +index 000000000..39c9cf7a7 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_clt.c +@@ -0,0 +1,476 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x56411278; ++ *((int *)&__m128_op0[2]) = 0x43c0d41e; ++ *((int *)&__m128_op0[1]) = 0x0124d8f6; ++ *((int *)&__m128_op0[0]) = 0xa494006b; ++ *((int *)&__m128_op1[3]) = 0x7f800000; ++ *((int *)&__m128_op1[2]) = 0xff800000; ++ *((int *)&__m128_op1[1]) = 0xff800000; ++ *((int *)&__m128_op1[0]) = 0xff800000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_clt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x84939413; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000002; ++ *((int *)&__m128_op0[0]) = 0xbefcb21e; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 
0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vfcmp_clt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_clt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xfffefff6; ++ *((int *)&__m128_op0[0]) = 0xfff80002; ++ *((int *)&__m128_op1[3]) = 0x000000c5; ++ *((int *)&__m128_op1[2]) = 0xac01015b; ++ *((int *)&__m128_op1[1]) = 0xaaacac88; ++ *((int *)&__m128_op1[0]) = 0xa3a9a96a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_clt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_clt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xff84fff4; 
++ *((int *)&__m128_op0[2]) = 0xff84fff4; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xfffffff0; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_clt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x11000f20; ++ *((int *)&__m128_op0[2]) = 0x10000e20; ++ *((int *)&__m128_op0[1]) = 0x0f000d20; ++ *((int *)&__m128_op0[0]) = 0x0e000c20; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_clt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000c00; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00bd003d; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_clt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000005; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000005; ++ *((int *)&__m128_op1[3]) = 0xfffefffe; ++ *((int *)&__m128_op1[2]) = 0xfffefffe; ++ *((int *)&__m128_op1[1]) = 
0xfffefffe; ++ *((int *)&__m128_op1[0]) = 0xfffefffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_clt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_clt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xff800001; ++ *((int *)&__m128_op0[0]) = 0x0f800000; ++ *((int *)&__m128_op1[3]) = 0x00000009; ++ *((int *)&__m128_op1[2]) = 0x00000009; ++ *((int *)&__m128_op1[1]) = 0xff80000a; ++ *((int *)&__m128_op1[0]) = 0x0f800009; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vfcmp_clt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x0000ffff; ++ *((int *)&__m128_op0[1]) = 0x3b5eae24; ++ *((int *)&__m128_op0[0]) = 0xab7e3848; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00003f80; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vfcmp_clt_s (__m128_op0, __m128_op1); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x63636363; ++ *((int *)&__m128_op0[2]) = 0x3f3e47c1; ++ *((int *)&__m128_op0[1]) = 0x41f8e080; ++ *((int *)&__m128_op0[0]) = 0xf1ef4eaa; ++ *((int *)&__m128_op1[3]) = 0x0000cecd; ++ *((int *)&__m128_op1[2]) = 0x00004657; ++ *((int *)&__m128_op1[1]) = 0x0000c900; ++ *((int *)&__m128_op1[0]) = 0x00011197; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vfcmp_clt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x454c2996; ++ *((int *)&__m128_op0[2]) = 0x0ffe354e; ++ *((int *)&__m128_op0[1]) = 0x9e063f80; ++ *((int *)&__m128_op0[0]) = 0x2742ba3e; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x42652524; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vfcmp_cult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00070000; ++ *((int *)&__m128_op0[2]) = 0x00050000; ++ *((int *)&__m128_op0[1]) = 0x00030000; ++ *((int *)&__m128_op0[0]) = 0x00010000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0xff81007c; ++ *((int *)&__m128_op1[1]) = 0xffb7005f; ++ *((int *)&__m128_op1[0]) = 0x0070007c; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x0000006f; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int 
*)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xfbffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0x7bffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0xfbffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0x7bffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vfcmp_cult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x0002a000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x0002a000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xfc606ec5; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x14155445; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x01030103; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00060fbf; ++ *((int *)&__m128_op0[2]) = 0x02040fbf; ++ *((int *)&__m128_op0[1]) = 0x00020fbf; ++ *((int *)&__m128_op0[0]) = 0x02000fbf; ++ *((int *)&__m128_op1[3]) = 0x63636363; ++ *((int *)&__m128_op1[2]) = 0x63636363; ++ *((int *)&__m128_op1[1]) = 0xffd27db0; ++ *((int *)&__m128_op1[0]) = 0x10d20fbf; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x7f800000; ++ *((int *)&__m128_op0[2]) = 
0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000008; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x000000010000003f; ++ *((unsigned long *)&__m128d_op1[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x000000010000003f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_clt_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000003; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000036de0000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x000000003be14000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_clt_d 
(__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x1111113111111141; ++ *((unsigned long *)&__m128d_op0[0]) = 0x1111113111111121; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0032000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_clt_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000080000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xe93d0bd19ff07013; ++ *((unsigned long *)&__m128d_op1[0]) = 0x65017c2ac9ca9fd0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_clt_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x007f007f007f007e; ++ *((unsigned long *)&__m128d_op1[0]) = 0x007f007f007effc6; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_clt_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000015800000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000158; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_clt_d (__m128d_op0, 
__m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_clt_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x7f8100017f810001; ++ *((unsigned long *)&__m128d_op0[0]) = 0x7f8100017f810001; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_clt_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x004200a000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x004200a000200001; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x7fff00007fff7fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cult_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cult_d (__m128d_op0, __m128d_op1); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128d_op1[0]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cult_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cult_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x000000000000001e; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cult_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000455555555; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000001580000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cult_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cult_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000001f0000001f; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cult_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cne.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cne.c +new file mode 100644 +index 000000000..c3da43bb4 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cne.c +@@ -0,0 +1,378 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int 
unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0x7ff80000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cne_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x0bd80bd8; ++ *((int *)&__m128_op1[2]) = 0x0bdfffff; ++ *((int *)&__m128_op1[1]) = 0x0bd80bd8; ++ *((int *)&__m128_op1[0]) = 0x0bd80000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cne_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00ff0077; ++ *((int *)&__m128_op0[2]) = 0x00070077; ++ *((int *)&__m128_op0[1]) = 0x00e600ef; ++ *((int *)&__m128_op0[0]) = 0x00ee01de; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00007fff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cne_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00100010; ++ *((int *)&__m128_op0[2]) = 0x00100010; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) 
= 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cne_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x000000ff; ++ *((int *)&__m128_op1[0]) = 0xfe01fd02; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cne_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xbf800000; ++ *((int *)&__m128_op0[2]) = 0x0000ffff; ++ *((int *)&__m128_op0[1]) = 0xcf000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x003f0000; ++ *((int *)&__m128_op1[2]) = 0x0000003f; ++ *((int *)&__m128_op1[1]) = 0x003f0000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vfcmp_cune_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) 
= 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cune_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cune_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x01ff01ff; ++ *((int *)&__m128_op0[2]) = 0x01ff01ff; ++ *((int *)&__m128_op0[1]) = 0x01ff01ff; ++ *((int *)&__m128_op0[0]) = 0x01ff01ff; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cune_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x20202020; ++ *((int *)&__m128_op0[2]) = 0x20202020; ++ *((int *)&__m128_op0[1]) = 0x20202020; ++ *((int *)&__m128_op0[0]) = 0x20207fff; ++ *((int *)&__m128_op1[3]) = 0x32d3f35e; ++ *((int *)&__m128_op1[2]) = 0xcd509d13; ++ *((int *)&__m128_op1[1]) = 0x3e081b3c; ++ *((int *)&__m128_op1[0]) = 0x93f6b356; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cune_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffff0000; ++ *((int *)&__m128_op0[2]) = 0xffff0000; ++ *((int 
*)&__m128_op0[1]) = 0x40408010; ++ *((int *)&__m128_op0[0]) = 0x80200110; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cune_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x80000000; ++ *((int *)&__m128_op0[2]) = 0x80000008; ++ *((int *)&__m128_op0[1]) = 0xa2f54a1e; ++ *((int *)&__m128_op0[0]) = 0xa2f54a1e; ++ *((int *)&__m128_op1[3]) = 0x80000000; ++ *((int *)&__m128_op1[2]) = 0x80000008; ++ *((int *)&__m128_op1[1]) = 0xa2f54a1e; ++ *((int *)&__m128_op1[0]) = 0xa2f54a1e; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cune_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cune_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x000000000000fc00; ++ *((unsigned long *)&__m128d_op0[0]) = 0x000000000000fc00; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cne_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0001000100000004; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cne_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000020302030; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000020302030; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000100010; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000100010; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cne_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cne_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x5d7f5d007f6a007f; ++ *((unsigned long *)&__m128d_op1[0]) = 0x7fff7fff7fff7f00; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cne_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x7ff000ff6220c0c1; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffe8081000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cne_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cne_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cne_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0001fffe0001fffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ 
__m128i_out = __lsx_vfcmp_cne_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x1c083b1f3b1f3b1f; ++ *((unsigned long *)&__m128d_op0[0]) = 0xf244b948a323ab42; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cne_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cune_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cune_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x000100fe000070a1; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00000115ffffffa1; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = 
__lsx_vfcmp_cune_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xfffffffff8f8da00; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffff01018888; ++ *((unsigned long *)&__m128d_op1[1]) = 0x000000003ea5016c; ++ *((unsigned long *)&__m128d_op1[0]) = 0xfffefefd3f7027c5; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cune_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cune_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cune_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x000000000000ffce; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x80808080806b000b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cune_d 
(__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cune_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cune_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x400000003fffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x4000000040000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cune_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000014eb54ab; ++ *((unsigned long *)&__m128d_op0[0]) = 0x14eb6a002a406a00; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00007fff7fff8000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cune_d (__m128d_op0, 
__m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cor.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cor.c +new file mode 100644 +index 000000000..5228dbede +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cor.c +@@ -0,0 +1,170 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vfcmp_cor_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xfffffeff; ++ *((int *)&__m128_op0[2]) = 0xfffffeff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xfffffcff; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cor_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00800000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xf4b6f3f5; ++ *((int *)&__m128_op0[0]) = 0x2f4ef4a8; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cor_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x08080808; ++ *((int *)&__m128_op1[2]) = 0x08080808; ++ *((int *)&__m128_op1[1]) = 0x08080808; ++ *((int *)&__m128_op1[0]) = 0x08080808; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cor_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x0000ffce; ++ *((int *)&__m128_op1[3]) = 0xffff0001; ++ *((int *)&__m128_op1[2]) = 0x1cf0c569; ++ *((int *)&__m128_op1[1]) = 0xc0000002; ++ *((int *)&__m128_op1[0]) = 0xb0995850; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cor_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x0a752a55; ++ *((int *)&__m128_op0[1]) = 0x0a753500; ++ *((int *)&__m128_op0[0]) = 0x950fa306; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x0a752a55; ++ *((int *)&__m128_op1[1]) = 0x0a753500; ++ *((int *)&__m128_op1[0]) = 0x950fa306; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cor_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cor_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cor_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cor_d (__m128d_op0, 
__m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cor_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000ffffffdfffdf; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000ffffffdfffdf; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cor_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cor_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xd70b30c96ea9f4e8; ++ *((unsigned long *)&__m128d_op0[0]) = 0xa352bfac9269e0aa; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cor_d (__m128d_op0, __m128d_op1); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cor_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cun.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cun.c +new file mode 100644 +index 000000000..a2beff53f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cun.c +@@ -0,0 +1,253 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x0000ffff; ++ *((int *)&__m128_op0[2]) = 0x0000ffff; ++ *((int *)&__m128_op0[1]) = 0x0000ffff; ++ *((int *)&__m128_op0[0]) = 0x0000fffe; ++ *((int *)&__m128_op1[3]) = 0x0000ffff; ++ *((int *)&__m128_op1[2]) = 0x0000ffff; ++ *((int *)&__m128_op1[1]) = 0x0000ffff; ++ *((int *)&__m128_op1[0]) = 0x0000fffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00200010; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x7f800000; ++ *((int *)&__m128_op0[2]) = 0x7f800000; ++ *((int *)&__m128_op0[1]) = 0x7f800000; ++ *((int *)&__m128_op0[0]) = 0x7f800000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x0000fffe; ++ *((int *)&__m128_op0[0]) = 0x0000ffff; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int 
*)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffff0008; ++ *((int *)&__m128_op1[3]) = 0xffc2ffe0; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x0000ffc1; ++ *((int *)&__m128_op1[0]) = 0x00010001; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000008; ++ *((int *)&__m128_op0[1]) = 0x00200020; ++ *((int *)&__m128_op0[0]) = 0x00200020; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xfffefffe; ++ *((int *)&__m128_op0[2]) = 0xfffefffe; ++ *((int *)&__m128_op0[1]) = 0xfffefffe; ++ *((int *)&__m128_op0[0]) = 0xfffefffe; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0xf001f001; ++ *((int *)&__m128_op1[0]) = 0x0101f002; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ 
*((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xfeffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xfeffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0x00000fff; ++ *((int *)&__m128_op1[2]) = 0xffffe000; ++ *((int *)&__m128_op1[1]) = 0x00001020; ++ *((int *)&__m128_op1[0]) = 0x20204000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vfcmp_cun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128d_op0[1]) = 0xfefd7f7f7f7f7f7e; ++ *((unsigned long *)&__m128d_op0[0]) = 0xdffdbffeba6f5543; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cun_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffff3d06ffff4506; ++ *((unsigned long *)&__m128d_op0[0]) = 0x7ffffffe7ffff800; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cun_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x000000003bfb4000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cun_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cun_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cun_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000100010100; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cun_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffff00011cf0c569; ++ *((unsigned long *)&__m128d_op0[0]) = 0xc0000002b0995850; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x80808080806b000b; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cun_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cun_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128d_op1[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cun_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00000000084d12ce; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000024170000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x00000000084d12ce; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000044470000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cun_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_saf.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_saf.c +new file mode 100644 +index 000000000..bfa4914be +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_saf.c +@@ -0,0 +1,214 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int 
*)&__m128_op0[2]) = 0x80000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00a300a3; ++ *((int *)&__m128_op1[2]) = 0x00a300a3; ++ *((int *)&__m128_op1[1]) = 0x00a300a3; ++ *((int *)&__m128_op1[0]) = 0x00a300a3; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xb8ec43be; ++ *((int *)&__m128_op1[2]) = 0xfe38e64b; ++ *((int *)&__m128_op1[1]) = 0x6477d042; ++ *((int *)&__m128_op1[0]) = 0x343cce24; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ 
*((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000010; ++ *((int *)&__m128_op0[2]) = 0x00100010; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00020000; ++ *((int *)&__m128_op0[0]) = 0xffff0001; ++ *((int *)&__m128_op1[3]) = 0x63636363; ++ *((int *)&__m128_op1[2]) = 0x63636363; ++ *((int *)&__m128_op1[1]) = 0x63636363; ++ *((int *)&__m128_op1[0]) = 0x63636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x03080401; ++ *((int *)&__m128_op0[2]) = 0x0d090107; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0a0a0a000a0a0a00; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0a0a0a0009090900; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00000000ffffff01; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ 
__m128i_out = __lsx_vfcmp_saf_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x80808080806b000b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x000000007ff000ff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vfcmp_saf_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x67157b5100005000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x387c7e0a133f2000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xf359f359f359f359; ++ *((unsigned long *)&__m128d_op0[0]) = 0xf359f359f359f359; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0177fff0fffffff0; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00000000011ff8bc; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_seq.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_seq.c +new file mode 100644 +index 000000000..bc573936d +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_seq.c +@@ -0,0 +1,450 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include 
"../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_seq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xf2f2e5e5; ++ *((int *)&__m128_op0[2]) = 0xe5e5e5e5; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xe5e5e5e5; ++ *((int *)&__m128_op1[2]) = 0xe5e5e5e5; ++ *((int *)&__m128_op1[1]) = 0xe5e5e5e5; ++ *((int *)&__m128_op1[0]) = 0xe4e4e46d; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_seq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int 
*)&__m128_op1[2]) = 0x00800000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x1f400000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_seq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x14ccc632; ++ *((int *)&__m128_op0[2]) = 0x0076a4d2; ++ *((int *)&__m128_op0[1]) = 0x685670d2; ++ *((int *)&__m128_op0[0]) = 0x7e00682a; ++ *((int *)&__m128_op1[3]) = 0x14ccc632; ++ *((int *)&__m128_op1[2]) = 0x0076a4d2; ++ *((int *)&__m128_op1[1]) = 0x685670d2; ++ *((int *)&__m128_op1[0]) = 0x7e00682a; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_seq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00010001; ++ *((int *)&__m128_op0[2]) = 0x00010001; ++ *((int *)&__m128_op0[1]) = 0x00010001; ++ *((int *)&__m128_op0[0]) = 0x00010001; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_seq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0xc6c6c6c6; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0xc6c6c6c6; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0xc6c6c6c6; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0xc6c6c6c6; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ 
__m128i_out = __lsx_vfcmp_seq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0x00000006; ++ *((int *)&__m128_op1[2]) = 0x00007fff; ++ *((int *)&__m128_op1[1]) = 0x00000008; ++ *((int *)&__m128_op1[0]) = 0xffffa209; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_seq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_seq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000001; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vfcmp_sueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ 
*((int *)&__m128_op0[0]) = 0x00fc0000; ++ *((int *)&__m128_op1[3]) = 0xfe07e5fe; ++ *((int *)&__m128_op1[2]) = 0xfefdddfe; ++ *((int *)&__m128_op1[1]) = 0x00020100; ++ *((int *)&__m128_op1[0]) = 0xfedd0c00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffff0000; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x0000fffd; ++ *((int *)&__m128_op1[3]) = 0x7fffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000001; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vfcmp_sueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00008000; ++ *((int *)&__m128_op1[2]) = 0x3f80ffff; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x370bdfec; ++ *((int *)&__m128_op0[2]) = 0xffecffec; ++ *((int *)&__m128_op0[1]) = 0x370bdfec; ++ *((int *)&__m128_op0[0]) = 0xa2eb9931; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000040; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000040; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vfcmp_sueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xc2fc0000; ++ *((int *)&__m128_op1[2]) = 0xc3040000; ++ *((int *)&__m128_op1[1]) = 0xc2fc0000; ++ *((int *)&__m128_op1[0]) = 0xc3040000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int 
*)&__m128_op0[3]) = 0x00fe00fe; ++ *((int *)&__m128_op0[2]) = 0x000200fe; ++ *((int *)&__m128_op0[1]) = 0x00fe00fe; ++ *((int *)&__m128_op0[0]) = 0x000200fe; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000004; ++ *((int *)&__m128_op1[0]) = 0x55555555; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000158; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0xffffffa8; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vfcmp_sueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ 
*((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xf3e6586b; ++ *((int *)&__m128_op0[2]) = 0x60d7b152; ++ *((int *)&__m128_op0[1]) = 0xf7077b93; ++ *((int *)&__m128_op0[0]) = 0x4ac0e000; ++ *((int *)&__m128_op1[3]) = 0x1498507a; ++ *((int *)&__m128_op1[2]) = 0x144d0050; ++ *((int *)&__m128_op1[1]) = 0x7b370981; ++ *((int *)&__m128_op1[0]) = 0xc01200e0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffc2007a; ++ *((int *)&__m128_op0[2]) = 0xff230027; ++ *((int *)&__m128_op0[1]) = 0x0080005e; ++ *((int *)&__m128_op0[0]) = 0xff600001; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x000100010001fffd; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_seq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x000000004fc04f81; ++ *((unsigned long *)&__m128d_op1[0]) = 0x000000004fc04f80; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_seq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_seq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x13f9c5b60028a415; ++ *((unsigned long *)&__m128d_op1[0]) = 0x545cab1d81a83bea; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_seq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_seq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sueq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xfefd7f7f7f7f7f7e; ++ *((unsigned long *)&__m128d_op0[0]) = 0xdffdbffeba6f5543; ++ *((unsigned long *)&__m128d_op1[1]) = 0xfefd7f7f7f7f7f7e; ++ *((unsigned long *)&__m128d_op1[0]) = 0xdffdbffeba6f5543; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sueq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xfffffff700000009; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0xfffffff700000009; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sueq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x4fa432d67fc00000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0141010101410101; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0141010101410101; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sueq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) 
= 0xfffcffff00000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000fffd000a0000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xf0fd800080000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000a00028004000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sueq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00820082ff81ff81; ++ *((unsigned long *)&__m128d_op0[0]) = 0xff81ff81ff81ff81; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sueq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000001000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000120002000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000100013fa0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sueq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sueq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 
0x0007000100040102; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0003000100010101; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0007000100040102; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0003000100010101; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sueq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sueq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sueq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sle.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sle.c +new file mode 100644 +index 000000000..87cb8da7c +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sle.c +@@ -0,0 +1,407 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 
__m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffdfff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffdfff; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xffffe000; ++ *((int *)&__m128_op1[0]) = 0x01ffe200; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00010002; ++ *((int *)&__m128_op0[2]) = 0x0000fe7d; ++ *((int *)&__m128_op0[1]) = 0x00010002; ++ *((int *)&__m128_op0[0]) = 0x0000fe02; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x0000007b; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000001; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x1223dabf; ++ *((int *)&__m128_op0[2]) = 0x4c3b3549; ++ *((int *)&__m128_op0[1]) = 0x8e8f8626; ++ *((int *)&__m128_op0[0]) = 0xf15be124; ++ *((int *)&__m128_op1[3]) = 0xfffffacd; ++ *((int *)&__m128_op1[2]) = 0xb6dbecac; ++ *((int *)&__m128_op1[1]) = 0x1f5533a6; ++ *((int *)&__m128_op1[0]) = 0x94f902c0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xfbffffff; ++ *((int *)&__m128_op0[0]) = 0x27001517; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x0000ffff; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vfcmp_sle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0xffff53d9; ++ *((int *)&__m128_op0[1]) = 0xffff0001; ++ *((int *)&__m128_op0[0]) = 0xffff9515; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; 
++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00010001; ++ *((int *)&__m128_op1[2]) = 0x00010001; ++ *((int *)&__m128_op1[1]) = 0x00010001; ++ *((int *)&__m128_op1[0]) = 0x00010001; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000080; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00df020f; ++ *((int *)&__m128_op0[0]) = 0x0078007f; ++ *((int *)&__m128_op1[3]) = 0x0037ffd4; ++ *((int *)&__m128_op1[2]) = 0x0083ffe5; ++ *((int *)&__m128_op1[1]) = 
0x001e0052; ++ *((int *)&__m128_op1[0]) = 0x001ffff9; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00ff00ff; ++ *((int *)&__m128_op0[0]) = 0x00ff00ff; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sule_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x05452505; ++ *((int *)&__m128_op0[1]) = 0x00000004; ++ *((int *)&__m128_op0[0]) = 0x442403e4; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sule_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000101; ++ *((int *)&__m128_op0[2]) = 0x00000101; ++ *((int *)&__m128_op0[1]) = 0x00000101; ++ *((int *)&__m128_op0[0]) = 0x00000101; ++ *((int *)&__m128_op1[3]) = 0x00000002; ++ *((int *)&__m128_op1[2]) = 0x00000002; ++ *((int *)&__m128_op1[1]) = 0x00000002; ++ *((int *)&__m128_op1[0]) = 0x00000002; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sule_s (__m128_op0, __m128_op1); 
++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sule_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sule_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00010000; ++ *((int *)&__m128_op0[2]) = 0x00010000; ++ *((int *)&__m128_op0[1]) = 0x0000cd63; ++ *((int *)&__m128_op0[0]) = 0x0000cd63; ++ *((int *)&__m128_op1[3]) = 0xffffcd63; ++ *((int *)&__m128_op1[2]) = 0xffffcd63; ++ *((int *)&__m128_op1[1]) = 0xffffd765; ++ *((int *)&__m128_op1[0]) = 0xffffd765; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sule_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int 
*)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sule_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffff00ff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffff0000; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0x0000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sule_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffff0c8000c212; ++ *((unsigned long *)&__m128d_op0[0]) = 0xfefffeff7f002d06; ++ *((unsigned long *)&__m128d_op1[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00f0008100800080; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00f000807000009e; ++ *((unsigned long *)&__m128d_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128d_op1[0]) = 0xfffe00029f9f6061; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffc0800000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000c0010000a186; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00067fff0002a207; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffff80000001; ++ 
*((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sule_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sule_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x3fbf3fbf00007fff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x000000003fbf3fbf; ++ *((unsigned long *)&__m128d_op1[0]) = 0x7fff7fff7fff7ff8; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sule_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x00000000013ec13e; ++ *((unsigned long *)&__m128d_op1[0]) = 0xc03fc03fc0ff00ff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sule_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128d_op0[0]) = 0xffffffff00018d8b; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sule_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x67eb85af0000b000; ++ *((unsigned long *)&__m128d_op1[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sule_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xfe3bfb01fe3bfe01; ++ *((unsigned long *)&__m128d_op0[0]) = 0xfe03fe3ffe01fa21; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sule_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_slt.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_slt.c +new file mode 100644 +index 000000000..3845e8ec3 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_slt.c +@@ -0,0 +1,512 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lsxintrin.h> ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2,
__m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x0000ffff; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x0000ffff; ++ *((int *)&__m128_op1[0]) = 0x0000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_slt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x7f800000; ++ *((int *)&__m128_op0[2]) = 0x7f800000; ++ *((int *)&__m128_op0[1]) = 0x7f800000; ++ *((int *)&__m128_op0[0]) = 0x7f800000; ++ *((int *)&__m128_op1[3]) = 0x7f800000; ++ *((int *)&__m128_op1[2]) = 0x7f800000; ++ *((int *)&__m128_op1[1]) = 0x7f800000; ++ *((int *)&__m128_op1[0]) = 0x7f800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_slt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000001; ++ *((int *)&__m128_op0[2]) = 0x00007f01; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_slt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffff02; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_slt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x05452505; ++ *((int *)&__m128_op1[1]) = 0x00000004; ++ *((int *)&__m128_op1[0]) = 0x442403e4; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_slt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x5f675e96; ++ *((int *)&__m128_op0[2]) = 0xe29a5a60; ++ *((int *)&__m128_op0[1]) = 0x7fff7fff; ++ *((int *)&__m128_op0[0]) = 0x7fff7fff; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x5e695e95; ++ *((int *)&__m128_op1[0]) = 0xe1cb5a01; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_slt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00800080; ++ *((int *)&__m128_op0[2]) = 
0x00800080; ++ *((int *)&__m128_op0[1]) = 0x0080006b; ++ *((int *)&__m128_op0[0]) = 0x0000000b; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x80808080; ++ *((int *)&__m128_op1[0]) = 0x806b000b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_slt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x7f800000; ++ *((int *)&__m128_op0[2]) = 0x7f800000; ++ *((int *)&__m128_op0[1]) = 0x7f800000; ++ *((int *)&__m128_op0[0]) = 0x7f800000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_slt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xff800000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xff800000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vfcmp_slt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int 
*)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_slt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_slt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xfffefffe; ++ *((int *)&__m128_op0[0]) = 0xfffffffc; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0xfffefffe; ++ *((int *)&__m128_op1[0]) = 0xfffffffc; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffc2ffe7; ++ *((int *)&__m128_op0[2]) = 0x00000007; ++ *((int *)&__m128_op0[1]) = 0x0000ffc1; ++ *((int *)&__m128_op0[0]) = 0x00010001; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0xfffff1a0; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vfcmp_sult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x153e3e49; ++ *((int *)&__m128_op0[2]) = 0x307d0771; ++ *((int *)&__m128_op0[1]) = 0x0d8e3670; ++ *((int *)&__m128_op0[0]) = 0x6ac02b9b; ++ *((int *)&__m128_op1[3]) = 0x55aa55c3; ++ *((int *)&__m128_op1[2]) = 0xd5aa55c4; ++ *((int *)&__m128_op1[1]) = 0xaa55556f; ++ *((int *)&__m128_op1[0]) = 0xd5aaaac1; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 
0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0x0015172b; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xfffffffe; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xfffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) 
= 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0xffff0000; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00010001; ++ *((int *)&__m128_op0[2]) = 0x00010001; ++ *((int *)&__m128_op0[1]) = 0x00010001; ++ *((int *)&__m128_op0[0]) = 0x00010001; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int 
*)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x86dd8341; ++ *((int *)&__m128_op1[2]) = 0xb164f12b; ++ *((int *)&__m128_op1[1]) = 0x9611c398; ++ *((int *)&__m128_op1[0]) = 0x5b3159f5; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vfcmp_sult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x000000000000001f; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000300000003; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_slt_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_slt_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xe93d0bd19ff07013; ++ *((unsigned long *)&__m128d_op0[0]) = 0x65017c2ac9ca9fd0; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_slt_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128d_op0[1]) = 0x00d3012acc56f9bb; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000001021; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffb4ff; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffb4ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_slt_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x14ccc6320176a4d2; ++ *((unsigned long *)&__m128d_op0[0]) = 0x685670d37e80682a; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_slt_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_slt_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000001000000010; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000001000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_slt_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0080000000800000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_slt_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_slt_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x345002920f3017d6; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_slt_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_slt_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_slt_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sult_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128d_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sult_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffff8607db959f; ++ *((unsigned long *)&__m128d_op0[0]) = 0xff0cff78ff96ff14; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sult_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128d_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x7ef8000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sult_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x14ccc6320176a4d2; ++ *((unsigned long *)&__m128d_op0[0]) = 0x685670d37e80682a; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sult_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffff00000000ffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffff00000001; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffee00000004; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sult_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x80808080806b000b; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sult_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sult_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sne.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sne.c +new file mode 100644 +index 000000000..964eff79f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sne.c +@@ -0,0 +1,398 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x00003fee; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000004; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000002; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vfcmp_sne_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int 
*)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x03574e3a; ++ *((int *)&__m128_op1[2]) = 0x03574e3a; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sne_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00010400; ++ *((int *)&__m128_op1[3]) = 0x10f917d7; ++ *((int *)&__m128_op1[2]) = 0x2d3d01e4; ++ *((int *)&__m128_op1[1]) = 0x203e16d1; ++ *((int *)&__m128_op1[0]) = 0x16de012b; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sne_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x0000101f; ++ *((int *)&__m128_op0[2]) = 0xffff8b68; ++ *((int *)&__m128_op0[1]) = 0x00000b6f; ++ *((int *)&__m128_op0[0]) = 0xffff8095; ++ *((int *)&__m128_op1[3]) = 0x10f917d7; ++ *((int *)&__m128_op1[2]) = 0x2d3d01e4; ++ *((int *)&__m128_op1[1]) = 0x203e16d1; ++ *((int *)&__m128_op1[0]) = 0x16de012b; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vfcmp_sne_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x11000f20; ++ *((int *)&__m128_op0[2]) = 0x10000e20; ++ *((int *)&__m128_op0[1]) = 0x0f000d20; ++ *((int *)&__m128_op0[0]) = 0x0e000c20; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ 
*((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sne_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00050005; ++ *((int *)&__m128_op0[2]) = 0x00050005; ++ *((int *)&__m128_op0[1]) = 0x00050005; ++ *((int *)&__m128_op0[0]) = 0x00050005; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sne_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sne_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x15d926c7; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x0000e41b; ++ *((int *)&__m128_op1[3]) = 0xfffffacd; ++ *((int *)&__m128_op1[2]) = 0xb6dbecac; ++ *((int *)&__m128_op1[1]) = 0x1f5533a6; ++ *((int *)&__m128_op1[0]) = 0x94f902c0; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sne_s 
(__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x04040504; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x04040504; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sne_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sne_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x0001000c; ++ *((int *)&__m128_op0[2]) = 0xfffffff2; ++ *((int *)&__m128_op0[1]) = 0x0001000d; ++ *((int *)&__m128_op0[0]) = 0xfffffff1; ++ *((int *)&__m128_op1[3]) = 0xffff8a17; ++ *((int *)&__m128_op1[2]) = 0xffffc758; ++ *((int *)&__m128_op1[1]) = 0xffff69bb; ++ *((int *)&__m128_op1[0]) = 0xffffad3b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sne_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 
0xffffffff; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vfcmp_sune_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xff800000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xff800000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xffff1739; ++ *((int *)&__m128_op1[2]) = 0xffff48aa; ++ *((int *)&__m128_op1[1]) = 0xffff2896; ++ *((int *)&__m128_op1[0]) = 0xffff5b88; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sune_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000003; ++ *((int *)&__m128_op0[0]) = 0x0000003f; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000003; ++ *((int *)&__m128_op1[0]) = 0x0000003f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sune_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; 
++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sune_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sune_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x084d12ce; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x24170000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vfcmp_sune_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sune_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 
0x7474f6fd7474fefe; ++ *((unsigned long *)&__m128d_op0[0]) = 0xf474f6fef474f6fe; ++ *((unsigned long *)&__m128d_op1[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sne_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sne_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x1817161517161514; ++ *((unsigned long *)&__m128d_op1[0]) = 0x1615141315141312; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sne_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0101fe870101fe87; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0101fe8700000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x62cbf96e4acfaf40; ++ *((unsigned long *)&__m128d_op1[0]) = 0xf0bc9a5278285a4a; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sne_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x000000007fffa9ed; ++ 
*((unsigned long *)&__m128d_op0[0]) = 0x7f8000017fffca8b; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sne_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sne_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00000000ffff7603; ++ *((unsigned long *)&__m128d_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sne_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sune_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sune_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x1111113111111141; ++ *((unsigned long *)&__m128d_op1[0]) = 0x1111113111111121; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sune_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00ff000100ff00fe; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00ff003000ff00a0; ++ *((unsigned long *)&__m128d_op1[1]) = 0x000000005e695e95; ++ *((unsigned long *)&__m128d_op1[0]) = 0x5e695e96c396b402; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sune_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x000300037ff000ff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0003000300a10003; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sune_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0001000101010001; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sune_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000008000000080; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000008000000080; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sune_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000003ff8; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sune_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x01533b5e7489ae24; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffab7e71e33848; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sune_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sor.c 
b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sor.c +new file mode 100644 +index 000000000..ea47baf40 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sor.c +@@ -0,0 +1,269 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sor_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sor_s 
(__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x0000007f; ++ *((int *)&__m128_op0[2]) = 0x0000007f; ++ *((int *)&__m128_op0[1]) = 0x0000007f; ++ *((int *)&__m128_op0[0]) = 0x0000007f; ++ *((int *)&__m128_op1[3]) = 0x3ff00000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0xfffc0020; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vfcmp_sor_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x2757de72; ++ *((int *)&__m128_op0[2]) = 0x33d771a3; ++ *((int *)&__m128_op0[1]) = 0x166891d5; ++ *((int *)&__m128_op0[0]) = 0x1e8b7eff; ++ *((int *)&__m128_op1[3]) = 0x2757de72; ++ *((int *)&__m128_op1[2]) = 0x33d771a3; ++ *((int *)&__m128_op1[1]) = 0x166891d5; ++ *((int *)&__m128_op1[0]) = 0x1e8b7eff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sor_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00fe00ff; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sor_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000001; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 
0xfffffffe; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0xffffff02; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sor_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x0000000d; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xfffffe03; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xfffffe03; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sor_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sor_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sor_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sor_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xbafebb00; ++ *((int *)&__m128_op1[2]) = 0xffd500fe; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sor_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0xbffffffe; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sor_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int 
*)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sor_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x80000000; ++ *((int *)&__m128_op0[2]) = 0x80000000; ++ *((int *)&__m128_op0[1]) = 0x80000000; ++ *((int *)&__m128_op0[0]) = 0x80000000; ++ *((int *)&__m128_op1[3]) = 0x000000ff; ++ *((int *)&__m128_op1[2]) = 0x0000857a; ++ *((int *)&__m128_op1[1]) = 0x05fafe01; ++ *((int *)&__m128_op1[0]) = 0x01fe000e; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sor_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x7fff7fff; ++ *((int *)&__m128_op0[2]) = 0x7fff7fff; ++ *((int *)&__m128_op0[1]) = 0xbf6b8101; ++ *((int *)&__m128_op0[0]) = 0x81018101; ++ *((int *)&__m128_op1[3]) = 0xe3636363; ++ *((int *)&__m128_op1[2]) = 0x63abdf16; ++ *((int *)&__m128_op1[1]) = 0x41f8e080; ++ *((int *)&__m128_op1[0]) = 0x16161198; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sor_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sor_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sor_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000005d5d; ++ *((unsigned long *)&__m128d_op1[1]) = 0x08fdc221bfdb1927; ++ *((unsigned long *)&__m128d_op1[0]) = 0x4303c67e9b7fb213; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sor_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x7fffffff7ffffffb; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000040002; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sor_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000158; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sor_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xfffffff000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000d00000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sor_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sun.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sun.c +new file mode 100644 +index 000000000..68cb5a52f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sun.c +@@ -0,0 +1,335 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) 
= 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xe17fe003; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0x0000ffff; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0x00190819; ++ *((int *)&__m128_op1[2]) = 0x00190019; ++ *((int *)&__m128_op1[1]) = 0x00190819; ++ *((int *)&__m128_op1[0]) = 0x00190019; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xfe800000; ++ *((int *)&__m128_op0[0]) = 0x00000001; ++ *((int *)&__m128_op1[3]) = 0x7fffffff; ++ *((int *)&__m128_op1[2]) = 0x82bb9784; ++ *((int *)&__m128_op1[1]) = 0x7fffffff; ++ *((int *)&__m128_op1[0]) = 0xc6bb97ac; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x7f3f0180; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int 
*)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xa2321469; ++ *((int *)&__m128_op0[0]) = 0x7fd03f7f; ++ *((int *)&__m128_op1[3]) = 0x00000406; ++ *((int *)&__m128_op1[2]) = 0x00000406; ++ *((int *)&__m128_op1[1]) = 0x02020202; ++ *((int *)&__m128_op1[0]) = 0x0202fe02; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ 
__m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0xfffffff5; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000014; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000014; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0xfffc0004; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x67eb85b0; ++ *((int *)&__m128_op0[2]) = 0xb2ebb001; ++ *((int *)&__m128_op0[1]) = 0xc8847ef6; ++ 
*((int *)&__m128_op0[0]) = 0xed3f2000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x0002de46; ++ *((int *)&__m128_op0[2]) = 0x682de060; ++ *((int *)&__m128_op0[1]) = 0x09b50da6; ++ *((int *)&__m128_op0[0]) = 0xe67b8fc0; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x084d12ce; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x24170000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0x00ffffff; ++ *((int *)&__m128_op0[0]) = 0x00ffffff; ++ *((int *)&__m128_op1[3]) = 0x0000feff; ++ *((int *)&__m128_op1[2]) = 0x23560000; ++ *((int *)&__m128_op1[1]) = 0x0000fd16; ++ *((int *)&__m128_op1[0]) = 0x54860000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xfffffffe; ++ *((int *)&__m128_op0[0]) = 0xfffff784; ++ *((int *)&__m128_op1[3]) = 0x0177fff0; ++ *((int *)&__m128_op1[2]) = 0xfffffff0; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x011ff8bc; ++ *((unsigned long 
*)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0002000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0002000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00ff00ff00ff00fe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sun_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffe5; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffe5; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sun_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m128d_op1[1]) = 0xfffffffffffff800; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00ff000000ff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sun_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xfffebd06fffe820c; ++ *((unsigned long *)&__m128d_op1[0]) = 0x7fff7ffe7fff3506; ++ *((unsigned long *)&__m128i_result[1]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sun_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x7ffffffeffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sun_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128d_op0[0]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128d_op1[1]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sun_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x000000000000002f; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000029; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000003a24; ++ *((unsigned long *)&__m128d_op1[0]) = 0x003dbe88077c78c1; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sun_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sun_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0001fffe0001fefc; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0007000000050000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0003000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sun_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +-- +2.33.0 + diff --git a/LoongArch-Add-tests-for-SX-vector-vfmadd-vfnmadd-vld.patch b/LoongArch-Add-tests-for-SX-vector-vfmadd-vfnmadd-vld.patch new file mode 100644 index 0000000000000000000000000000000000000000..b639554078cc7270f9b00da6fa79a5c50d4fdf27 --- /dev/null +++ b/LoongArch-Add-tests-for-SX-vector-vfmadd-vfnmadd-vld.patch @@ -0,0 +1,1412 @@ +From 5cc6bce7753e1029149839d58ed81f046087ad31 Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Tue, 12 Sep 2023 15:05:09 +0800 +Subject: [PATCH 098/124] LoongArch: Add tests for SX vector + vfmadd/vfnmadd/vld/vst instructions. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lsx/lsx-vfmadd_d.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vfmadd_s.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vfnmadd_d.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vfnmadd_s.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vld.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vst.c: New test. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lsx/lsx-vfmadd_d.c | 251 ++++++++++++ + .../loongarch/vector/lsx/lsx-vfmadd_s.c | 381 ++++++++++++++++++ + .../loongarch/vector/lsx/lsx-vfnmadd_d.c | 196 +++++++++ + .../loongarch/vector/lsx/lsx-vfnmadd_s.c | 381 ++++++++++++++++++ + .../gcc.target/loongarch/vector/lsx/lsx-vld.c | 62 +++ + .../gcc.target/loongarch/vector/lsx/lsx-vst.c | 70 ++++ + 6 files changed, 1341 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vld.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vst.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_d.c +new file mode 100644 +index 000000000..c5de1ac7a +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_d.c +@@ -0,0 +1,251 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x8a228acac14e440a; ++ *((unsigned long *)&__m128d_op1[0]) = 0xc77c47cdc0f16549; ++ *((unsigned long *)&__m128d_op2[1]) = 0xffffffffd24271c4; ++ *((unsigned long *)&__m128d_op2[0]) = 0x2711bad1e8e309ed; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffffd24271c4; ++ *((unsigned long *)&__m128d_result[0]) = 0x2711bad1e8e309ed; ++ __m128d_out = __lsx_vfmadd_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfmadd_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000040400000383; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffe000ffff1fff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000040400000383; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffe000ffff1fff; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000001000001; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0001000100000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000001000001; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffe000ffff1fff; ++ __m128d_out = __lsx_vfmadd_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x00000000003f80b0; ++ 
*((unsigned long *)&__m128d_op1[0]) = 0x00000000ff800000; ++ *((unsigned long *)&__m128d_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfmadd_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0080200000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000401000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000080000000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0000080000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000080000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000080000000000; ++ __m128d_out = __lsx_vfmadd_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmadd_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x000000000000001e; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmadd_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m128d_op2[1]) = 0xfffb00fdfdf7ffff; ++ *((unsigned long *)&__m128d_op2[0]) = 0xfff8000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0xfffb00fdfdf7ffff; ++ *((unsigned long *)&__m128d_result[0]) = 0xfff8000000000000; ++ __m128d_out = __lsx_vfmadd_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000009000900; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0000000009000900; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000009000900; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000009000900; ++ __m128d_out = __lsx_vfmadd_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m128d_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff; 
++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfmadd_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x9c83e21a22001818; ++ *((unsigned long *)&__m128d_op0[0]) = 0xdd3b8b02563b2d7b; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x7f7f7f007f7f7f00; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x7f7f7f007f7f7f00; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xfff0000000000000; ++ __m128d_out = __lsx_vfmadd_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xff00e400ff00e400; ++ *((unsigned long *)&__m128d_op0[0]) = 0xff01e41ffff0ffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x5555000054100000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x5555000154100155; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xfff0000000000000; ++ __m128d_out = __lsx_vfmadd_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000010; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000010; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x8000000000000010; ++ __m128d_out = __lsx_vfmsub_d (__m128d_op0, 
__m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xfc01fcfefc02fdf7; ++ *((unsigned long *)&__m128d_op0[0]) = 0xfe00fcfffe01fd01; ++ *((unsigned long *)&__m128d_op1[1]) = 0xfc01fd1300000001; ++ *((unsigned long *)&__m128d_op1[0]) = 0xfe00fd1400010000; ++ *((unsigned long *)&__m128d_op2[1]) = 0xfc01fcfefc02fdf7; ++ *((unsigned long *)&__m128d_op2[0]) = 0xfe00fcfffe01fd01; ++ *((unsigned long *)&__m128d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x7ff0000000000000; ++ __m128d_out = __lsx_vfmsub_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000bd3d00000000; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmsub_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0038d800ff000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00fffe00fffffe00; ++ *((unsigned long *)&__m128d_op2[1]) = 0x8000008000008080; ++ *((unsigned long *)&__m128d_op2[0]) = 0x8080800000800080; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000008000008080; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfmsub_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long 
*)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmsub_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00ff80ff00ff80ff; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0000000900000009; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfmsub_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x000000007ff000ff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmsub_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffff7ffffffffe; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00000000fffffffe; ++ *((unsigned long 
*)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffff7ffffffffe; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmsub_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x67eb85afb2ebb000; ++ *((unsigned long *)&__m128d_op1[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000103; ++ *((unsigned long *)&__m128d_result[1]) = 0x8000000100000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x8000000000000103; ++ __m128d_out = __lsx_vfmsub_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_s.c +new file mode 100644 +index 000000000..6b85e87bd +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_s.c +@@ -0,0 +1,381 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int 
unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 0x00000000; ++ *((int *)&__m128_op2[1]) = 0x00000002; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000002; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x0028e0a1; ++ *((int *)&__m128_op0[2]) = 0xa000a041; ++ *((int *)&__m128_op0[1]) = 0x01000041; ++ *((int *)&__m128_op0[0]) = 0x00010001; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x01000001; ++ *((int *)&__m128_op1[1]) = 0x00010001; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 0x01000001; ++ *((int *)&__m128_op2[1]) = 0xffffe000; ++ *((int *)&__m128_op2[0]) = 0xffff1fff; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x01000001; ++ *((int *)&__m128_result[1]) = 0xffffe000; ++ *((int *)&__m128_result[0]) = 0xffff1fff; ++ __m128_out = __lsx_vfmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x7f800000; ++ *((int *)&__m128_op0[2]) = 0x7f800000; ++ *((int *)&__m128_op0[1]) = 0x7f800000; ++ *((int *)&__m128_op0[0]) = 0x7f800000; ++ *((int *)&__m128_op1[3]) = 0x00000002; ++ *((int *)&__m128_op1[2]) = 0x00000002; ++ *((int 
*)&__m128_op1[1]) = 0x00000003; ++ *((int *)&__m128_op1[0]) = 0x00000003; ++ *((int *)&__m128_op2[3]) = 0x3fc00000; ++ *((int *)&__m128_op2[2]) = 0x3fc00000; ++ *((int *)&__m128_op2[1]) = 0x3fc00000; ++ *((int *)&__m128_op2[0]) = 0x3fc00000; ++ *((int *)&__m128_result[3]) = 0x7f800000; ++ *((int *)&__m128_result[2]) = 0x7f800000; ++ *((int *)&__m128_result[1]) = 0x7f800000; ++ *((int *)&__m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xc1bdceee; ++ *((int *)&__m128_op0[2]) = 0x242070db; ++ *((int *)&__m128_op0[1]) = 0xe8c7b756; ++ *((int *)&__m128_op0[0]) = 0xd76aa478; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 0x00000000; ++ *((int *)&__m128_op2[1]) = 0x00000000; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x7f400000; ++ *((int *)&__m128_op0[2]) = 0x7f040000; ++ *((int *)&__m128_op0[1]) = 0x7f020000; ++ *((int *)&__m128_op0[0]) = 0x7f020000; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0x0014002c; ++ *((int *)&__m128_op1[1]) = 0xfffefffe; ++ *((int *)&__m128_op1[0]) = 0x003b0013; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 0x00000000; ++ *((int *)&__m128_op2[1]) = 0x00000000; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0xffffffff; ++ *((int *)&__m128_result[2]) = 0x3ea5016b; ++ *((int *)&__m128_result[1]) = 0xfffefffe; ++ 
*((int *)&__m128_result[0]) = 0x3f6fb04d; ++ __m128_out = __lsx_vfmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x004f0080; ++ *((int *)&__m128_op0[2]) = 0x004f0080; ++ *((int *)&__m128_op0[1]) = 0x004f0080; ++ *((int *)&__m128_op0[0]) = 0x004f0080; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_op2[3]) = 0x7fff7fff; ++ *((int *)&__m128_op2[2]) = 0x7fff7fff; ++ *((int *)&__m128_op2[1]) = 0x00000000; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x7fff7fff; ++ *((int *)&__m128_result[2]) = 0x7fff7fff; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x3d3d3d3d; ++ *((int *)&__m128_op0[2]) = 0x3d3d3d3d; ++ *((int *)&__m128_op0[1]) = 0x3d3d3d3d; ++ *((int *)&__m128_op0[0]) = 0x3d3d3d3d; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00100000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 0x00000000; ++ *((int *)&__m128_op2[1]) = 0x00000000; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x0000bd3d; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00050005; 
++ *((int *)&__m128_op1[2]) = 0x00050005; ++ *((int *)&__m128_op1[1]) = 0x00050005; ++ *((int *)&__m128_op1[0]) = 0x00050005; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 0x00000000; ++ *((int *)&__m128_op2[1]) = 0x00000000; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xe500c085; ++ *((int *)&__m128_op0[2]) = 0xc000c005; ++ *((int *)&__m128_op0[1]) = 0xe5c1a185; ++ *((int *)&__m128_op0[0]) = 0xc48004c5; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xffffc000; ++ *((int *)&__m128_op1[0]) = 0xffffc005; ++ *((int *)&__m128_op2[3]) = 0xff550025; ++ *((int *)&__m128_op2[2]) = 0x002a004b; ++ *((int *)&__m128_op2[1]) = 0x00590013; ++ *((int *)&__m128_op2[0]) = 0x005cffca; ++ *((int *)&__m128_result[3]) = 0xffffffff; ++ *((int *)&__m128_result[2]) = 0xffffffff; ++ *((int *)&__m128_result[1]) = 0xffffc000; ++ *((int *)&__m128_result[0]) = 0xffffc005; ++ __m128_out = __lsx_vfmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00fe0001; ++ *((int *)&__m128_op1[2]) = 0x00cf005f; ++ *((int *)&__m128_op1[1]) = 0x7fff7fff; ++ *((int *)&__m128_op1[0]) = 0x7fff7f00; ++ *((int *)&__m128_op2[3]) = 0x5d7f5d00; ++ *((int *)&__m128_op2[2]) = 0x7f6a007f; ++ *((int *)&__m128_op2[1]) = 0x00000000; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x5d7f5d00; ++ *((int *)&__m128_result[2]) = 
0x7f6a007f; ++ *((int *)&__m128_result[1]) = 0x7fff7fff; ++ *((int *)&__m128_result[0]) = 0x7fff7f00; ++ __m128_out = __lsx_vfmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00680486; ++ *((int *)&__m128_op0[2]) = 0xffffffda; ++ *((int *)&__m128_op0[1]) = 0xffff913b; ++ *((int *)&__m128_op0[0]) = 0xb9951901; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x01030103; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 0x00000000; ++ *((int *)&__m128_op2[1]) = 0x00200060; ++ *((int *)&__m128_op2[0]) = 0x00200060; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0xffffffda; ++ *((int *)&__m128_result[1]) = 0xffff913b; ++ *((int *)&__m128_result[0]) = 0x001fed4d; ++ __m128_out = __lsx_vfmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x001a001a; ++ *((int *)&__m128_op0[2]) = 0x001a0008; ++ *((int *)&__m128_op0[1]) = 0x001a001a; ++ *((int *)&__m128_op0[0]) = 0x001a000b; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xff800001; ++ *((int *)&__m128_op1[0]) = 0x0f800000; ++ *((int *)&__m128_op2[3]) = 0xff800000; ++ *((int *)&__m128_op2[2]) = 0xff800000; ++ *((int *)&__m128_op2[1]) = 0xff800000; ++ *((int *)&__m128_op2[0]) = 0xff800000; ++ *((int *)&__m128_result[3]) = 0xffffffff; ++ *((int *)&__m128_result[2]) = 0xffffffff; ++ *((int *)&__m128_result[1]) = 0xffc00001; ++ *((int *)&__m128_result[0]) = 0xff800000; ++ __m128_out = __lsx_vfmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xfe3bfb01; ++ *((int *)&__m128_op0[2]) = 0xfe3bfe01; ++ *((int *)&__m128_op0[1]) = 0xfe03fe3f; ++ *((int 
*)&__m128_op0[0]) = 0xfe01fa21; ++ *((int *)&__m128_op1[3]) = 0xfe3bfb01; ++ *((int *)&__m128_op1[2]) = 0xfe3bfe01; ++ *((int *)&__m128_op1[1]) = 0xfe03fe3f; ++ *((int *)&__m128_op1[0]) = 0xfe01fa21; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 0x00000000; ++ *((int *)&__m128_op2[1]) = 0x00000000; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x7f800000; ++ *((int *)&__m128_result[2]) = 0x7f800000; ++ *((int *)&__m128_result[1]) = 0x7f800000; ++ *((int *)&__m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 0x00000000; ++ *((int *)&__m128_op2[1]) = 0x00000000; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmsub_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffe001; ++ *((int *)&__m128_op0[2]) = 0xffffe001; ++ *((int *)&__m128_op0[1]) = 0xffffe001; ++ *((int *)&__m128_op0[0]) = 0xffffe001; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xffffe000; ++ *((int *)&__m128_op1[0]) = 0x01ffe200; ++ *((int *)&__m128_op2[3]) = 0x04040383; ++ *((int *)&__m128_op2[2]) = 0x83838404; ++ *((int *)&__m128_op2[1]) = 0x04040383; ++ *((int *)&__m128_op2[0]) = 0x83838404; ++ *((int 
*)&__m128_result[3]) = 0xffffe001; ++ *((int *)&__m128_result[2]) = 0xffffe001; ++ *((int *)&__m128_result[1]) = 0xffffe001; ++ *((int *)&__m128_result[0]) = 0xffffe001; ++ __m128_out = __lsx_vfmsub_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x18171615; ++ *((int *)&__m128_op0[2]) = 0x17161514; ++ *((int *)&__m128_op0[1]) = 0x16151413; ++ *((int *)&__m128_op0[0]) = 0x151d3756; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x39412488; ++ *((int *)&__m128_op1[0]) = 0x80000000; ++ *((int *)&__m128_op2[3]) = 0x3ff00000; ++ *((int *)&__m128_op2[2]) = 0x00000000; ++ *((int *)&__m128_op2[1]) = 0x40f3fa00; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0xbff00000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0xc0f3fa00; ++ *((int *)&__m128_result[0]) = 0x80000000; ++ __m128_out = __lsx_vfmsub_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000005; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 0x00000000; ++ *((int *)&__m128_op2[1]) = 0x00000000; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmsub_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; 
++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x3ddc5dac; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 0x00000000; ++ *((int *)&__m128_op2[1]) = 0x00000000; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0xffffffff; ++ *((int *)&__m128_result[2]) = 0xffffffff; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmsub_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x63636b6a; ++ *((int *)&__m128_op0[2]) = 0xfe486741; ++ *((int *)&__m128_op0[1]) = 0x41f8e880; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0xe3636363; ++ *((int *)&__m128_op1[2]) = 0x63abdf16; ++ *((int *)&__m128_op1[1]) = 0x41f8e080; ++ *((int *)&__m128_op1[0]) = 0x16161198; ++ *((int *)&__m128_op2[3]) = 0x00c27580; ++ *((int *)&__m128_op2[2]) = 0x00bccf42; ++ *((int *)&__m128_op2[1]) = 0x00a975be; ++ *((int *)&__m128_op2[0]) = 0x00accf03; ++ *((int *)&__m128_result[3]) = 0xff800000; ++ *((int *)&__m128_result[2]) = 0xff800000; ++ *((int *)&__m128_result[1]) = 0x4471fb84; ++ *((int *)&__m128_result[0]) = 0xffffffff; ++ __m128_out = __lsx_vfmsub_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_d.c +new file mode 100644 +index 000000000..96b14aad6 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_d.c +@@ -0,0 +1,196 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ 
++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xef0179a47c793879; ++ *((unsigned long *)&__m128d_op0[0]) = 0x9f9e7e3e9ea3ff41; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[1]) = 0x7fc000007fc00000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x1e801ffc7fc00000; ++ *((unsigned long *)&__m128d_result[1]) = 0xffc000007fc00000; ++ *((unsigned long *)&__m128d_result[0]) = 0x9e801ffc7fc00000; ++ __m128d_out = __lsx_vfnmadd_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x8000ffff00000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x8000ffff00000000; ++ __m128d_out = __lsx_vfnmadd_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffbfffffffbf; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffbfffffffbf; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000008800022; ++ *((unsigned long 
*)&__m128d_op1[0]) = 0xffffffff00000001; ++ *((unsigned long *)&__m128d_op2[1]) = 0xb8ec43befe38e64b; ++ *((unsigned long *)&__m128d_op2[0]) = 0x6477d042343cce24; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffbfffffffbf; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffbfffffffbf; ++ __m128d_out = __lsx_vfnmadd_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0xfffffffffffff000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000060000000; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xfffffffffffff000; ++ __m128d_out = __lsx_vfnmadd_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xfffffffafffffffa; ++ *((unsigned long *)&__m128d_op0[0]) = 0xfffffffafffffffa; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfnmadd_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xf8f8f8f8f8f8f8f8; ++ *((unsigned long *)&__m128d_op1[0]) = 0xf8f8f8f8f8f8f8f8; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m128d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x8000000000000000; ++ __m128d_out = __lsx_vfnmadd_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x8000000000000000; ++ __m128d_out = __lsx_vfnmadd_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000008000000080; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000008000000080; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x8000000000000000; ++ __m128d_out = __lsx_vfnmadd_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xff80ffa2fff0ff74; ++ *((unsigned long *)&__m128d_op0[0]) = 0xff76ffd8ffe6ffaa; ++ *((unsigned long *)&__m128d_op1[1]) = 0xff80ffa2fff0ff74; ++ *((unsigned long *)&__m128d_op1[0]) = 0xff76ffd8ffe6ffaa; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0303030303030303; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0303030303030303; ++ *((unsigned long *)&__m128d_result[1]) = 0xfff0000000000000; ++ 
*((unsigned long *)&__m128d_result[0]) = 0xfff0000000000000; ++ __m128d_out = __lsx_vfnmadd_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x8000000000000000; ++ __m128d_out = __lsx_vfnmsub_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0001ffff00000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0001ffff0001ffff; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x8000000000000000; ++ __m128d_out = __lsx_vfnmsub_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128d_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128d_op1[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x3c600000ff800000; ++ *((unsigned long *)&__m128d_result[0]) = 0xfffffffffffffffe; ++ __m128d_out = __lsx_vfnmsub_d (__m128d_op0, 
__m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x000000000000000d; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x000000000000000d; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x00000000b5207f80; ++ *((unsigned long *)&__m128d_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x00000000b5207f80; ++ __m128d_out = __lsx_vfnmsub_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfnmsub_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000009000900; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000009000900; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x8000000000000000; ++ __m128d_out = __lsx_vfnmsub_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long 
*)&__m128d_op0[1]) = 0x00c2758000bccf42; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00a975be00accf03; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128d_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x00000000ffffffff; ++ __m128d_out = __lsx_vfnmsub_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_s.c +new file mode 100644 +index 000000000..bf8414b49 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_s.c +@@ -0,0 +1,381 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xfffffffe; ++ *((int *)&__m128_op0[0]) = 0xbe6ed565; ++ *((int *)&__m128_op1[3]) = 0x195f307a; ++ *((int *)&__m128_op1[2]) = 0x5d04acbb; ++ *((int *)&__m128_op1[1]) = 0x6a1a3fbb; ++ *((int *)&__m128_op1[0]) = 0x3c90260e; ++ *((int *)&__m128_op2[3]) = 0xffffffff; ++ *((int 
*)&__m128_op2[2]) = 0xffffffff; ++ *((int *)&__m128_op2[1]) = 0xfffffffe; ++ *((int *)&__m128_op2[0]) = 0xbe6ed565; ++ *((int *)&__m128_result[3]) = 0xffffffff; ++ *((int *)&__m128_result[2]) = 0xffffffff; ++ *((int *)&__m128_result[1]) = 0xfffffffe; ++ *((int *)&__m128_result[0]) = 0x3e730941; ++ __m128_out = __lsx_vfnmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xff01ff01; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_op2[3]) = 0xffffffff; ++ *((int *)&__m128_op2[2]) = 0xffffffff; ++ *((int *)&__m128_op2[1]) = 0xffffffff; ++ *((int *)&__m128_op2[0]) = 0xff01ff01; ++ *((int *)&__m128_result[3]) = 0xffffffff; ++ *((int *)&__m128_result[2]) = 0xffffffff; ++ *((int *)&__m128_result[1]) = 0xffffffff; ++ *((int *)&__m128_result[0]) = 0x7f01ff01; ++ __m128_out = __lsx_vfnmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((int *)&__m128_op2[3]) = 0x00307028; ++ *((int *)&__m128_op2[2]) = 0x003f80b0; ++ *((int *)&__m128_op2[1]) = 0x0040007f; ++ *((int *)&__m128_op2[0]) = 0xff800000; ++ *((int *)&__m128_result[3]) = 0x80307028; ++ *((int *)&__m128_result[2]) = 0xffffffff; ++ *((int *)&__m128_result[1]) = 0x8040007f; ++ *((int *)&__m128_result[0]) = 0xffffffff; ++ __m128_out = __lsx_vfnmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ 
ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000049; ++ *((int *)&__m128_op0[2]) = 0x0000004d; ++ *((int *)&__m128_op0[1]) = 0x00000001; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000001; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 0x00000000; ++ *((int *)&__m128_op2[1]) = 0x00000001; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x80000000; ++ *((int *)&__m128_result[2]) = 0x80000000; ++ *((int *)&__m128_result[1]) = 0x80000001; ++ *((int *)&__m128_result[0]) = 0xffffffff; ++ __m128_out = __lsx_vfnmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffff0000; ++ *((int *)&__m128_op0[1]) = 0x00ff0000; ++ *((int *)&__m128_op0[0]) = 0x00ff0000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000800; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_op2[3]) = 0xffffffff; ++ *((int *)&__m128_op2[2]) = 0xfffff800; ++ *((int *)&__m128_op2[1]) = 0x00000000; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0xffffffff; ++ *((int *)&__m128_result[2]) = 0xfffff800; ++ *((int *)&__m128_result[1]) = 0x80000000; ++ *((int *)&__m128_result[0]) = 0x80000000; ++ __m128_out = __lsx_vfnmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 
0x00000000; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 0x00000000; ++ *((int *)&__m128_op2[1]) = 0x00000000; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x80000000; ++ *((int *)&__m128_result[2]) = 0x80000000; ++ *((int *)&__m128_result[1]) = 0x80000000; ++ *((int *)&__m128_result[0]) = 0x80000000; ++ __m128_out = __lsx_vfnmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00030000; ++ *((int *)&__m128_op0[2]) = 0x00010000; ++ *((int *)&__m128_op0[1]) = 0x00020000; ++ *((int *)&__m128_op0[0]) = 0x00010000; ++ *((int *)&__m128_op1[3]) = 0x3f800000; ++ *((int *)&__m128_op1[2]) = 0x3f800000; ++ *((int *)&__m128_op1[1]) = 0x3f800000; ++ *((int *)&__m128_op1[0]) = 0x3f800000; ++ *((int *)&__m128_op2[3]) = 0x00030000; ++ *((int *)&__m128_op2[2]) = 0x00010000; ++ *((int *)&__m128_op2[1]) = 0x00020000; ++ *((int *)&__m128_op2[0]) = 0x00010000; ++ *((int *)&__m128_result[3]) = 0x80060000; ++ *((int *)&__m128_result[2]) = 0x80020000; ++ *((int *)&__m128_result[1]) = 0x80040000; ++ *((int *)&__m128_result[0]) = 0x80020000; ++ __m128_out = __lsx_vfnmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000008; ++ *((int *)&__m128_op0[2]) = 0x97957687; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000408; ++ *((int *)&__m128_op1[3]) = 0x00000008; ++ *((int *)&__m128_op1[2]) = 0x97957687; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000408; ++ *((int *)&__m128_op2[3]) = 0x00010001; ++ *((int *)&__m128_op2[2]) = 0x00010001; ++ *((int *)&__m128_op2[1]) = 0x00010001; ++ *((int *)&__m128_op2[0]) = 0x04000800; ++ *((int *)&__m128_result[3]) = 0x80010001; ++ *((int *)&__m128_result[2]) = 0x80010001; ++ *((int *)&__m128_result[1]) = 0x80010001; ++ *((int *)&__m128_result[0]) = 0x84000800; ++ __m128_out = 
__lsx_vfnmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffc2ffe7; ++ *((int *)&__m128_op0[2]) = 0x00000007; ++ *((int *)&__m128_op0[1]) = 0x0000ffc1; ++ *((int *)&__m128_op0[0]) = 0x00010001; ++ *((int *)&__m128_op1[3]) = 0xffc2ffe7; ++ *((int *)&__m128_op1[2]) = 0x00000007; ++ *((int *)&__m128_op1[1]) = 0x0000ffc1; ++ *((int *)&__m128_op1[0]) = 0x00010001; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 0x000ffc2f; ++ *((int *)&__m128_op2[1]) = 0x00201df0; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0xffc2ffe7; ++ *((int *)&__m128_result[2]) = 0x800ffc2f; ++ *((int *)&__m128_result[1]) = 0x80201df0; ++ *((int *)&__m128_result[0]) = 0x80000000; ++ __m128_out = __lsx_vfnmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 0x00000000; ++ *((int *)&__m128_op2[1]) = 0x00000000; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x80000000; ++ *((int *)&__m128_result[2]) = 0x80000000; ++ *((int *)&__m128_result[1]) = 0x80000000; ++ *((int *)&__m128_result[0]) = 0x80000000; ++ __m128_out = __lsx_vfnmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000005; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int 
*)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 0x00000000; ++ *((int *)&__m128_op2[1]) = 0x00000000; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x80000000; ++ *((int *)&__m128_result[2]) = 0x80000000; ++ *((int *)&__m128_result[1]) = 0x80000000; ++ *((int *)&__m128_result[0]) = 0x80000000; ++ __m128_out = __lsx_vfnmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x80808080; ++ *((int *)&__m128_op0[2]) = 0x80808080; ++ *((int *)&__m128_op0[1]) = 0x80808080; ++ *((int *)&__m128_op0[0]) = 0x80800008; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 0x00000000; ++ *((int *)&__m128_op2[1]) = 0x00000000; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x80000000; ++ *((int *)&__m128_result[2]) = 0x80000000; ++ *((int *)&__m128_result[1]) = 0x80000000; ++ *((int *)&__m128_result[0]) = 0x80000000; ++ __m128_out = __lsx_vfnmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x31313131; ++ *((int *)&__m128_op0[0]) = 0x31313131; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x31313131; ++ *((int *)&__m128_op1[0]) = 0x31313131; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 0x00000008; ++ *((int *)&__m128_op2[1]) = 0x00000000; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x80000000; ++ *((int *)&__m128_result[2]) = 0x80000008; ++ *((int *)&__m128_result[1]) = 0xa2f54a1e; 
++ *((int *)&__m128_result[0]) = 0xa2f54a1e; ++ __m128_out = __lsx_vfnmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 0x00000000; ++ *((int *)&__m128_op2[1]) = 0x00000000; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x80000000; ++ *((int *)&__m128_result[2]) = 0x80000000; ++ *((int *)&__m128_result[1]) = 0x80000000; ++ *((int *)&__m128_result[0]) = 0x80000000; ++ __m128_out = __lsx_vfnmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xa486c90f; ++ *((int *)&__m128_op0[2]) = 0x157ca12e; ++ *((int *)&__m128_op0[1]) = 0x58bcc201; ++ *((int *)&__m128_op0[0]) = 0x2e635d65; ++ *((int *)&__m128_op1[3]) = 0x6d564875; ++ *((int *)&__m128_op1[2]) = 0xf8760005; ++ *((int *)&__m128_op1[1]) = 0x8dc5a4d1; ++ *((int *)&__m128_op1[0]) = 0x79ffa22f; ++ *((int *)&__m128_op2[3]) = 0xffffffff; ++ *((int *)&__m128_op2[2]) = 0xd2436487; ++ *((int *)&__m128_op2[1]) = 0x0fa96b88; ++ *((int *)&__m128_op2[0]) = 0x5f94ab13; ++ *((int *)&__m128_result[3]) = 0xffffffff; ++ *((int *)&__m128_result[2]) = 0xd24271c4; ++ *((int *)&__m128_result[1]) = 0x2711bad1; ++ *((int *)&__m128_result[0]) = 0xe8e309ed; ++ __m128_out = __lsx_vfnmsub_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 
0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 0x00000000; ++ *((int *)&__m128_op2[1]) = 0x00000000; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x80000000; ++ *((int *)&__m128_result[2]) = 0x80000000; ++ *((int *)&__m128_result[1]) = 0x80000000; ++ *((int *)&__m128_result[0]) = 0x80000000; ++ __m128_out = __lsx_vfnmsub_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 0x00000000; ++ *((int *)&__m128_op2[1]) = 0x007ffd00; ++ *((int *)&__m128_op2[0]) = 0x01400840; ++ *((int *)&__m128_result[3]) = 0x80000000; ++ *((int *)&__m128_result[2]) = 0x80000000; ++ *((int *)&__m128_result[1]) = 0x007ffd00; ++ *((int *)&__m128_result[0]) = 0x01400840; ++ __m128_out = __lsx_vfnmsub_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 0x00000000; ++ *((int *)&__m128_op2[1]) = 0x7f800000; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x80000000; ++ *((int 
*)&__m128_result[2]) = 0x80000000; ++ *((int *)&__m128_result[1]) = 0x7f800000; ++ *((int *)&__m128_result[0]) = 0x80000000; ++ __m128_out = __lsx_vfnmsub_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_op2[3]) = 0xcd636363; ++ *((int *)&__m128_op2[2]) = 0xcd636363; ++ *((int *)&__m128_op2[1]) = 0xcd636363; ++ *((int *)&__m128_op2[0]) = 0xcd636363; ++ *((int *)&__m128_result[3]) = 0xcd636363; ++ *((int *)&__m128_result[2]) = 0xcd636363; ++ *((int *)&__m128_result[1]) = 0xcd636363; ++ *((int *)&__m128_result[0]) = 0xcd636363; ++ __m128_out = __lsx_vfnmsub_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vld.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vld.c +new file mode 100644 +index 000000000..7cd9abb7c +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vld.c +@@ -0,0 +1,62 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int 
unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long *)&__m128i_result[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long *)&__m128i_result[0]) = 0x3ab7a3fc47a5c31a; ++ __m128i_out = __lsx_vld ((unsigned long *)&__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long *)&__m128i_result[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long *)&__m128i_result[0]) = 0x3ab7a3fc47a5c31a; ++ __m128i_out = __lsx_vldx ((unsigned long *)&__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long *)&__m128i_result[1]) = 0xc3c3c3c3c3c3c3c3; ++ *((unsigned long *)&__m128i_result[0]) = 0xc3c3c3c3c3c3c3c3; ++ __m128i_out = __lsx_vldrepl_b ((unsigned long *)&__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long *)&__m128i_result[1]) = 0xc31ac31ac31ac31a; ++ *((unsigned long *)&__m128i_result[0]) = 0xc31ac31ac31ac31a; ++ __m128i_out = __lsx_vldrepl_h ((unsigned long *)&__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long *)&__m128i_result[1]) = 0x47a5c31a47a5c31a; ++ *((unsigned long *)&__m128i_result[0]) = 0x47a5c31a47a5c31a; ++ __m128i_out = __lsx_vldrepl_w ((unsigned long *)&__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x1dcc4255c9d85c05; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long *)&__m128i_result[1]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long *)&__m128i_result[0]) = 0x3ab7a3fc47a5c31a; ++ __m128i_out = __lsx_vldrepl_d ((unsigned long *)&__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vst.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vst.c +new file mode 100644 +index 000000000..8afdffa50 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vst.c +@@ -0,0 +1,70 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0; ++ *((unsigned long *)&__m128i_result[0]) = 0x0; ++ __lsx_vst (__m128i_op0, (unsigned long *)&__m128i_result, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_op0, __m128i_result); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0; ++ *((unsigned long *)&__m128i_result[0]) = 0x0; ++ __lsx_vstx (__m128i_op0, (unsigned long *)&__m128i_result, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_op0, 
__m128i_result); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0; ++ *((unsigned long *)&__m128i_result[0]) = 0x05; ++ *((unsigned long *)&__m128i_out[1]) = 0x0; ++ *((unsigned long *)&__m128i_out[0]) = 0x0; ++ __lsx_vstelm_b (__m128i_op0, (unsigned long *)&__m128i_out, 0x0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0; ++ *((unsigned long *)&__m128i_result[0]) = 0x5c05; ++ *((unsigned long *)&__m128i_out[1]) = 0x0; ++ *((unsigned long *)&__m128i_out[0]) = 0x0; ++ __lsx_vstelm_h (__m128i_op0, (unsigned long *)&__m128i_out, 0x0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0; ++ *((unsigned long *)&__m128i_result[0]) = 0xc9d85c05; ++ *((unsigned long *)&__m128i_out[1]) = 0x0; ++ *((unsigned long *)&__m128i_out[0]) = 0x0; ++ __lsx_vstelm_w (__m128i_op0, (unsigned long *)&__m128i_out, 0x0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0; ++ *((unsigned long *)&__m128i_result[0]) = 0x1dcc4255c9d85c05; ++ *((unsigned long *)&__m128i_out[1]) = 0x0; ++ *((unsigned long *)&__m128i_out[0]) = 0x0; ++ __lsx_vstelm_d (__m128i_op0, (unsigned long *)&__m128i_out, 0x0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +-- +2.33.0 + diff --git a/LoongArch-Add-tests-for-SX-vector-vfrstp-vfrstpi-vse.patch b/LoongArch-Add-tests-for-SX-vector-vfrstp-vfrstpi-vse.patch new file 
mode 100644 index 0000000000000000000000000000000000000000..59ba09c7a475a2f3136654d01c291d14e4b4c1b3 --- /dev/null +++ b/LoongArch-Add-tests-for-SX-vector-vfrstp-vfrstpi-vse.patch @@ -0,0 +1,3926 @@ +From 06a477566d282d87ce187901904c4bae2c2c4aaf Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Tue, 12 Sep 2023 11:28:29 +0800 +Subject: [PATCH 094/124] LoongArch: Add tests for SX vector + vfrstp/vfrstpi/vseq/vseqi/vsle /vslei/vslt/vslti instructions. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lsx/lsx-vfrstp.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vfrstpi.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vseq.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vseqi.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vsle-1.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vsle-2.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vslei-1.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vslei-2.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vslt-1.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vslt-2.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vslti-1.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vslti-2.c: New test. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lsx/lsx-vfrstp.c | 218 ++++++++ + .../loongarch/vector/lsx/lsx-vfrstpi.c | 209 ++++++++ + .../loongarch/vector/lsx/lsx-vseq.c | 470 ++++++++++++++++++ + .../loongarch/vector/lsx/lsx-vseqi.c | 328 ++++++++++++ + .../loongarch/vector/lsx/lsx-vsle-1.c | 290 +++++++++++ + .../loongarch/vector/lsx/lsx-vsle-2.c | 444 +++++++++++++++++ + .../loongarch/vector/lsx/lsx-vslei-1.c | 258 ++++++++++ + .../loongarch/vector/lsx/lsx-vslei-2.c | 293 +++++++++++ + .../loongarch/vector/lsx/lsx-vslt-1.c | 434 ++++++++++++++++ + .../loongarch/vector/lsx/lsx-vslt-2.c | 236 +++++++++ + .../loongarch/vector/lsx/lsx-vslti-1.c | 328 ++++++++++++ + .../loongarch/vector/lsx/lsx-vslti-2.c | 293 +++++++++++ + 12 files changed, 3801 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstp.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstpi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseq.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseqi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-2.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstp.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstp.c +new file mode 100644 +index 000000000..ac0ade8b1 +--- /dev/null ++++ 
b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstp.c +@@ -0,0 +1,218 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfe07e5fefefdddfe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00020100fedd0c00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0005000501800005; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfe07e5fefefdddfe; ++ *((unsigned long *)&__m128i_result[0]) = 0x00020100fedd0008; ++ __m128i_out = __lsx_vfrstp_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0404038383838404; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0404038383838404; ++ *((unsigned long *)&__m128i_op2[1]) = 0x03ff03ff03ff03ff; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vfrstp_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000200010; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000200010; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000010; ++ __m128i_out = __lsx_vfrstp_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000010; ++ __m128i_out = __lsx_vfrstp_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0e7ffffc01fffffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000003f803f4; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0e7ffffc01fffffc; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000003f803f4; ++ *((unsigned long *)&__m128i_result[1]) = 0x0e7ffffc01fffffc; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001003f803f4; ++ __m128i_out = __lsx_vfrstp_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000800; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000800; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000010; ++ __m128i_out = __lsx_vfrstp_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000020000007d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000746400016388; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000586100015567; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0800000200000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000020000007d; ++ __m128i_out = __lsx_vfrstp_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000010; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000010; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffff0008; ++ __m128i_out = __lsx_vfrstp_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x61608654a2d4f6da; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ff08ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrstp_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x36fbdfdcffdcffdc; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000008140c80; ++ *((unsigned long *)&__m128i_op2[1]) = 0x1f1f1f1f1f1f1f00; ++ *((unsigned long *)&__m128i_op2[0]) = 0x1f1f1f27332b9f00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x36fbdfdcffdc0008; ++ __m128i_out = __lsx_vfrstp_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000aaaa; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000545cab1d; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000081a83bea; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0x00d3007c014e00bd; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000aaaa; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrstp_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x37c0001000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x37c0001000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000003a0000003a; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x37c0001000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x37c0001000000008; ++ __m128i_out = __lsx_vfrstp_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[0]) = 0x8080808080800008; ++ __m128i_out = __lsx_vfrstp_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1f1f1f1f1f1f1f1f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1f1f1f1f1f1f1f1f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x1f1f1f1f1f1f1f1f; ++ *((unsigned long *)&__m128i_op2[0]) = 0x1f1f1f1f1f1f1f1f; ++ *((unsigned long *)&__m128i_result[1]) = 0x00081f1f1f1f1f1f; ++ *((unsigned long *)&__m128i_result[0]) = 0x1f1f1f1f1f1f1f1f; ++ __m128i_out = __lsx_vfrstp_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000400080003fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000bc2000007e10; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000400080003fff; ++ *((unsigned 
long *)&__m128i_result[0]) = 0x0000bc2000007e04; ++ __m128i_out = __lsx_vfrstp_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000a752a55; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0a753500950fa306; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff14eb54ab; ++ *((unsigned long *)&__m128i_op1[0]) = 0x14ea6a002a406a00; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x00007fff7fff8000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000a752a55; ++ *((unsigned long *)&__m128i_result[0]) = 0x0a753500950fa306; ++ __m128i_out = __lsx_vfrstp_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x02b010f881a281a2; ++ *((unsigned long *)&__m128i_op0[0]) = 0x27b169bbb8145f50; ++ *((unsigned long *)&__m128i_op1[1]) = 0x02b010f881a281a2; ++ *((unsigned long *)&__m128i_op1[0]) = 0x27b169bbb8145f50; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x02b010f881a281a2; ++ *((unsigned long *)&__m128i_result[0]) = 0x27b169bbb8140001; ++ __m128i_out = __lsx_vfrstp_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000155; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff100000000000; ++ __m128i_out = __lsx_vfrstp_b (__m128i_op0, __m128i_op1, __m128i_op2); 
++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstpi.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstpi.c +new file mode 100644 +index 000000000..a2b110f21 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstpi.c +@@ -0,0 +1,209 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0027002a00030018; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f4300177f7a7f59; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0027002a00080018; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f4300177f7a7f59; ++ __m128i_out = __lsx_vfrstpi_h (__m128i_op0, __m128i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000007f00000004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000401000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000100000004; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000110000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000007f00000004; ++ __m128i_out = __lsx_vfrstpi_b (__m128i_op0, __m128i_op1, 0xb); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000800000000; ++ __m128i_out = __lsx_vfrstpi_h (__m128i_op0, __m128i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x75b043c4d17db125; ++ *((unsigned long *)&__m128i_op0[0]) = 0xeef8227b4f8017b1; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x027c027c000027c0; ++ *((unsigned long *)&__m128i_result[1]) = 0x75b043c4007db125; ++ *((unsigned long *)&__m128i_result[0]) = 0xeef8227b4f8017b1; ++ __m128i_out = __lsx_vfrstpi_b (__m128i_op0, __m128i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ff000000ff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x03c0000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x03c0038000000380; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ff000000ff00; ++ __m128i_out = __lsx_vfrstpi_b (__m128i_op0, __m128i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000010a000b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00ffff0000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00ffff000000ff00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000010a000b; ++ __m128i_out = __lsx_vfrstpi_h (__m128i_op0, __m128i_op1, 0x5); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5b35342c979955da; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m128i_result[0]) = 0x5b35342c970455da; ++ __m128i_out = __lsx_vfrstpi_b (__m128i_op0, __m128i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0010000000000000; ++ __m128i_out = __lsx_vfrstpi_b (__m128i_op0, __m128i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0008000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vfrstpi_h (__m128i_op0, __m128i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00d3012b015700bb; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001002affca0070; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000001ca02f854; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100013fa0; ++ *((unsigned long *)&__m128i_result[1]) = 0x00d3012b015700bb; ++ *((unsigned long *)&__m128i_result[0]) = 0x00010000ffca0070; ++ __m128i_out = __lsx_vfrstpi_b (__m128i_op0, __m128i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fffe0001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000000000bf; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000002bb; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00080000fffe0001; ++ __m128i_out = __lsx_vfrstpi_h (__m128i_op0, __m128i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000545cffffab1d; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff81a800003bea; ++ *((unsigned long *)&__m128i_op1[1]) = 0x13f9c5b60028a415; ++ *((unsigned long *)&__m128i_op1[0]) = 0x545cab1d81a83bea; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000545cffff0001; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff81a800003bea; ++ __m128i_out = __lsx_vfrstpi_h (__m128i_op0, __m128i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0008000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrstpi_h (__m128i_op0, __m128i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000800000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrstpi_h (__m128i_op0, __m128i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000001b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000001b; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000001b; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000001b; ++ __m128i_out = __lsx_vfrstpi_b (__m128i_op0, __m128i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0008000000000000; ++ __m128i_out = __lsx_vfrstpi_h (__m128i_op0, __m128i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x379674c000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffff7ffffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x379674c000000000; ++ __m128i_out = __lsx_vfrstpi_b (__m128i_op0, __m128i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000800000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrstpi_h (__m128i_op0, __m128i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x001a001a001a000b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x001a001a001a000b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x001a001a001a000b; ++ *((unsigned long *)&__m128i_op1[0]) = 0x001a001a001a000b; ++ *((unsigned long *)&__m128i_result[1]) = 0x001a001a001a0008; ++ *((unsigned long *)&__m128i_result[0]) = 0x001a001a001a000b; ++ __m128i_out = __lsx_vfrstpi_h (__m128i_op0, __m128i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x02f3030303030303; ++ *((unsigned long *)&__m128i_op1[1]) = 0x004d004d004d004d; ++ *((unsigned long *)&__m128i_op1[0]) = 0x004d004d004d004d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x02f3030303100303; ++ __m128i_out = __lsx_vfrstpi_b (__m128i_op0, __m128i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000400000004c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00007770ffff941d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000400000004c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00007770ffff941d; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000400000004c; ++ *((unsigned long *)&__m128i_result[0]) = 0x00007770ffff941d; ++ __m128i_out = __lsx_vfrstpi_h (__m128i_op0, __m128i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseq.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseq.c +new file mode 100644 +index 000000000..4362941ab +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseq.c +@@ -0,0 +1,470 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, 
__m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ed0008005e00a2; ++ *((unsigned long *)&__m128i_op0[0]) = 0x007a007600150077; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00ed0008005e00a2; ++ *((unsigned long *)&__m128i_op1[0]) = 0x007a007600150077; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned 
long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8000800080008000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x9c9c9c9c63636363; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfff489b693120950; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffc45a851c40c18; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfda9b23a624082fd; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff7f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x2d1da85b7f800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7505853d654185f5; ++ *((unsigned long *)&__m128i_op1[0]) = 0x01010000fefe0101; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000800; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1fc000001fc00000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1fc000001fc00000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000000010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000000010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff00000000; ++ __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000067400002685; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000006f00001f0a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000958affff995d; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff000000000000; ++ __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0800080008000800; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0800080008000800; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fff7fff7fff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x9795698585057dec; ++ *((unsigned long *)&__m128i_op0[0]) = 0x87f82867431a1d08; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1149a96eb1a08000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x21201f1e19181716; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffacdb6dbecac; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1f5533a694f902c0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffe1ffffffe1; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffe1ffffffe1; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8080808080808080; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000002050320; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000002050320; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000002050320; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000002050320; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff00000000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0202020202020202; ++ *((unsigned long *)&__m128i_op1[0]) = 0x363d753d50155c0a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000800080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffff0000; ++ __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0f0f0f0f00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000fffe01fd02; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffff00ffffff00; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffff0000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000ffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffff00; ++ __m128i_out = __lsx_vseq_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8000800080008000; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000adadadad; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000adadadad; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000adadadad; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000adadadad; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5b5b5b5aadadadad; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000052525253; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff00ffffffffff; ++ *((unsigned long 
*)&__m128i_result[0]) = 0xffff00ffffffffff; ++ __m128i_out = __lsx_vseq_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfcfcfcdcfcfcfcdc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfcfcfcdcfcfcfcdc; ++ *((unsigned long *)&__m128i_op1[1]) = 0x33f5c2d7d9f5d800; ++ *((unsigned long *)&__m128i_op1[0]) = 0xe4c23ffb002a3a22; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000004870ba0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000084d12ce; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000044470000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff0000ffff; 
++ __m128i_out = __lsx_vseq_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000202020200; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000404040; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000020; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffff0000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffff0000; ++ __m128i_out = __lsx_vseq_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000005c000000b2; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000007600000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff0000ffffffff; ++ __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op1[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_b (__m128i_op0, 
__m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000c0dec4d1; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vseq_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffbfffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000001000000048; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffeffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000016; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff0000ffff0000; ++ __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000f50000000900; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000090900000998; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff00ffffff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff0000ffff0000; ++ __m128i_out = __lsx_vseq_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f7f7f007f7f7f00; ++ *((unsigned long *)&__m128i_op1[1]) = 0xf2c97aaa7d8fa270; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0b73e427f7cfcb88; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff01fe03ff01fe03; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff000000000000; ++ __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseqi.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseqi.c +new file mode 100644 +index 000000000..c16a291de +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseqi.c +@@ -0,0 +1,328 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lsxintrin.h> ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ 
++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff800000c3080002; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_d (__m128i_op0, 7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfedb27095b6bff95; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_h (__m128i_op0, 9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_b (__m128i_op0, 13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_h (__m128i_op0, -3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_d (__m128i_op0, 15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_h (__m128i_op0, -7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0404038383838404; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0404038383838404; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_d (__m128i_op0, 15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000040000000400; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_d (__m128i_op0, -10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_d (__m128i_op0, -9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_w (__m128i_op0, 0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_w (__m128i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0010000000100000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0010000000100000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_b (__m128i_op0, -2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_w (__m128i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_b (__m128i_op0, 11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000001000f00fe00; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000017fff00fe7f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_w (__m128i_op0, 9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_d (__m128i_op0, -12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vseqi_b (__m128i_op0, 12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000006f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001f0a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_b (__m128i_op0, 13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x007ffd0001400840; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_h (__m128i_op0, -9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff01ff010000ff7d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000fffc; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_b (__m128i_op0, 2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_d (__m128i_op0, -5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_d (__m128i_op0, 5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffa6ff91fdd8ef77; ++ *((unsigned 
long *)&__m128i_op0[0]) = 0x061202bffb141c38; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_b (__m128i_op0, 13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_w (__m128i_op0, -15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_d (__m128i_op0, 0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_w (__m128i_op0, -15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_h (__m128i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000fef01000f27ca; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vseqi_w (__m128i_op0, -4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x2a29282726252423; ++ *((unsigned long *)&__m128i_op0[0]) = 0x2221201f1e1d1c1b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_b (__m128i_op0, -1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_w (__m128i_op0, -7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff80ff00ff80ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_b (__m128i_op0, -9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseqi_b (__m128i_op0, 0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_h (__m128i_op0, -13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_h (__m128i_op0, -8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x9c7c266e71768fa4; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_h (__m128i_op0, -4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0313100003131000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0313100003131000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_w (__m128i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001a0000000b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_b (__m128i_op0, 15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000002a001a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000001a000b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_h (__m128i_op0, -11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x05f5e2320605e1e2; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_h (__m128i_op0, -2); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_d (__m128i_op0, 15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0d060d060d060d06; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0d060d060d060d06; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_w (__m128i_op0, -11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_h (__m128i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff2356fe165486; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5efeb3165bd7653d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_w (__m128i_op0, 5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseqi_h (__m128i_op0, 0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-1.c 
b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-1.c +new file mode 100644 +index 000000000..a26eb0a3d +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-1.c +@@ -0,0 +1,290 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lsxintrin.h> ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x004200a000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x004200a000200000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffff00ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffff00ffffffff; ++ __m128i_out = __lsx_vsle_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000aaaaaaaa; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x00000000aaab555b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000aaaaaaaa; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000aaab555b; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000897957687; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000408; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000000ed0e0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000004080; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffff00ffff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffff00; ++ __m128i_out = __lsx_vsle_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x004cff8fffde0051; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffffffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffff00; ++ __m128i_out = __lsx_vsle_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000040400000404; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000040400000404; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsle_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000003e2; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsle_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000501000002; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000100000008; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000800080008000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8000800080008000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000010; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffff0000; ++ __m128i_out = __lsx_vsle_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x55aa55aa55aa55ab; ++ *((unsigned long *)&__m128i_op0[0]) = 0xaa55555655aaaaa8; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0400040004000400; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff00000000ffff; ++ __m128i_out = __lsx_vsle_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x6b6c4beb636443e3; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0507070805070708; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffefffefffefffef; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffefffefffefffef; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsle_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000085af0000b000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00017ea200002000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffff01ff01; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vsle_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; 
++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffc0800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000000001b; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000001b; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfe3bfb01fe3bfe01; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfe03fe3ffe01fa21; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0100000001000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0100000001000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0040004000400040; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0040004000400040; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vsle_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000001f0000001f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000001f0000001f; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000005003a; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-2.c +new file mode 100644 +index 000000000..15c6cedc2 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-2.c +@@ -0,0 +1,444 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include 
"../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0005000400000004; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0400001001150404; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0005000400000004; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0400001001150404; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fc000007fc00000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fc000007fc00000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0100000001000100; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0100010000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000490000004d; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000001ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ffffff00ff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff00ffffffffff; ++ __m128i_out = __lsx_vsle_bu (__m128i_op0, 
__m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000020000000200; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000020000000200; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000020000000200; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000020000000200; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f8000008680f1ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0280000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ffffff00000000; ++ __m128i_out = __lsx_vsle_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000036280000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x42a0000042a02000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff80ff0000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff80000000ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffff0000; ++ __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff0600d50e9ef518; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffefffa8007c000f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000001faea9ec; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000007fffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100007f01; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0xfbfbfb17fbfb38ea; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfbfb47fbfbfb0404; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000005fffa; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000100fe000100fe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff0000ffff0000; ++ __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00001fff00001fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000003fffffffc; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff000000000000; ++ __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000800080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000040002; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffff0000; ++ __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000bffffffe; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000007f7f7f; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000011ff040; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_op1[0]) = 0x001000100010c410; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffcafff8ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000000000a0; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffcafff8ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000000a0; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x004d004d004d004d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x004d004d004d004d; ++ *((unsigned long *)&__m128i_op1[1]) = 0xc0b4d1a5f8babad3; ++ *((unsigned long *)&__m128i_op1[0]) = 0xbbc8ecc5f3ced5f3; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000007f0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000007f0000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000ed0e0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000004080; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000000ed0e0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000004080; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ unsigned_int_out = __lsx_vpickve2gr_bu (__m128i_op0, 0x8); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0003030000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsle_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff00ff00fffbfffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff01ff1100000048; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsle_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-1.c +new file mode 100644 +index 000000000..0e72a33dd +--- /dev/null ++++ 
b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-1.c +@@ -0,0 +1,258 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_b (__m128i_op0, -12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x11000f2010000e20; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0f000d200e000c20; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_b (__m128i_op0, -6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_b (__m128i_op0, 11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000200008; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000200000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0xffffffffff00ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffff00ffff; ++ __m128i_out = __lsx_vslei_b (__m128i_op0, 11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3a8000003a800000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ffffff00ffffff; ++ __m128i_out = __lsx_vslei_b (__m128i_op0, 15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_h (__m128i_op0, 3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_h (__m128i_op0, 6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_h (__m128i_op0, 10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1111113111111141; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1111113111111121; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_h (__m128i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000101; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_h (__m128i_op0, -16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff00008080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_h (__m128i_op0, -4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x31dc2cc1bc268c93; ++ *((unsigned long *)&__m128i_op0[0]) = 0x9c4d53d855f89514; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff00000000ffff; ++ __m128i_out = __lsx_vslei_h (__m128i_op0, 13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_h (__m128i_op0, -7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_h (__m128i_op0, 14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; 
++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_h (__m128i_op0, -16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0002000200000001; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_h (__m128i_op0, 7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_h (__m128i_op0, -10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_w (__m128i_op0, 0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_w (__m128i_op0, -9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3fc000003fc00000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3fc000003fc00000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_w (__m128i_op0, 1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_w (__m128i_op0, 0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffb96bffff57c9; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff6080ffff4417; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_w (__m128i_op0, -7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xd78cfd70b5f65d76; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5779108fdedda7e4; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vslei_w (__m128i_op0, -16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_w (__m128i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_d (__m128i_op0, 7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000001fc1a568; ++ *((unsigned long *)&__m128i_op0[0]) = 0x02693fe0e7beb077; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_d (__m128i_op0, -6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_d (__m128i_op0, -4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ff000000ff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_d (__m128i_op0, 5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xf1819b7c0732a6b6; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffb9917a6e7fffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_d (__m128i_op0, 12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_d (__m128i_op0, 12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0037ffc8d7ff2800; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff00ffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_d (__m128i_op0, -9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_d (__m128i_op0, 0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_d (__m128i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xf03ef03ef03ef03e; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf03ef03ef03ef03e; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_d (__m128i_op0, -9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-2.c +new file mode 100644 +index 000000000..685a1bb36 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-2.c +@@ -0,0 +1,293 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int 
unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_bu (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xd82480697f678077; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_bu (__m128i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfff489b693120950; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffc45a851c40c18; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000ff00; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ff00; ++ __m128i_out = __lsx_vslei_bu (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_bu (__m128i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000d; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_bu (__m128i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1268f057137a0267; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0048137ef886fae0; ++ *((unsigned 
long *)&__m128i_result[1]) = 0xff000000ff00ff00; ++ *((unsigned long *)&__m128i_result[0]) = 0xff00ff0000000000; ++ __m128i_out = __lsx_vslei_bu (__m128i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_bu (__m128i_op0, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_bu (__m128i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_bu (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000202fe02; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffff00ff; ++ __m128i_out = __lsx_vslei_bu (__m128i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_bu (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_bu (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_bu (__m128i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_hu (__m128i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000001f0a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffff7a53; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vslei_hu (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x40f0001000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x40f0001000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffffffffffff; ++ __m128i_out = __lsx_vslei_hu (__m128i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_hu (__m128i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff0000ff00; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff0000ffff0000; ++ __m128i_out = __lsx_vslei_hu (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000f; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffff0000; ++ __m128i_out = __lsx_vslei_hu (__m128i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00001b4a00007808; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff0000ffff0000; ++ __m128i_out = __lsx_vslei_hu (__m128i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffffffffffff; ++ __m128i_out = __lsx_vslei_hu (__m128i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_hu (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000003900; ++ *((unsigned long *)&__m128i_op0[0]) = 0x68bcf93435ed25ed; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_wu (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000001000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vslei_wu (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x03574e3a62407e03; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000001010000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vslei_wu (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000f0000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vslei_wu (__m128i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ff000000ff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vslei_wu (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_wu (__m128i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xa2a2a2a3a2a2a2a3; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc605c000aedd0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_wu (__m128i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_wu (__m128i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_wu (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_du (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_du (__m128i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_du (__m128i_op0, 0x10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000ff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000ff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_du (__m128i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_du (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_du (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_du (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00250023001c001d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x309d2f342a5d2b34; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_du (__m128i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-1.c +new file mode 100644 +index 000000000..15c96ccfe +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-1.c +@@ -0,0 +1,434 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0007658000115de0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x001a8960001d2cc0; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffc000007fc00000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x9e801ffc7fc00000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000ffff0000ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ff00ff0000ff; ++ __m128i_out = __lsx_vslt_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000040100; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000384; ++ *((unsigned long *)&__m128i_op1[0]) = 0xe3f0200004003ffd; ++ *((unsigned long *)&__m128i_result[1]) = 
0x000000000000ff00; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ff00ff00ff00; ++ __m128i_out = __lsx_vslt_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000080000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000001ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ff00; ++ __m128i_out = __lsx_vslt_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7f0101070101010f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000127f010116; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000ffffffffff; ++ __m128i_out = __lsx_vslt_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000ffef0010000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ff0000ff0000; ++ __m128i_out = __lsx_vslt_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000000000ff; ++ __m128i_out = __lsx_vslt_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; 
++ __m128i_out = __lsx_vslt_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000ff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000ff00000000; ++ __m128i_out = __lsx_vslt_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff02000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000002a001a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x001a000b00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff001a00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x003fffc0ffc0003f; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffc0ffc0003f003f; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff0000000000ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000ff00ff; ++ __m128i_out = __lsx_vslt_b (__m128i_op0, 
__m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0404038383838404; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0404038383838404; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0403cfcf01c1595e; ++ *((unsigned long *)&__m128i_op1[0]) = 0x837cd5db43fc55d4; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vslt_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslt_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0004000000040000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0004000000040000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffefefffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffefefffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000080000000800; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000080000000800; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfcfcfcdcfcfcfcdc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfcfcfcdcfcfcfcdc; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1748c4f9ed1a5870; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff00000000ffff; ++ __m128i_out = __lsx_vslt_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000202020200; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000084d12ce; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000024170000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffff0000; ++ __m128i_out = __lsx_vslt_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff000086bd; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ca000000c481; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff; ++ __m128i_out = __lsx_vslt_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000003e2; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00050eb00000fffa; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000f8a50000f310; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff0000ffff0000; ++ __m128i_out = __lsx_vslt_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000011f0000f040; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0177fff0fffffff0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000011ff8bc; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vslt_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff000000000000; ++ __m128i_out = __lsx_vslt_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_op0[0]) = 0x001000100010c410; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x800000ff000000ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00ffffff0000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslt_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xe0404041e0404041; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x803f800080000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslt_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vslt_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00001f5400000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0002008360500088; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000400028000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000003ff8; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000467fef81; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vslt_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslt_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x9c9c9c9c63636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000003f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4eede8494f000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1817161517161514; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x1615141315141312; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0fff0fff0fff0fff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0fff0fff7f800fff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslt_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000fffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0010000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslt_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-2.c +new file mode 100644 +index 000000000..e8d69f0e9 +--- /dev/null ++++ 
b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-2.c +@@ -0,0 +1,236 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0007658000115de0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x001a8960001d2cc0; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ffffff00ffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000ffffff; ++ __m128i_out = __lsx_vslt_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x98147a504d145000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x377b810912c0e000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffff00; ++ __m128i_out = __lsx_vslt_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xd3259a2984048c23; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf9796558e39953fd; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffff359f358; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffff359f358; ++ 
*((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffff00ff00; ++ __m128i_out = __lsx_vslt_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x317fce80317fce80; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000fffe0000fffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff; ++ __m128i_out = __lsx_vslt_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0007658000115de0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x001a8960001d2cc0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfe80ffffffffff02; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000001c88bf0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000320; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000007730; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslt_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000101010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslt_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4050000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vslt_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0002000200000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6363636163636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslt_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000001000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000002000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000002000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vslt_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffff8f8dada; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff01018888; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000000145ad; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000300003e6e; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x21201f1e19181716; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00005dcbe7e830c0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x03f21e0114bf19da; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vslt_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x5e695e95e1cb5a01; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0313100003131000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0313100003131000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslt_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000010a7; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000046ebaa2c; ++ *((unsigned long *)&__m128i_op1[1]) = 0xf1f1f1f149ed7273; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf1f1f1f1865e65a1; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslt_du (__m128i_op0, 
__m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00050eb00000fffa; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000f8a50000f310; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-1.c +new file mode 100644 +index 000000000..5bf3ce6e8 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-1.c +@@ -0,0 +1,328 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vslti_b (__m128i_op0, 0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00feff0000000000; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x00feff0000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ffff0000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ffff0000000000; ++ __m128i_out = __lsx_vslti_b (__m128i_op0, 0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffefffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffefffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_b (__m128i_op0, 5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_b (__m128i_op0, 8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_b (__m128i_op0, -5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_b (__m128i_op0, -12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_b 
(__m128i_op0, -12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_h (__m128i_op0, 6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_h (__m128i_op0, -11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x807f7f8000ffff00; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff00feff00; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff00000000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff0000ffff; ++ __m128i_out = __lsx_vslti_h (__m128i_op0, 4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x195f307a5d04acbb; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_h (__m128i_op0, 10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000800; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_h (__m128i_op0, 3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_h (__m128i_op0, -11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3a3a3a3b3a3a3a3a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3a3a00003a3a0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_h (__m128i_op0, -5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_h (__m128i_op0, -10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_h (__m128i_op0, 2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_h (__m128i_op0, -15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00009c7c00007176; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff00000000; ++ __m128i_out = __lsx_vslti_h (__m128i_op0, -15); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_h (__m128i_op0, 12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_h (__m128i_op0, 14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffffffffffff; ++ __m128i_out = __lsx_vslti_h (__m128i_op0, 15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_h (__m128i_op0, 15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5ff6a0a40ea8f47c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5ff6a0a40e9da42a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_w (__m128i_op0, -11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x004200a000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x004200a000200001; ++ *((unsigned long *)&__m128i_result[1]) = 
0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_w (__m128i_op0, 7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_w (__m128i_op0, -10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xd82480697f678077; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vslti_w (__m128i_op0, -9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffe15; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffe15; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_w (__m128i_op0, 1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff000100000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_w (__m128i_op0, 3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffc000ffffc005; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_w (__m128i_op0, 6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned 
long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_w (__m128i_op0, -10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_w (__m128i_op0, -5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_w (__m128i_op0, 11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0006000100040001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00010002ffff0105; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vslti_w (__m128i_op0, 15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3a8000003a800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000ef0000000003b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_w (__m128i_op0, 0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xa000308000008002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0500847b00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vslti_w (__m128i_op0, 7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_d (__m128i_op0, -9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_d (__m128i_op0, -10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_d (__m128i_op0, -16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000005e695e95; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5e695e96c396b402; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_d (__m128i_op0, -11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_d (__m128i_op0, -15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000020000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0103000201030002; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_d (__m128i_op0, 7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000455555555; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_d (__m128i_op0, -11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_d (__m128i_op0, 14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x63636b6afe486741; ++ *((unsigned long *)&__m128i_op0[0]) = 0x41f8e880ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_d (__m128i_op0, -13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-2.c +new file mode 100644 +index 000000000..768df528f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-2.c +@@ -0,0 +1,293 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, 
__m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_bu (__m128i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x003f800000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003f800000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xff0000ffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xff0000ffffffffff; ++ __m128i_out = __lsx_vslti_bu (__m128i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000008a0000008a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000008900000009; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffff00ffffff00; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffff00ffffffff; ++ __m128i_out = __lsx_vslti_bu (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_bu (__m128i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x371fe00000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x371fe00000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x000000ffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000ffffffffff; ++ __m128i_out = __lsx_vslti_bu (__m128i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xbe8282a0793636d3; ++ *((unsigned long *)&__m128i_op0[0]) = 0x793636d3793636d3; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_bu (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_bu (__m128i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_bu (__m128i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_bu (__m128i_op0, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x2e2b34ca59fa4c88; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3b2c8aefd44be966; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_hu (__m128i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000100000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000080000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_hu (__m128i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7505445465593af1; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0100d6effefd0498; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_hu (__m128i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_hu (__m128i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00010000ffab001c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001ffffffadff9a; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff000000000000; ++ __m128i_out = __lsx_vslti_hu (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff7300000ca00430; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000001a00000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_hu (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000009c83e21a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000022001818; ++ *((unsigned long 
*)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vslti_hu (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00001802041b0013; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_wu (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_wu (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_wu (__m128i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0007000000050000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0003000000010000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_wu (__m128i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000006f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001f0a; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vslti_wu (__m128i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000003a24; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003dbe88077c78c1; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_wu (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0100010001000100; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_wu (__m128i_op0, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5d7f5d807fea807f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_wu (__m128i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x975ca6046e2e4889; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1748c4f9ed1a5870; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_wu (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000235600005486; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000b31600006544; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_wu (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_du (__m128i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_du (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_du (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_du (__m128i_op0, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000007e8a60; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000001edde; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_du (__m128i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0aa077b7054c9554; ++ *((unsigned long *)&__m128i_op0[0]) = 0x40c7ee1f38e4c4e8; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_du (__m128i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_du (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_du (__m128i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_du (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_du (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_du (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6b75948a91407a42; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0b5471b633e54fde; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_du (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000feff23560000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000fd1654860000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_du (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +-- +2.33.0 + diff --git a/LoongArch-Add-tests-for-SX-vector-vmax-vmaxi-vmin-vm.patch b/LoongArch-Add-tests-for-SX-vector-vmax-vmaxi-vmin-vm.patch new file mode 100644 index 0000000000000000000000000000000000000000..727fdc45746369d277a8142a6d8427d0680196ab --- /dev/null +++ b/LoongArch-Add-tests-for-SX-vector-vmax-vmaxi-vmin-vm.patch @@ -0,0 +1,2578 @@ +From dd0b9d05c2e18dc8082931dbfe612bb1acf9e5e9 Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Mon, 11 Sep 2023 18:38:46 +0800 +Subject: [PATCH 085/124] LoongArch: Add tests for SX vector + vmax/vmaxi/vmin/vmini instructions. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lsx/lsx-vmax-1.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vmax-2.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vmaxi-1.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vmaxi-2.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vmin-1.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vmin-2.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vmini-1.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vmini-2.c: New test. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lsx/lsx-vmax-1.c | 317 +++++++++++++ + .../loongarch/vector/lsx/lsx-vmax-2.c | 362 +++++++++++++++ + .../loongarch/vector/lsx/lsx-vmaxi-1.c | 279 +++++++++++ + .../loongarch/vector/lsx/lsx-vmaxi-2.c | 223 +++++++++ + .../loongarch/vector/lsx/lsx-vmin-1.c | 434 ++++++++++++++++++ + .../loongarch/vector/lsx/lsx-vmin-2.c | 344 ++++++++++++++ + .../loongarch/vector/lsx/lsx-vmini-1.c | 314 +++++++++++++ + .../loongarch/vector/lsx/lsx-vmini-2.c | 216 +++++++++ + 8 files changed, 2489 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-2.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-1.c +new file mode 100644 +index 000000000..b0e22f955 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-1.c +@@ -0,0 +1,317 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, 
long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmax_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000010000003f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7f007f007f007f00; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x000000010000003f; ++ __m128i_out = __lsx_vmax_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffff00010000fff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffff00010000fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000010000f00; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000010000f01; ++ __m128i_out = __lsx_vmax_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffcfffcfffcfffd; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffcfffdfffcfffd; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff80df00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0010100000100000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1000100000101000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010100000100000; ++ *((unsigned long *)&__m128i_result[0]) = 0x1000100000101000; ++ __m128i_out = __lsx_vmax_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; 
++ __m128i_out = __lsx_vmax_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0040000000ff00ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0040000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0040000000ff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0040000000000000; ++ __m128i_out = __lsx_vmax_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000001000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000100000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000001000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100000000; ++ __m128i_out = __lsx_vmax_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_h (__m128i_op0, 
__m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xb327b9363c992b2e; ++ *((unsigned long *)&__m128i_op1[0]) = 0xa1e7b475d925730f; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff3c992b2e; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffff730f; ++ __m128i_out = __lsx_vmax_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4101010141010100; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000001ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x4101010141010100; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000000001ff; ++ __m128i_out = __lsx_vmax_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000003dffc2; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000003dffc2; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000003d0000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000003d0000; ++ __m128i_out = __lsx_vmax_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0007001400000014; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0004001000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000000053a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000700140000053a; ++ *((unsigned long *)&__m128i_result[0]) = 0x0004001000000000; ++ __m128i_out = __lsx_vmax_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000034; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff80c400000148; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff80c1ffffe8de; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000148; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000034; ++ __m128i_out = __lsx_vmax_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmax_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0008000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00ff000000ff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0008000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff000000ff0000; ++ __m128i_out = __lsx_vmax_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_result[1]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_result[0]) = 0x000a000a000a000a; ++ __m128i_out = __lsx_vmax_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000b3a6000067da; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00004e420000c26a; ++ *((unsigned long *)&__m128i_op1[1]) = 0xd78cfd70b5f65d76; ++ *((unsigned long *)&__m128i_op1[0]) = 0x5779108fdedda7e4; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000b3a6000067da; ++ *((unsigned long *)&__m128i_result[0]) = 0x5779108f0000c26a; ++ __m128i_out = __lsx_vmax_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x30eb022002101b20; ++ *((unsigned long *)&__m128i_op0[0]) = 0x020310edc003023d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x30eb020302101b03; ++ *((unsigned long *)&__m128i_op1[0]) = 0x020310d0c0030220; ++ *((unsigned long *)&__m128i_result[1]) = 0x30eb022002101b20; ++ *((unsigned long *)&__m128i_result[0]) = 0x020310edc003023d; ++ __m128i_out = __lsx_vmax_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfe03fe01fe01fe01; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfe3bfa3ffe3bfb21; ++ *((unsigned long *)&__m128i_op1[1]) = 0x001d001d001d001d; ++ *((unsigned long *)&__m128i_op1[0]) = 0x001d001d001d0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x001d001d001d001d; ++ *((unsigned long *)&__m128i_result[0]) = 0x001d001d001d0000; ++ __m128i_out = __lsx_vmax_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000000100000155; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000155; ++ __m128i_out = __lsx_vmax_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000051649b6; ++ *((unsigned long *)&__m128i_op0[0]) = 0xd2f005e44bb43416; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000003e0000003f; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000051649b6; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000003e0000003f; ++ __m128i_out = __lsx_vmax_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op1[1]) = 0x0000ebd20000714f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00012c8a0000a58a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ebd20000714f; ++ *((unsigned long *)&__m128i_result[0]) = 0x00012c8a0000a58a; ++ __m128i_out = __lsx_vmax_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-2.c +new file mode 100644 +index 000000000..51a9a92e8 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-2.c +@@ -0,0 +1,362 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff80000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff80000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000007f0000; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000007f0000; ++ __m128i_out = __lsx_vmax_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff000000000000; ++ __m128i_out = __lsx_vmax_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x77c0404a4000403a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x77c03fd640003fc6; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000003a0000003a; ++ *((unsigned long *)&__m128i_result[1]) = 0x77c0404a4000403a; ++ *((unsigned long *)&__m128i_result[0]) = 0x77c03fd640003fc6; ++ __m128i_out = __lsx_vmax_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xbafebb00ffd500fe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 
0xbafebb00ffd500fe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xbafebb00ffd500fe; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x52525252adadadad; ++ *((unsigned long *)&__m128i_op0[0]) = 0x52525252adadadad; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x5b5b5b5aa4a4a4a6; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x5b5b5b5aadadadad; ++ __m128i_out = __lsx_vmax_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfe3bfb01fe3bfe01; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfe03fe3ffe01fa21; ++ *((unsigned long *)&__m128i_result[1]) = 0xfe3bfb01fe3bfe01; ++ *((unsigned long *)&__m128i_result[0]) = 0xfe03fe3ffe01fa21; ++ __m128i_out = __lsx_vmax_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000f50000007500; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00007e1600007d98; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000fe00fe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00fe00fe00fe00fe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000f50000fe75fe; ++ *((unsigned long *)&__m128i_result[0]) = 0x00fe7efe00fe7dfe; ++ __m128i_out = __lsx_vmax_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x9c9c9c9c00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x2002040404010420; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x0101010180800101; ++ *((unsigned long *)&__m128i_result[1]) = 0x2002040404010420; ++ *((unsigned long *)&__m128i_result[0]) = 0x9c9c9c9c80800101; ++ __m128i_out = __lsx_vmax_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff84fff4ff84fff4; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffff0; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff84fff4ff84fff4; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffff0; ++ *((unsigned long *)&__m128i_result[1]) = 0xff84fff4ff84fff4; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff0; ++ __m128i_out = __lsx_vmax_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffffffdfffdf; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffdf; ++ __m128i_out = __lsx_vmax_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 
0xf001f0010101f002; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmax_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x14ccc6320076a4d2; ++ *((unsigned long *)&__m128i_op1[0]) = 0x685670d27e00682a; ++ *((unsigned long *)&__m128i_result[1]) = 0x14ccc6320076a4d2; ++ *((unsigned long *)&__m128i_result[0]) = 0x685670d27e00682a; ++ __m128i_out = __lsx_vmax_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xf6548a1747e59090; ++ *((unsigned long *)&__m128i_op1[0]) = 0x27b169bbb8145f50; ++ *((unsigned long *)&__m128i_result[1]) = 0xf6548a1747e59090; ++ *((unsigned long *)&__m128i_result[0]) = 0x27b169bbb8145f50; ++ __m128i_out = __lsx_vmax_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff80ff0010ff06; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00007f01000eff0a; ++ *((unsigned long 
*)&__m128i_result[1]) = 0xffff80ff0010ff06; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000000080000000; ++ __m128i_out = __lsx_vmax_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000002bfd9461; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x3ff0000000007fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000002bfd9461; ++ __m128i_out = __lsx_vmax_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffff0000000ad3d; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffff000fffff000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffff00010001000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffff000fffff000; ++ __m128i_out = __lsx_vmax_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000000001f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000001f; ++ *((unsigned long *)&__m128i_result[1]) = 
0x000000000000001f; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000001f; ++ __m128i_out = __lsx_vmax_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000040; ++ __m128i_out = __lsx_vmax_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmax_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmax_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000000001; ++ __m128i_out = __lsx_vmax_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xa2a2a2a3a2a2a2a3; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc605c000aedd0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xc605c000aedd0000; ++ __m128i_out = __lsx_vmax_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x09e8e9012fded7fd; ++ *((unsigned long *)&__m128i_op1[0]) = 0x479f64b03373df61; ++ *((unsigned long *)&__m128i_result[1]) = 0x09e8e9012fded7fd; ++ *((unsigned long *)&__m128i_result[0]) = 0x479f64b03373df61; ++ __m128i_out = __lsx_vmax_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000005; ++ __m128i_out = 
__lsx_vmax_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x52525252adadadad; ++ *((unsigned long *)&__m128i_op1[0]) = 0x52525252adadadad; ++ *((unsigned long *)&__m128i_result[1]) = 0x52525252adadadad; ++ *((unsigned long *)&__m128i_result[0]) = 0x52525252adadadad; ++ __m128i_out = __lsx_vmax_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0808080700000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0808080808080808; ++ __m128i_out = __lsx_vmax_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000202020200; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000202020200; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000100; ++ __m128i_out = __lsx_vmax_du (__m128i_op0, 
__m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-1.c +new file mode 100644 +index 000000000..7cff1d848 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-1.c +@@ -0,0 +1,279 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0a0a0a0a0a0a0a0a; ++ *((unsigned long *)&__m128i_result[0]) = 0x0a0a0a0a0a0a0a0a; ++ __m128i_out = __lsx_vmaxi_b (__m128i_op0, 10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaxi_b (__m128i_op0, -7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_result[1]) = 0x1000100010001000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x1000100010001000; ++ __m128i_out = __lsx_vmaxi_b (__m128i_op0, -10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000003be14000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000003bfb4000; ++ __m128i_out = __lsx_vmaxi_b (__m128i_op0, -5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0b0b0b0b0b0b0b0b; ++ *((unsigned long *)&__m128i_result[0]) = 0x0b0b0b0b0b0b0b0b; ++ __m128i_out = __lsx_vmaxi_b (__m128i_op0, 11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000007ffffffb; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_result[0]) = 0x010101017f010101; ++ __m128i_out = __lsx_vmaxi_b (__m128i_op0, 1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000007f8; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000000007f8; ++ *((unsigned long *)&__m128i_result[1]) = 0x0b0b0b0b0b0b0b0b; ++ *((unsigned long *)&__m128i_result[0]) = 0x0b0b0b0b0b0b0b0b; ++ __m128i_out = __lsx_vmaxi_b (__m128i_op0, 11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000c; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000000c; ++ __m128i_out = __lsx_vmaxi_b (__m128i_op0, -7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vmaxi_b (__m128i_op0, 0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0606060606060606; ++ *((unsigned long *)&__m128i_result[0]) = 0x0606060606060606; ++ __m128i_out = __lsx_vmaxi_b (__m128i_op0, 6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0fffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0fffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaxi_b (__m128i_op0, -16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaxi_h (__m128i_op0, -1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaxi_h (__m128i_op0, -11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x027c027c000027c0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 
0x027c027c000027c0; ++ __m128i_out = __lsx_vmaxi_h (__m128i_op0, -6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0002000000020000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000001fc00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0002000000020000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000001fc00000000; ++ __m128i_out = __lsx_vmaxi_h (__m128i_op0, -7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000fff; ++ __m128i_out = __lsx_vmaxi_h (__m128i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000007ff000ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000a1ff4c; ++ *((unsigned long *)&__m128i_result[1]) = 0x000300037ff000ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0003000300a10003; ++ __m128i_out = __lsx_vmaxi_h (__m128i_op0, 3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaxi_h (__m128i_op0, -2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_result[1]) = 0x000b000b000b000b; ++ *((unsigned long *)&__m128i_result[0]) = 0x000b000b000b000b; ++ __m128i_out = __lsx_vmaxi_h (__m128i_op0, 11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned 
long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0004000400040004; ++ *((unsigned long *)&__m128i_result[0]) = 0x0004000400040004; ++ __m128i_out = __lsx_vmaxi_h (__m128i_op0, 4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfe07e5fefefdddfe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00020100fedd0c00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000b0000000b; ++ *((unsigned long *)&__m128i_result[0]) = 0x000201000000000b; ++ __m128i_out = __lsx_vmaxi_w (__m128i_op0, 11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000001000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000401000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100000004; ++ __m128i_out = __lsx_vmaxi_w (__m128i_op0, 4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaxi_w (__m128i_op0, -10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000006f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001f0a; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000006f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000001f0a; ++ __m128i_out = __lsx_vmaxi_w (__m128i_op0, -12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000007b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000050000007b; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000500000005; ++ __m128i_out = __lsx_vmaxi_w 
(__m128i_op0, 5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000400000004; ++ __m128i_out = __lsx_vmaxi_w (__m128i_op0, 4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x001fffff001fffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x001fffff001fffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x001fffff001fffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x001fffff001fffff; ++ __m128i_out = __lsx_vmaxi_w (__m128i_op0, -7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000b0000000b; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000b0000000b; ++ __m128i_out = __lsx_vmaxi_w (__m128i_op0, 11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000e0000000e; ++ __m128i_out = __lsx_vmaxi_w (__m128i_op0, 14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000900000020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000900000009; ++ __m128i_out = __lsx_vmaxi_w (__m128i_op0, 9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x9c9c9c9c9c9c9c9c; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x0000000600000006; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000600000006; ++ __m128i_out = __lsx_vmaxi_w (__m128i_op0, 6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f80000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7f80000000000007; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000700000007; ++ __m128i_out = __lsx_vmaxi_w (__m128i_op0, 7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000002; ++ __m128i_out = __lsx_vmaxi_d (__m128i_op0, 2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000007f00; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000001000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000007f00; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000001000000; ++ __m128i_out = __lsx_vmaxi_d (__m128i_op0, -4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfff489b693120950; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffc45a851c40c18; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000000a; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000000a; ++ __m128i_out = __lsx_vmaxi_d (__m128i_op0, 10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaxi_d (__m128i_op0, -5); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x63636b6afe486741; ++ *((unsigned long *)&__m128i_op0[0]) = 0x41f8e880ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x63636b6afe486741; ++ *((unsigned long *)&__m128i_result[0]) = 0x41f8e880ffffffff; ++ __m128i_out = __lsx_vmaxi_d (__m128i_op0, -2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-2.c +new file mode 100644 +index 000000000..b79af2228 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-2.c +@@ -0,0 +1,223 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000020002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000020002; ++ *((unsigned long *)&__m128i_result[1]) = 0x0303030303030303; ++ *((unsigned long *)&__m128i_result[0]) = 0x0303030303030303; ++ __m128i_out = __lsx_vmaxi_bu (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x1111111111111111; ++ *((unsigned long *)&__m128i_result[0]) = 0x1111111111111111; ++ 
__m128i_out = __lsx_vmaxi_bu (__m128i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x1111111111111111; ++ *((unsigned long *)&__m128i_result[0]) = 0x1111111111111111; ++ __m128i_out = __lsx_vmaxi_bu (__m128i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0a0a0a0a0a0a0a0a; ++ *((unsigned long *)&__m128i_result[0]) = 0x0a0a0a0a0a0a0a0a; ++ __m128i_out = __lsx_vmaxi_bu (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0011001100110011; ++ __m128i_out = __lsx_vmaxi_hu (__m128i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x11000f2010000e20; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0f000d200e000c20; ++ *((unsigned long *)&__m128i_result[1]) = 0x11000f2010000e20; ++ *((unsigned long *)&__m128i_result[0]) = 0x0f000d200e000c20; ++ __m128i_out = __lsx_vmaxi_hu (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x001ffff0003ffff0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000fffefffefffef; ++ *((unsigned long *)&__m128i_result[1]) = 0x001ffff0003ffff0; ++ *((unsigned long *)&__m128i_result[0]) = 0x000fffefffefffef; ++ __m128i_out = __lsx_vmaxi_hu (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0005000500050005; ++ *((unsigned long *)&__m128i_result[0]) = 0x0005000500050005; ++ __m128i_out = __lsx_vmaxi_hu (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000020000020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000020000020; ++ *((unsigned long *)&__m128i_result[1]) = 0x001d001d20000020; ++ *((unsigned long *)&__m128i_result[0]) = 0x001d001d20000020; ++ __m128i_out = __lsx_vmaxi_hu (__m128i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00003fff00010000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00123fff00120012; ++ *((unsigned long *)&__m128i_result[0]) = 0x0012001200120012; ++ __m128i_out = __lsx_vmaxi_hu (__m128i_op0, 0x12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x001a001a001a001a; ++ *((unsigned long *)&__m128i_result[0]) = 0x001a001a001a001a; ++ __m128i_out = __lsx_vmaxi_hu (__m128i_op0, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x001e001e001e001e; ++ *((unsigned long *)&__m128i_result[0]) = 0x001e001e001e001e; ++ __m128i_out = __lsx_vmaxi_hu (__m128i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x001d001d001d001d; ++ *((unsigned long *)&__m128i_result[0]) = 0x001d001d001d001d; ++ __m128i_out = 
__lsx_vmaxi_hu (__m128i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000800000008; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000800000008; ++ __m128i_out = __lsx_vmaxi_wu (__m128i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000001600000016; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001600000016; ++ __m128i_out = __lsx_vmaxi_wu (__m128i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmaxi_wu (__m128i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffacdb6dbecac; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1f5533a694f902c0; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffacdb6dbecac; ++ *((unsigned long *)&__m128i_result[0]) = 0x1f5533a694f902c0; ++ __m128i_out = __lsx_vmaxi_wu (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x37c0001000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x37c0001000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x37c0001000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x37c0001000000001; ++ __m128i_out = __lsx_vmaxi_wu (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0010001000100010; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_result[0]) = 0x0010001000100010; ++ __m128i_out = __lsx_vmaxi_wu (__m128i_op0, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xbf8000000000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xcf00000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xbf8000000000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xcf00000000000000; ++ __m128i_out = __lsx_vmaxi_du (__m128i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000011; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000011; ++ __m128i_out = __lsx_vmaxi_du (__m128i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000001c; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000001c; ++ __m128i_out = __lsx_vmaxi_du (__m128i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000000d; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000000d; ++ __m128i_out = __lsx_vmaxi_du (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000000b; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000000b; ++ __m128i_out = 
__lsx_vmaxi_du (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x43d3e0000013e000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x43d3e0000013e000; ++ *((unsigned long *)&__m128i_result[1]) = 0x43d3e0000013e000; ++ *((unsigned long *)&__m128i_result[0]) = 0x43d3e0000013e000; ++ __m128i_out = __lsx_vmaxi_du (__m128i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000100010001007c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x000100010001007c; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vmaxi_du (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000001d; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000001d; ++ __m128i_out = __lsx_vmaxi_du (__m128i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000001b; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000001b; ++ __m128i_out = __lsx_vmaxi_du (__m128i_op0, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000004; ++ __m128i_out = __lsx_vmaxi_du (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-1.c 
b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-1.c +new file mode 100644 +index 000000000..b2a7a35bd +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-1.c +@@ -0,0 +1,434 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ff000000ff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3fffff0000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3fffff0000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ffff0000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ffff000000ff00; ++ __m128i_out = __lsx_vmin_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ff91fffffff5; ++ *((unsigned long *)&__m128i_op0[0]) = 
0xffff00650001ffb0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000067400002685; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ff91fffffff5; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff00650000ff85; ++ __m128i_out = __lsx_vmin_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000001ca02f854; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100013fa0; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffcafff8ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000000000a0; ++ __m128i_out = __lsx_vmin_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f8000008680f1ff; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xff80ffffff80ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xff80ffff8680f1ff; ++ __m128i_out = __lsx_vmin_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff00e400ff00e400; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff01e41ffff0e440; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffe4ffffffe4ff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffe4fffff0e4ff; ++ __m128i_out = __lsx_vmin_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000a16316b0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000063636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000a1630000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffc0ff81000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000002000000020; ++ 
*((unsigned long *)&__m128i_op1[0]) = 0x0000000600000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffc0ff81000000; ++ __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000401000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000080000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000080000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00fdffffffffff02; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 
0xfe80000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfe80ffffffffff02; ++ __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfe80ffffffffff02; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x027e0000000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfe80ffffffffff02; ++ __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffe0000000; ++ __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xc0b4d1a5f8babad3; ++ *((unsigned long *)&__m128i_op1[0]) = 0xbbc8ecc5f3ced5f3; ++ *((unsigned long *)&__m128i_result[1]) = 0xc0b4d1a5f8babad3; ++ *((unsigned long *)&__m128i_result[0]) = 0xbbc8ecc5f3ced5f3; ++ __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffbfffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc090380000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffbfffc; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc090380000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffbfffc; 
++ *((unsigned long *)&__m128i_result[0]) = 0xc090380000000000; ++ __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8493941335f5cc0c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x625a7312befcb21e; ++ *((unsigned long *)&__m128i_result[1]) = 0x8493941300000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000002befcb21e; ++ __m128i_out = __lsx_vmin_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmin_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000078c00000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 
0x000000000000000d; ++ __m128i_out = __lsx_vmin_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_result[1]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_result[0]) = 0x000a000a000a000a; ++ __m128i_out = __lsx_vmin_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xa2a2a2a3a2a2a2a3; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc605c000aedd0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x5d5d5d5d5d5d5d5d; ++ *((unsigned long *)&__m128i_op1[0]) = 0x5d5d5d5d5d5d0000; ++ *((unsigned long *)&__m128i_result[1]) = 0xa2a2a2a3a2a2a2a3; ++ *((unsigned long *)&__m128i_result[0]) = 0xc605c000aedd0000; ++ __m128i_out = __lsx_vmin_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000003000000d613; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000c0000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000c0000000; ++ __m128i_out = 
__lsx_vmin_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000200000001b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000002000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_result[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_result[0]) = 0xff800000ff800000; ++ __m128i_out = __lsx_vmin_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000017fff9000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000210011084; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000017fff9000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000210011084; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000017fff9000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000210011084; ++ __m128i_out = __lsx_vmin_d (__m128i_op0, __m128i_op1); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000017f0a82; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5a5a5a5a5b5a5b5a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5a5a5a5a5b5a5b5a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x027c027c000027c0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x027c027c000027c0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x027c027c000027c0; ++ __m128i_out = __lsx_vmin_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmin_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000010000000100; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3e25c8317394dae6; ++ *((unsigned long *)&__m128i_op0[0]) = 0xcda585aebbb2836a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xcda585aebbb2836a; ++ __m128i_out = __lsx_vmin_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x98147a504d145000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x377b810912c0e000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x98147a504d145000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x377b810912c0e000; ++ *((unsigned long *)&__m128i_result[1]) = 0x98147a504d145000; ++ *((unsigned long *)&__m128i_result[0]) = 0x377b810912c0e000; ++ __m128i_out = __lsx_vmin_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfcfcfcfcfcfcfcfd; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfcfcfcfcfcfcfcfd; ++ *((unsigned long *)&__m128i_result[1]) = 0xfcfcfcfcfcfcfcfd; ++ *((unsigned long *)&__m128i_result[0]) = 0xfcfcfcfcfcfcfcfd; ++ __m128i_out = __lsx_vmin_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xd3259a2984048c23; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf9796558e39953fd; ++ *((unsigned long *)&__m128i_result[1]) = 0xd3259a2984048c23; ++ *((unsigned long *)&__m128i_result[0]) = 0xf9796558e39953fd; ++ __m128i_out = __lsx_vmin_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffff00ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmin_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-2.c +new file mode 100644 +index 000000000..c90cae75e +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-2.c +@@ -0,0 +1,344 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 
__m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmin_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000300000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffe0004fffe0004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xd3259a2984048c23; ++ *((unsigned long *)&__m128i_op0[0]) = 
0xf9796558e39953fd; ++ *((unsigned long *)&__m128i_op1[1]) = 0xd3259a2984048c23; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf9796558e39953fd; ++ *((unsigned long *)&__m128i_result[1]) = 0xd3259a2984048c23; ++ *((unsigned long *)&__m128i_result[0]) = 0xf9796558e39953fd; ++ __m128i_out = __lsx_vmin_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xf6548a1747e59090; ++ *((unsigned long *)&__m128i_op0[0]) = 0x27b169bbb8145f50; ++ *((unsigned long *)&__m128i_op1[1]) = 0xf6548a1747e59090; ++ *((unsigned long *)&__m128i_op1[0]) = 0x27b169bbb8145f50; ++ *((unsigned long *)&__m128i_result[1]) = 0xf6548a1747e59090; ++ *((unsigned long *)&__m128i_result[0]) = 0x27b169bbb8145f50; ++ __m128i_out = __lsx_vmin_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x9c9c9c9c63636363; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x9c9c9c9c00000000; ++ __m128i_out = __lsx_vmin_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x2020202020207f7f; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x000000000000003f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7f417f417f027e03; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000003f; ++ *((unsigned long *)&__m128i_result[0]) = 0x2020202020207e03; ++ __m128i_out = __lsx_vmin_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00008d3200000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x09e8e9012fded7fd; ++ *((unsigned long *)&__m128i_op1[0]) = 0x479f64b03373df61; ++ *((unsigned long *)&__m128i_result[1]) = 0x00008d3200000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0c0b0a090b0a0908; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0a09080709080706; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0c0b0a090b0a0908; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0a09080709080706; ++ *((unsigned long *)&__m128i_result[1]) = 0x0c0b0a090b0a0908; ++ *((unsigned long *)&__m128i_result[0]) = 0x0a09080709080706; ++ __m128i_out = __lsx_vmin_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 
0xa87745dbd93e4ea1; ++ *((unsigned long *)&__m128i_op1[0]) = 0xaa49601e26d39860; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5252525252525252; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5252525252525252; ++ *((unsigned long *)&__m128i_op1[1]) = 0x2006454690d3de87; ++ *((unsigned long *)&__m128i_op1[0]) = 0x2006454690d3de87; ++ *((unsigned long *)&__m128i_result[1]) = 0x2006454652525252; ++ *((unsigned long *)&__m128i_result[0]) = 0x2006454652525252; ++ __m128i_out = __lsx_vmin_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000040100; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffe000ffff2382; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000040100; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010000; ++ __m128i_out = __lsx_vmin_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x03574e3a62407e03; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000001010000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7da9b23a624082fd; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x03574e3a62407e03; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000001010000; ++ __m128i_out = __lsx_vmin_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0505050505050505; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000005050000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0028280000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0028280000282800; ++ *((unsigned long *)&__m128i_result[1]) = 0x0028280000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000282800; ++ __m128i_out = __lsx_vmin_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfc01fd13fc02fe0c; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfe00fd14fe01fd16; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffff0000010000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfc01fd1300000001; ++ *((unsigned long *)&__m128i_result[0]) = 0xfe00fd1400010000; ++ __m128i_out = __lsx_vmin_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000001ca02f854; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100013fa0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x2000200020002000; ++ *((unsigned long *)&__m128i_op1[0]) = 
0x2000200020002000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000120002000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100013fa0; ++ __m128i_out = __lsx_vmin_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000005003a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xcd636363cd636363; ++ *((unsigned long *)&__m128i_op0[0]) = 0xcd636363cd636363; ++ *((unsigned long *)&__m128i_op1[1]) = 0x86dd8341b164f12b; ++ *((unsigned long *)&__m128i_op1[0]) = 0x9611c3985b3159f5; ++ *((unsigned long *)&__m128i_result[1]) = 0x86dd8341b164f12b; ++ *((unsigned long *)&__m128i_result[0]) = 0x9611c3985b3159f5; ++ __m128i_out = __lsx_vmin_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000de0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001f0a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000006f00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000001f0a; ++ __m128i_out = __lsx_vmin_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000fea0000fffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff0cff78ff96ff14; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff007fff810001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000400530050ffa6; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff7f810100001000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x001fffc0ffffe001; ++ *((unsigned long *)&__m128i_result[1]) = 0xff7f810100001000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000400530050ffa6; ++ __m128i_out = __lsx_vmin_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00007efe7f7f8000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000b81c8382; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000077af9450; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000077af9450; ++ __m128i_out = __lsx_vmin_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-1.c +new file mode 100644 +index 000000000..772d040c3 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-1.c +@@ -0,0 +1,314 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffefffefffffffc; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffefffefffffffc; ++ __m128i_out = __lsx_vmini_b (__m128i_op0, 4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000006f00002f0a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000958aefff895e; ++ *((unsigned long *)&__m128i_result[1]) = 0xfafafafafafafafa; ++ *((unsigned long *)&__m128i_result[0]) = 0xfafa958aeffa89fa; ++ __m128i_out = __lsx_vmini_b (__m128i_op0, -6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmini_b (__m128i_op0, 1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000adadadad; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000adadadad; ++ *((unsigned long *)&__m128i_result[1]) = 0xfbfbfbfbadadadad; ++ *((unsigned long *)&__m128i_result[0]) = 0xfbfbfbfbadadadad; ++ __m128i_out = __lsx_vmini_b (__m128i_op0, -5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_b (__m128i_op0, 12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000202020200; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000202020200; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000100; ++ __m128i_out = __lsx_vmini_b (__m128i_op0, 5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xf1f1f1f1f1f1f1f1; ++ *((unsigned long *)&__m128i_result[0]) = 0xf1f1f1f1f1f1f1f1; ++ __m128i_out = __lsx_vmini_b (__m128i_op0, -15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000f50000007500; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00007e1600007d98; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000f50000000900; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000090900000998; ++ __m128i_out = __lsx_vmini_b (__m128i_op0, 9); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x10f881a20ffd02b0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ff800000; ++ *((unsigned long *)&__m128i_result[1]) = 0xf1f181a2f1f1f1b0; ++ *((unsigned long *)&__m128i_result[0]) = 0xf1f1f1f1f180f1f1; ++ __m128i_out = __lsx_vmini_b (__m128i_op0, -15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m128i_result[0]) = 0xfff6fff6fff6fff6; ++ __m128i_out = __lsx_vmini_h (__m128i_op0, -10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1716151416151413; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1514131214131211; ++ *((unsigned long *)&__m128i_result[1]) = 0xfff3fff3fff3fff3; ++ *((unsigned long *)&__m128i_result[0]) = 0xfff3fff3fff3fff3; ++ __m128i_out = __lsx_vmini_h (__m128i_op0, -13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m128i_result[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m128i_result[0]) = 0xfefefefefefefefe; ++ __m128i_out = __lsx_vmini_h (__m128i_op0, 2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_h (__m128i_op0, 3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_h (__m128i_op0, 11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_h (__m128i_op0, 3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffff4fffffff4; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffff4fffffff4; ++ __m128i_out = __lsx_vmini_w (__m128i_op0, -12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_w (__m128i_op0, 1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_w (__m128i_op0, 0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffff3fffffff3; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffff3fffffff3; ++ __m128i_out = __lsx_vmini_w (__m128i_op0, -13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x001ffff0003ffff0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000fffefffefffef; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffefffef; ++ __m128i_out = __lsx_vmini_w (__m128i_op0, 0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000800000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff01fe0400000006; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000500000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xff01fe0400000005; ++ __m128i_out = __lsx_vmini_w (__m128i_op0, 5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffafffffffa; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffafffffffa; ++ __m128i_out = __lsx_vmini_w (__m128i_op0, -6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000d0000000d; ++ __m128i_out = __lsx_vmini_w (__m128i_op0, 13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[0]) = 0x8080808080808080; ++ __m128i_out = __lsx_vmini_w (__m128i_op0, 8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x345002920f3017d6; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffff7fffffff7; ++ *((unsigned long 
*)&__m128i_result[0]) = 0xfffffff7fffffff7; ++ __m128i_out = __lsx_vmini_w (__m128i_op0, -9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vmini_d (__m128i_op0, 1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x03574e3a62407e03; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffff7; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff7; ++ __m128i_out = __lsx_vmini_d (__m128i_op0, -9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1000000010000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100100000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffff1; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff1; ++ __m128i_out = __lsx_vmini_d (__m128i_op0, -15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000034; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000006; ++ __m128i_out = __lsx_vmini_d (__m128i_op0, 6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000006; ++ __m128i_out = __lsx_vmini_d (__m128i_op0, 6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0xff84fff4ff84fff4; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00a6ffceffb60052; ++ *((unsigned long *)&__m128i_result[1]) = 0xff84fff4ff84fff4; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff0; ++ __m128i_out = __lsx_vmini_d (__m128i_op0, -16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffff9; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff9; ++ __m128i_out = __lsx_vmini_d (__m128i_op0, -7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x111110ff11111141; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1111113111111100; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmini_d (__m128i_op0, -1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x55aa55c3d5aa55c4; ++ *((unsigned long *)&__m128i_op0[0]) = 0xaa55556fd5aaaac1; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000000c; ++ *((unsigned long *)&__m128i_result[0]) = 0xaa55556fd5aaaac1; ++ __m128i_out = __lsx_vmini_d (__m128i_op0, 12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_d (__m128i_op0, 12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffff4; ++ *((unsigned long *)&__m128i_result[0]) = 
0xfffffffffffffff4; ++ __m128i_out = __lsx_vmini_d (__m128i_op0, -12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffffb; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffb; ++ __m128i_out = __lsx_vmini_d (__m128i_op0, -5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfcfcfcdcfcfcfcdc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfcfcfcdcfcfcfcdc; ++ *((unsigned long *)&__m128i_result[1]) = 0xfcfcfcdcfcfcfcdc; ++ *((unsigned long *)&__m128i_result[0]) = 0xfcfcfcdcfcfcfcdc; ++ __m128i_out = __lsx_vmini_d (__m128i_op0, 3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000001030103; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffc; ++ __m128i_out = __lsx_vmini_d (__m128i_op0, -4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000085af0000b000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00017ea200002000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffff7; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff7; ++ __m128i_out = __lsx_vmini_d (__m128i_op0, -9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffff4; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff4; ++ __m128i_out = __lsx_vmini_d (__m128i_op0, -12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff00e400ff00e400; ++ 
*((unsigned long *)&__m128i_op0[0]) = 0xff01e41ffff0ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xff00e400ff00e400; ++ *((unsigned long *)&__m128i_result[0]) = 0xff01e41ffff0ffff; ++ __m128i_out = __lsx_vmini_d (__m128i_op0, 14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-2.c +new file mode 100644 +index 000000000..6eaae2134 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-2.c +@@ -0,0 +1,216 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff0001ffff0001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000a163000016b0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0303000103030001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000030300000303; ++ __m128i_out = __lsx_vmini_bu (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xd8248069ffe78077; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0d0d0d0d0d0d0d0d; ++ __m128i_out = __lsx_vmini_bu (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7da9b23a624082fd; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0505050505050505; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000005050000; ++ __m128i_out = __lsx_vmini_bu (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001300000013; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000001300000013; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000e0000000e; ++ __m128i_out = __lsx_vmini_bu (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_bu (__m128i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000001fffdfffdff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000001fffdfffdff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000010101010101; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000010101010101; ++ __m128i_out = __lsx_vmini_bu (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000009c007c00; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000071007600; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000009000900; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000009000900; ++ __m128i_out = __lsx_vmini_bu (__m128i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xc0b4d1a5f8babad3; ++ *((unsigned long *)&__m128i_op0[0]) = 0xbbc8ecc5f3ced5f3; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0303030303030303; ++ *((unsigned long *)&__m128i_result[0]) = 0x0303030303030303; ++ __m128i_out = __lsx_vmini_bu (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xd3220000d3f20000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8bff0000a7b80000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0909000009090000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0909000009090000; ++ __m128i_out = __lsx_vmini_bu (__m128i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_hu (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x80000000b57ec564; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000083ff0be0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0014000000140014; ++ *((unsigned long *)&__m128i_result[0]) = 0x0014000000140014; ++ __m128i_out = __lsx_vmini_hu (__m128i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0013001300130013; ++ *((unsigned long *)&__m128i_result[0]) = 0x0013001300130013; ++ __m128i_out = __lsx_vmini_hu (__m128i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_hu (__m128i_op0, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); 
++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000005; ++ __m128i_out = __lsx_vmini_hu (__m128i_op0, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x02b010f881a281a2; ++ *((unsigned long *)&__m128i_op0[0]) = 0x27b169bbb8145f50; ++ *((unsigned long *)&__m128i_result[1]) = 0x0002000200020002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0002000200020002; ++ __m128i_out = __lsx_vmini_hu (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_du (__m128i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000040004000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0010002000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_du (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ff00ff0000007f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_du (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000005; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x0000000000000005; ++ __m128i_out = __lsx_vmini_du (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000003fc00ff00; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000001fe01fe00; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000000a; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000000a; ++ __m128i_out = __lsx_vmini_du (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fc000007fc00000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fc000007fc00000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000000b; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000000b; ++ __m128i_out = __lsx_vmini_du (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000101010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000014; ++ __m128i_out = __lsx_vmini_du (__m128i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_du (__m128i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000005; ++ __m128i_out = __lsx_vmini_du (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_du (__m128i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000d3460001518a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000084300000e55f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000016; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000016; ++ __m128i_out = __lsx_vmini_du (__m128i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_du (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff2356fe165486; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5efeb3165bd7653d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000007; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000007; ++ __m128i_out = __lsx_vmini_du (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +-- +2.33.0 + diff --git a/LoongArch-Add-tests-for-SX-vector-vrotr-vrotri-vsra-.patch b/LoongArch-Add-tests-for-SX-vector-vrotr-vrotri-vsra-.patch new file mode 100644 index 0000000000000000000000000000000000000000..a46d041be2163b7b3fec57430aa0919043e062cb --- /dev/null +++ b/LoongArch-Add-tests-for-SX-vector-vrotr-vrotri-vsra-.patch @@ -0,0 +1,3173 @@ +From 67c36add58d634551a200f1473be3c7368530da1 Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Tue, 12 Sep 2023 11:13:32 +0800 +Subject: [PATCH 090/124] LoongArch: Add tests for SX vector + 
vrotr/vrotri/vsra/vsrai/vsran/vsrani /vsrarn/vsrarni instructions. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lsx/lsx-vrotr.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vrotri.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vsra.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vsrai.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vsran.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vsrani.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vsrar.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vsrari.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vsrarn.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vsrarni.c: New test. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lsx/lsx-vrotr.c | 381 +++++++++++++++++ + .../loongarch/vector/lsx/lsx-vrotri.c | 294 +++++++++++++ + .../loongarch/vector/lsx/lsx-vsra.c | 344 +++++++++++++++ + .../loongarch/vector/lsx/lsx-vsrai.c | 258 ++++++++++++ + .../loongarch/vector/lsx/lsx-vsran.c | 290 +++++++++++++ + .../loongarch/vector/lsx/lsx-vsrani.c | 246 +++++++++++ + .../loongarch/vector/lsx/lsx-vsrar.c | 354 ++++++++++++++++ + .../loongarch/vector/lsx/lsx-vsrari.c | 265 ++++++++++++ + .../loongarch/vector/lsx/lsx-vsrarn.c | 236 +++++++++++ + .../loongarch/vector/lsx/lsx-vsrarni.c | 398 ++++++++++++++++++ + 10 files changed, 3066 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotr.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotri.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsra.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrai.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsran.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrani.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrar.c + create mode 100644 
gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrari.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarn.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarni.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotr.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotr.c +new file mode 100644 +index 000000000..c42440cea +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotr.c +@@ -0,0 +1,381 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffefffefffffffc; ++ *((unsigned long *)&__m128i_op1[1]) = 0x2001240128032403; ++ *((unsigned long *)&__m128i_op1[0]) = 0x288b248c00010401; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffdfffefffff7ffe; ++ __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x5); ++ *((unsigned long *)&__m128i_op0[1]) = 0x2700000000002727; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000002727; ++ *((unsigned long *)&__m128i_op1[1]) = 0x697eba2bedfa9c82; ++ *((unsigned long *)&__m128i_op1[0]) = 0xd705c77a7025c899; ++ *((unsigned long *)&__m128i_result[1]) = 0xc9c00000000009c9; ++ *((unsigned long *)&__m128i_result[0]) = 0x0013938000000000; ++ __m128i_out = __lsx_vrotr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1000000010000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100100000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 
0x2000000020000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000200200000; ++ __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x10f917d72d3d01e4; ++ *((unsigned long *)&__m128i_op0[0]) = 0x203e16d116de012b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x10f917d72d3d01e4; ++ *((unsigned long *)&__m128i_result[0]) = 0x203e16d116de012b; ++ __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vrotr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x9f009f009f009f00; ++ *((unsigned long *)&__m128i_result[0]) = 0x9f009f009f009f00; ++ __m128i_out = __lsx_vrotr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000004fc04f81; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000004fc04f80; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000004fc04f81; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000004fc04f80; ++ __m128i_out = __lsx_vrotr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ff000000ff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ff000000ff00; ++ __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000006f00001f0a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000958affff995d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000006f00001f0a; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000958affff995d; ++ __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000006f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001f0a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000000006f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000de0000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000001f0a; ++ __m128i_out = __lsx_vrotr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff000100000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x41dfffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff000200000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfbffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7bffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfbffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7bffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xf7ffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xf7feffffffffffff; ++ __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0ba00ba00ba00ba0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0ba00ba00ba011eb; ++ *((unsigned long *)&__m128i_op1[1]) = 0xf1819b7c0732a6b6; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffb9917a6e7fffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x05d0ba0002e8802e; ++ *((unsigned long *)&__m128i_result[0]) = 0xd005e802174023d6; ++ __m128i_out = 
__lsx_vrotr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0003c853c843c87e; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0003c853c843c87e; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff000000ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000691a6c843c8fc; ++ *((unsigned long *)&__m128i_result[0]) = 0x000691a6918691fc; ++ __m128i_out = __lsx_vrotr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0003c853c843c87e; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0003c853c843c87e; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotr_w (__m128i_op0, 
__m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000003f0000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffc3ffff003e; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000003dffc2; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000003dffc2; ++ *((unsigned long *)&__m128i_result[1]) = 0xc000000fc0003fff; ++ *((unsigned long *)&__m128i_result[0]) = 0xbffffff0ffffc00f; ++ __m128i_out = __lsx_vrotr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vrotr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffdfffdfffdfffd; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffdfffdfffdfffd; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffdfffdfffdfffd; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffdfffdfffdfffd; ++ *((unsigned long *)&__m128i_result[1]) = 0xffefffefffefffef; ++ *((unsigned long *)&__m128i_result[0]) = 0xffefffefffefffef; ++ __m128i_out = __lsx_vrotr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000001010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000001010002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000010002; ++ __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[0]) = 0x8080808080808080; ++ __m128i_out = __lsx_vrotr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4e3e133738bb47d2; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x9c7c266e71768fa4; ++ __m128i_out = __lsx_vrotr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x001a64b345308091; ++ *((unsigned long *)&__m128i_op0[0]) = 0x001f2f2cab1c732a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000014414104505; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1011050040004101; ++ *((unsigned long *)&__m128i_result[1]) = 0x001a323b5430048c; ++ *((unsigned long *)&__m128i_result[0]) = 0x008f792cab1cb915; ++ __m128i_out = __lsx_vrotr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001e03; ++ *((unsigned long *)&__m128i_op1[1]) = 0x001a64b345308091; ++ *((unsigned long *)&__m128i_op1[0]) = 0x001f2f2cab1c732a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000780c00000; ++ __m128i_out = __lsx_vrotr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00020000ffff0001; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000b000b000b000b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000b000b000b000b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000b000b000b000b; ++ *((unsigned long *)&__m128i_result[0]) = 0x000b000b000b000b; ++ __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0005840100000005; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0005847b00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x636363633f3e47c1; ++ *((unsigned long *)&__m128i_op1[0]) = 0x41f8e080f1ef4eaa; ++ *((unsigned long *)&__m128i_result[1]) = 0xa000308000008002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0500847b00000000; ++ __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotri.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotri.c +new file mode 100644 +index 000000000..4ae4dbf8b +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotri.c +@@ -0,0 +1,294 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_h (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_d (__m128i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_d (__m128i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000800000008; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000800000008; ++ *((unsigned long *)&__m128i_result[1]) = 0x0002000000020000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0002000000020000; ++ __m128i_out = __lsx_vrotri_w (__m128i_op0, 0x12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_w (__m128i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0d1bffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xd915e98e2d8df4d1; ++ *((unsigned long *)&__m128i_result[1]) = 0xd0b1ffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x9d519ee8d2d84f1d; ++ __m128i_out = __lsx_vrotri_b 
(__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x10f917d72d3d01e4; ++ *((unsigned long *)&__m128i_op0[0]) = 0x203e16d116de012b; ++ *((unsigned long *)&__m128i_result[1]) = 0x887c8beb969e00f2; ++ *((unsigned long *)&__m128i_result[0]) = 0x101f8b680b6f8095; ++ __m128i_out = __lsx_vrotri_h (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x2); ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0800000008000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0800000008000000; ++ __m128i_out = __lsx_vrotri_b (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_b (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000c00; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_h (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_w (__m128i_op0, 0x10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x00000000ffffff01; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffeff400000df4; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffff03fe; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffe9df0000e81b; ++ __m128i_out = __lsx_vrotri_h (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_w (__m128i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000006f00001f0a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000958affff995d; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000de00003e14; ++ *((unsigned long *)&__m128i_result[0]) = 0x00012b15ffff32ba; ++ __m128i_out = __lsx_vrotri_d (__m128i_op0, 0x3f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000006f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000007b; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_b (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x80001b155b4b0000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x80001b155b4b0000; ++ __m128i_out = __lsx_vrotri_w (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vrotri_w (__m128i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffefffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffefffff; ++ __m128i_out = __lsx_vrotri_w (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001000000010; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_b (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1111113111111141; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1111113111111121; ++ *((unsigned long *)&__m128i_result[1]) = 0x1111311111114111; ++ *((unsigned long *)&__m128i_result[0]) = 0x1111311111112111; ++ __m128i_out = __lsx_vrotri_h (__m128i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vrotri_h (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_d (__m128i_op0, 0x35); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; 
++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0008000000000000; ++ __m128i_out = __lsx_vrotri_d (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000007fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffff800000003; ++ __m128i_out = __lsx_vrotri_d (__m128i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000003f0000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffc3ffff003e; ++ *((unsigned long *)&__m128i_result[1]) = 0x00001f80007fff80; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffe1ffff801f7f; ++ __m128i_out = __lsx_vrotri_w (__m128i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_b (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfff0000ffff0000f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_d (__m128i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ff02d060; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ff02d060; ++ 
__m128i_out = __lsx_vrotri_b (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vrotri_d (__m128i_op0, 0x3f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_w (__m128i_op0, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_b (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x27b9331b8e77ead9; ++ *((unsigned long *)&__m128i_op0[0]) = 0x58d6bf1867ace738; ++ *((unsigned long *)&__m128i_result[1]) = 0xe4cc6c9edfab6639; ++ *((unsigned long *)&__m128i_result[0]) = 0x5afc6163b39ce19e; ++ __m128i_out = __lsx_vrotri_w (__m128i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_d (__m128i_op0, 0x2c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_w (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_d (__m128i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_h (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_h (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_d (__m128i_op0, 0x21); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff000000000000; ++ __m128i_out = 
__lsx_vrotri_h (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsra.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsra.c +new file mode 100644 +index 000000000..fd7c22a82 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsra.c +@@ -0,0 +1,344 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsra_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsra_w (__m128i_op0, __m128i_op1); 
++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsra_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsra_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fc000007fc00000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1e801ffc7fc00000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00ed0008005e00a2; ++ *((unsigned long *)&__m128i_op1[0]) = 0x007a007600150077; ++ *((unsigned long *)&__m128i_result[1]) = 0x0003000000010000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0007007f03fe0000; ++ __m128i_out = __lsx_vsra_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsra_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffe001ffffe001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffe001ffffe001; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsra_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3fc000003fc00000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3fc000003fc00000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x3fc000003fc00000; ++ *((unsigned long *)&__m128i_result[0]) = 0x3fc000003fc00000; ++ __m128i_out = __lsx_vsra_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsra_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00003ff000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000fffc00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsra_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x85bd6b0e94d89998; ++ *((unsigned long *)&__m128i_op0[0]) = 0xd83c8081ffff8080; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x85bd6b0e94d89998; ++ *((unsigned long *)&__m128i_result[0]) = 0xd83c8081ffff8080; ++ __m128i_out = __lsx_vsra_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xe0d56a9774f3ea31; ++ *((unsigned long *)&__m128i_op0[0]) = 0xbddaa86803e33c2a; ++ *((unsigned long *)&__m128i_op1[1]) = 0xe0d56a9774f3ea31; ++ *((unsigned long *)&__m128i_op1[0]) = 0xbddaa86803e33c2a; ++ *((unsigned long *)&__m128i_result[1]) = 0xff0600d50e9ef518; ++ *((unsigned long *)&__m128i_result[0]) = 0xffefffa8007c000f; ++ __m128i_out = __lsx_vsra_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xaaaaffebcfb748e0; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfd293eab528e7ebe; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsra_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffefff6fff80002; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsra_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsra_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xe0404041e0404041; ++ *((unsigned long *)&__m128i_op1[0]) = 0x803f800080000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsra_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000700ff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000040004000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0010002000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000700ff00000000; ++ __m128i_out = __lsx_vsra_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsra_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000820000ff81; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0000ff810000ff81; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000820000ff81; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ff810000ff81; ++ __m128i_out = __lsx_vsra_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x800080007f008000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0a0aa9890a0ac5f3; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffff000; ++ __m128i_out = __lsx_vsra_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x01203f1e3d1c3b1a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3918371635143312; ++ *((unsigned long *)&__m128i_op1[1]) = 0x21201f1e1d001b25; ++ *((unsigned long *)&__m128i_op1[0]) = 0x191817161514131d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000001e8e1d8; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000e400000001; ++ __m128i_out = __lsx_vsra_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0008000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsra_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsra_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000080008; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000fffe01fd02; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000040002; ++ __m128i_out = __lsx_vsra_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff80000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff80000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsra_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffc0ff80ff800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffc0ff80ff800000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000005; ++ __m128i_out = __lsx_vsra_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000c0c00000000; ++ 
*((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsra_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vsra_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsra_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffac0a000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x801d5de0000559e0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x77eb86788eebafe1; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffac00000000; ++ __m128i_out = __lsx_vsra_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfcfcfcfcfcfcfcfd; ++ *((unsigned long *)&__m128i_op1[0]) = 
0xfcfcfcfcfcfc0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsra_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5252525252525252; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5252525252525252; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x5252525252525252; ++ *((unsigned long *)&__m128i_result[0]) = 0x5252525252525252; ++ __m128i_out = __lsx_vsra_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x2e2b34ca59fa4c88; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3b2c8aefd44be966; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsra_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0802080408060803; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00001fffe0001fff; ++ __m128i_out = __lsx_vsra_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f8000007f800000; ++ __m128i_out = __lsx_vsra_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000047fe2f0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000047fe2f0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010012; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000fec20704; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000043fe2fc; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000001fffff; ++ __m128i_out = __lsx_vsra_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrai.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrai.c +new file mode 100644 +index 000000000..2ca4f0b7a +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrai.c +@@ -0,0 +1,258 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_d (__m128i_op0, 
0x21); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001ffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001ffff0001ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff; ++ __m128i_out = __lsx_vsrai_b (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ca354688; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000003; ++ __m128i_out = __lsx_vsrai_d (__m128i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000040400000383; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffe000ffff1fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000800000007; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffc0ffff003f; ++ __m128i_out = __lsx_vsrai_h (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000001000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_b (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_d (__m128i_op0, 0x2e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xf6e91c0000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x51cfd7c000000000; ++ 
*((unsigned long *)&__m128i_result[1]) = 0xfffd000700000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0014fff500000000; ++ __m128i_out = __lsx_vsrai_h (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000200010; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_h (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_d (__m128i_op0, 0x3c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_b (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3c600000ff800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0f180000ffe00000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsrai_b (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_d (__m128i_op0, 0x5); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x21f32eaf5b7a02c8; ++ *((unsigned long *)&__m128i_op0[0]) = 0x407c2ca32cbd0357; ++ *((unsigned long *)&__m128i_result[1]) = 0x10f917d72d3d01e4; ++ *((unsigned long *)&__m128i_result[0]) = 0x203e16d116de012b; ++ __m128i_out = __lsx_vsrai_b (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_w (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x01ff000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x01ff000000000000; ++ __m128i_out = __lsx_vsrai_b (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1268f057137a0267; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0048137ef886fae0; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000490000004d; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000001ffffffe2; ++ __m128i_out = __lsx_vsrai_w (__m128i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ffffff00ff00ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ffffff00ff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff00ffffffffff; ++ __m128i_out = __lsx_vsrai_b (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfe80ffffffffff02; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffe80; ++ __m128i_out = __lsx_vsrai_d (__m128i_op0, 0x30); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001800000039; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000049ffffffaa; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000060000000e; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000127fffffea; ++ __m128i_out = __lsx_vsrai_d (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_d (__m128i_op0, 0x28); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0aa077b7054c9554; ++ *((unsigned long *)&__m128i_op0[0]) = 0x40c7ee1f38e4c4e8; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff; ++ __m128i_out = __lsx_vsrai_h (__m128i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_w (__m128i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_b (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x3fff3fff3fff3fff; ++ __m128i_out = __lsx_vsrai_b (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5d7f5d807fea807f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000002ebf; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_d (__m128i_op0, 0x31); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_b (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_d (__m128i_op0, 0x31); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_w (__m128i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000190; ++ *((unsigned long *)&__m128i_result[1]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_d (__m128i_op0, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010058; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000001000100010; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_d (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_d (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00f0001000000010; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x00f0001000000010; ++ __m128i_out = __lsx_vsrai_h (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_d (__m128i_op0, 0x3d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vsrai_h (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ 
++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsran.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsran.c +new file mode 100644 +index 000000000..4e7c7ab7e +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsran.c +@@ -0,0 +1,290 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsran_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_op1[1]) = 0x007fffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000fffe0001fffe; ++ __m128i_out = __lsx_vsran_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0303020102020001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0002000000000201; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xd82480697f678077; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0301020100000004; ++ __m128i_out = __lsx_vsran_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsran_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffff02; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ff02; ++ __m128i_out = __lsx_vsran_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3c5fffffff7fffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffefffeff00feff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsran_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000e0180000e810; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000f0080000f800; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000e0180000e810; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000f0080000f800; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000f0f800; ++ __m128i_out = __lsx_vsran_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fff7fff00000000; ++ __m128i_out = __lsx_vsran_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100089bde; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000104000800; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x80044def00000001; ++ __m128i_out = __lsx_vsran_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000100f8100002; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfff0ff8006f0f950; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vsran_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000001f0a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffff7a53; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsran_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffff0; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffff0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsran_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000000000bf; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000002bb; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vsran_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000021e79364; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000718ea657431b; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfefffffffeffda6f; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfefffffffeffe3d7; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000ff0000ff86; ++ __m128i_out = __lsx_vsran_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000000000000001; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0101fe870101fe87; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0101fe8700000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsran_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsran_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_op0[0]) = 0x353c8cc4b1ec5b09; ++ *((unsigned long *)&__m128i_op1[1]) = 0x002affd600000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xcbc2723a4f12a5f8; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8080808000000035; ++ __m128i_out = __lsx_vsran_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsran_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x003fffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsran_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000100; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000001000000ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00ff80ff00ff80ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000010000000000; ++ __m128i_out = __lsx_vsran_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsran_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7ff000ff6220c0c1; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffe8081000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7ff000ff6220c0c1; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffe8081000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xb110606000000000; ++ __m128i_out = __lsx_vsran_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op1[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsran_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsran_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsran_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0037ffd40083ffe5; ++ *((unsigned long *)&__m128i_op0[0]) = 0x001e0052001ffff9; ++ *((unsigned long *)&__m128i_op1[1]) = 0x001effae001effae; ++ *((unsigned long *)&__m128i_op1[0]) = 0x001effae001effae; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00df020f0078007f; ++ __m128i_out = __lsx_vsran_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff80ffa2fff0ff74; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff76ffd8ffe6ffaa; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0xffffffffc105d1aa; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffbc19ecca; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffe03ff63ff9bf; ++ __m128i_out = __lsx_vsran_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x06d9090909090909; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0039d21e3229d4e8; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6d339b4f3b439885; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000db24848; ++ __m128i_out = __lsx_vsran_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff80000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfe3bfb01fe3bfe01; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfe03fe3ffe01fa21; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff000000000000; ++ __m128i_out = __lsx_vsran_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsran_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrani.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrani.c +new file mode 100644 
+index 000000000..92988035d +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrani.c +@@ -0,0 +1,246 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000b0000000b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000201000000000b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0005000501800005; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrani_b_h (__m128i_op0, __m128i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x438ff81ff81ff820; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x03ff03ff03ff03ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000043; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000003; ++ __m128i_out = __lsx_vsrani_d_q (__m128i_op0, __m128i_op1, 0x78); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000002020202; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; 
++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrani_d_q (__m128i_op0, __m128i_op1, 0x5b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000009; ++ *((unsigned long *)&__m128i_op1[1]) = 0x697eba2bedfa9c82; ++ *((unsigned long *)&__m128i_op1[0]) = 0xd705c77a7025c899; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0x03fdfffcfefe03fe; ++ __m128i_out = __lsx_vsrani_b_h (__m128i_op0, __m128i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0100000001000100; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0100010000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00ffffff00ff00ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00ff00ffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000010001000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff00ff00ffffff; ++ __m128i_out = __lsx_vsrani_h_w (__m128i_op0, __m128i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrani_b_h (__m128i_op0, __m128i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x40f0001000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x40f0001000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x1e0200001e020000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrani_b_h (__m128i_op0, __m128i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0800080008000800; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0800080008000800; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0040004000400040; ++ __m128i_out = __lsx_vsrani_w_d (__m128i_op0, __m128i_op1, 0x15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000040000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrani_d_q (__m128i_op0, __m128i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x001ffce00016fb41; ++ *((unsigned long *)&__m128i_op0[0]) = 0x57cb857100001a46; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfbffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7bffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000150000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffeffff001effff; ++ __m128i_out = __lsx_vsrani_h_w (__m128i_op0, __m128i_op1, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x1); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op1[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m128i_op1[0]) = 0x2020202020207fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x01010101010101ff; ++ __m128i_out = __lsx_vsrani_b_h (__m128i_op0, __m128i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfff082f000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x003f000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff000000000000; ++ __m128i_out = __lsx_vsrani_h_w (__m128i_op0, __m128i_op1, 0x17); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrani_h_w (__m128i_op0, __m128i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00005dcbe7e830c0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x03f21e0114bf19da; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000003f200001e01; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000014bf000019da; ++ *((unsigned long *)&__m128i_result[1]) = 0x0005fe0300010101; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100010001; ++ __m128i_out = __lsx_vsrani_b_h (__m128i_op0, __m128i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00ff00ff; ++ 
*((unsigned long *)&__m128i_op1[1]) = 0x62cbf96e4acfaf40; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf0bc9a5278285a4a; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x62cbf96e4acfaf40; ++ __m128i_out = __lsx_vsrani_d_q (__m128i_op0, __m128i_op1, 0x40); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffacdb6dbecac; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1f5533a694f902c0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1f54e0ab00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffb6d01f5f94f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000001f50000; ++ __m128i_out = __lsx_vsrani_h_w (__m128i_op0, __m128i_op1, 0x14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrani_w_d (__m128i_op0, __m128i_op1, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x808080e280808080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8080636380806363; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8080808080638063; ++ __m128i_out = __lsx_vsrani_b_h (__m128i_op0, __m128i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op1[1]) = 0x000000000000001d; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000001d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrani_d_q (__m128i_op0, __m128i_op1, 0x63); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0f07697100000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000076971000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrani_w_d (__m128i_op0, __m128i_op1, 0x14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000003020302; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffff81; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000c0c00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffe; ++ __m128i_out = __lsx_vsrani_b_h (__m128i_op0, __m128i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrani_d_q (__m128i_op0, __m128i_op1, 0x58); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffc; ++ 
*((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vsrani_w_d (__m128i_op0, __m128i_op1, 0x3a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrani_b_h (__m128i_op0, __m128i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5847b72626ce61ef; ++ *((unsigned long *)&__m128i_op0[0]) = 0x110053f401e7cced; ++ *((unsigned long *)&__m128i_op1[1]) = 0x5847b72626ce61ef; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0005847b00011005; ++ *((unsigned long *)&__m128i_result[0]) = 0x0005847b00000000; ++ __m128i_out = __lsx_vsrani_w_d (__m128i_op0, __m128i_op1, 0x2c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrar.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrar.c +new file mode 100644 +index 000000000..6a842d9ce +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrar.c +@@ -0,0 +1,354 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, 
__m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff02ff1bff02ff23; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffffff02fff4; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrar_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrar_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffff01ff01; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrar_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001300000013; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000001300000013; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000001300000013; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001300000013; ++ __m128i_out = __lsx_vsrar_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_result[0]) = 0xff00ff00ff00ff00; ++ __m128i_out = __lsx_vsrar_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000400000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrar_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffefefe6a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000c2bac2c2; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffff0000000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000c2bac2c2; ++ __m128i_out = __lsx_vsrar_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000010000003f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000010000003f; ++ __m128i_out = __lsx_vsrar_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x4f804f804f804f80; ++ __m128i_out = __lsx_vsrar_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000800000008; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000008000000080; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrar_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrar_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x80010001b57fc565; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8001000184000be0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x80010001b57fc565; ++ *((unsigned long *)&__m128i_result[0]) = 0x8001000184000be0; ++ __m128i_out = __lsx_vsrar_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0bd80bd80bdfffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0bd80bd80bd80000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrar_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000006f00001f0a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000958affff995d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000006f00001f0a; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000958affff995d; ++ __m128i_out = __lsx_vsrar_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrar_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000080000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000080000000000; ++ __m128i_out = __lsx_vsrar_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc0fffff000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000000000bf; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000002bb; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xc0fffff000000000; ++ __m128i_out = __lsx_vsrar_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffb96bffff57c9; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff6080ffff4417; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffb96bffff57c9; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff6080ffff4417; ++ __m128i_out = __lsx_vsrar_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffff0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vsrar_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3fbf3fbf00007fff; ++ *((unsigned long *)&__m128i_result[1]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrar_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000003a0000003a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000003a0000003a; ++ __m128i_out = __lsx_vsrar_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrar_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0086000000040000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0082000000000007; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0086000000040000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0082000000000007; ++ __m128i_out = __lsx_vsrar_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrar_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffe000ffffe000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x467f6080467d607f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrar_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0037ffc8d7ff2800; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff00000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_result[1]) = 0x001bffe4ebff9400; ++ *((unsigned long *)&__m128i_result[0]) = 0xff80000000000000; ++ __m128i_out = __lsx_vsrar_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x2a29282726252423; ++ *((unsigned long *)&__m128i_op0[0]) = 0x2221201f1e1d1c1b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x2a29282726252423; ++ *((unsigned long *)&__m128i_op1[0]) = 0x2221201f1e1d1c1b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000005452505; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000004442403e4; ++ __m128i_out = __lsx_vsrar_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0100010001000100; ++ __m128i_out = __lsx_vsrar_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x0); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000200000002; ++ __m128i_out = __lsx_vsrar_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffc0ff80ff800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000c0c00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0xffc00000ff800000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrar_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fffffff7ffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff7ffffffe; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffe4866c86; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffe4866c86; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000002000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000002000000; ++ __m128i_out = __lsx_vsrar_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrar_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrar_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrar_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1748c4f9ed1a5870; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x1748c4f9ed1a5870; ++ __m128i_out = __lsx_vsrar_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x680485c8b304b019; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc89d7f0ff90da019; ++ *((unsigned long *)&__m128i_op1[1]) = 0x680485c8b304b019; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc89d7f0ff90da019; ++ *((unsigned long *)&__m128i_result[1]) = 0x00680486ffffffda; ++ *((unsigned long *)&__m128i_result[0]) = 
0xffff913bfffffffd; ++ __m128i_out = __lsx_vsrar_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xcd636363cd636363; ++ *((unsigned long *)&__m128i_op1[0]) = 0xcd636363cd636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrar_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrari.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrari.c +new file mode 100644 +index 000000000..2a353d65a +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrari.c +@@ -0,0 +1,265 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_b (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_h (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000cb4a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000f909; ++ __m128i_out = __lsx_vsrari_b (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_d (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_b (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0080000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf4b6f3f52f4ef4a8; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfff5fff4002ffff5; ++ __m128i_out = __lsx_vsrari_h (__m128i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffc0ff81000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0xfffffff0ffe04000; ++ __m128i_out = __lsx_vsrari_w (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffe5; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffe5; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000000000f3; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000000000f3; ++ __m128i_out = __lsx_vsrari_b (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_w (__m128i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_w (__m128i_op0, 0x12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_w (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffff00010000fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000100; ++ __m128i_out = __lsx_vsrari_w (__m128i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000958affff995d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000fdfc0000fd03; ++ __m128i_out = __lsx_vsrari_b (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000017161515; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000095141311; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_d (__m128i_op0, 0x34); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x21201f1e19181716; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000109000000c9; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_w (__m128i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_w (__m128i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_b (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x77c0404a4000403a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x77c03fd640003fc6; ++ *((unsigned long *)&__m128i_result[1]) = 0x00f0008100800080; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x00f0008000800080; ++ __m128i_out = __lsx_vsrari_h (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000006c80031; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_d (__m128i_op0, 0x3c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_b (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000000000a6; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_h (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001200100012001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000080000000800; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_b (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_d (__m128i_op0, 0x12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_b (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_b (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_w (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_w (__m128i_op0, 0x10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_w (__m128i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_w (__m128i_op0, 0x12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000202020200; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000404040; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000020; ++ __m128i_out = __lsx_vsrari_w (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x30eb020302101b03; ++ *((unsigned long *)&__m128i_op0[0]) = 0x020310d0c0030220; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_b (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x004d004d004d004d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x004d004d004d004d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001340134013401; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001340134013401; ++ __m128i_out = __lsx_vsrari_d (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_b (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_w (__m128i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_h (__m128i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarn.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarn.c +new file mode 100644 +index 000000000..60d474203 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarn.c +@@ -0,0 +1,236 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffefffffffef; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffefffffffef; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffefffffffef; ++ __m128i_out = __lsx_vsrarn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffff1; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffff1; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffefff6fff80002; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xff000000fefb0000; ++ __m128i_out = __lsx_vsrarn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000200; ++ 
*((unsigned long *)&__m128i_op1[0]) = 0x0000000000000200; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsrarn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000c2f90000bafa; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000c2fa8000c2fa; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xc2f9bafac2fac2fa; ++ __m128i_out = __lsx_vsrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000004; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffff0204; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffefffefffefffe; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x01203f1e3d1c3b1a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3918371635143312; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000001d5d4; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000150d707009; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x03f1e3d28b1a8a1a; ++ __m128i_out = __lsx_vsrarn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffefffefffeffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffefffefffeffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff7f810100001000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x001fffc0ffffe001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000002259662; ++ *((unsigned long *)&__m128i_op1[0]) = 
0xc4dbe60354005d25; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f01000000f8ff00; ++ __m128i_out = __lsx_vsrarn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff6ff4ffff8db8; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffbaf4ffffb805; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x9c7c266e71768fa4; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfff4ffb800ff0080; ++ __m128i_out = __lsx_vsrarn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000084d12ce; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000044470000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00004dce00004700; ++ __m128i_out = __lsx_vsrarn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0b4c600000000002; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x08080807f5f5f5f8; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000202020200; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0202f5f80000ff00; ++ __m128i_out = __lsx_vsrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0d060d060d060d06; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0d060d060d060d06; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0d060d060d060d06; ++ __m128i_out = __lsx_vsrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000011ff040; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff01fe03ff01fe03; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff01fe03ff01fe03; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xff01fe03ff01fe03; ++ __m128i_out = __lsx_vsrarn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarni.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarni.c +new file mode 100644 +index 000000000..3aa23bdc8 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarni.c +@@ -0,0 +1,398 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, 
long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ff020000fff4; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ff020000fff4; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fc0000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1e801ffc00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000080007f80800; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000001000000; ++ __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ff0000ff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_d_q (__m128i_op0, __m128i_op1, 0x4b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000001e5; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) 
= 0x5000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_d_q (__m128i_op0, __m128i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff80000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff8000002f4ef4a8; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000f4a8; ++ __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00100184017e0032; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0086018c01360164; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffff33c4b1e67; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000800c0004300c; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_w_d (__m128i_op0, __m128i_op1, 0x25); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001300000013; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000001300000013; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_b_h (__m128i_op0, __m128i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_d_q (__m128i_op0, __m128i_op1, 0x66); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000800000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4101010141010100; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000001ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0020808100000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_w_d (__m128i_op0, __m128i_op1, 0x29); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_d_q (__m128i_op0, __m128i_op1, 0x64); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x001ffff0003ffff0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x028c026bfff027af; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000003fc03fc00; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffc00a3009b000; ++ __m128i_out = __lsx_vsrarni_d_q (__m128i_op0, __m128i_op1, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000ffa7f8ff81; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000003f0080ffc0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000007fff00ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000a7f87fffff81; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffd400000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000004000000040; ++ __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x003f800000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003f800000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000080003f80ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0002000000020000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000001fc00000000; ++ __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0x15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff80010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff80010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0bd80bd80bdfffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0bd80bd80bd80000; ++ *((unsigned long *)&__m128i_result[1]) = 
0x1ffffffff8001000; ++ *((unsigned long *)&__m128i_result[0]) = 0xf0bd80bd80bd8000; ++ __m128i_out = __lsx_vsrarni_d_q (__m128i_op0, __m128i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_w_d (__m128i_op0, __m128i_op1, 0x24); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xecec006c00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xecec006c00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff007f00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff007f00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_b_h (__m128i_op0, __m128i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000001ff85ffdc0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000332ae5d97330; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x1ff85ffe2ae5d973; ++ __m128i_out = __lsx_vsrarni_w_d (__m128i_op0, __m128i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000043c5ea7b6; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000008fc4ef7b4; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 
0x00000fea0000fffe; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_b_h (__m128i_op0, __m128i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_d_q (__m128i_op0, __m128i_op1, 0x48); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000dfa6e0c6; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000d46cdc13; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_d_q (__m128i_op0, __m128i_op1, 0x64); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x09e8e9012fded7fd; ++ *((unsigned long *)&__m128i_op0[0]) = 0x479f64b03373df61; ++ *((unsigned long *)&__m128i_op1[1]) = 0x04c0044a0400043a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x04c004d6040004c6; ++ *((unsigned long *)&__m128i_result[1]) = 0x1d20db00ec967bec; ++ *((unsigned long *)&__m128i_result[0]) = 0x00890087009b0099; ++ __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000080800000808; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000080800000808; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8080000180800001; ++ __m128i_out = __lsx_vsrarni_w_d (__m128i_op0, __m128i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000003e; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00fe00fe000200fe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00fe00fe000200fe; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000003e; ++ *((unsigned long *)&__m128i_result[0]) = 0xfefe02fefefe02fe; ++ __m128i_out = __lsx_vsrarni_b_h (__m128i_op0, __m128i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_b_h (__m128i_op0, __m128i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000200000002000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1000000010000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000020000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0103000201030002; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_w_d (__m128i_op0, __m128i_op1, 0x3f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_w_d (__m128i_op0, __m128i_op1, 0x26); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0x11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffc000400000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00003fff00010000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_d_q (__m128i_op0, __m128i_op1, 0x12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_d_q (__m128i_op0, __m128i_op1, 0x6d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ff010000ff01; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_w_d (__m128i_op0, __m128i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xcd636363cd636363; ++ *((unsigned long *)&__m128i_op0[0]) = 0xcd636363cd636363; ++ *((unsigned long *)&__m128i_op1[1]) = 0xcd636363cd636363; ++ *((unsigned long *)&__m128i_op1[0]) = 0xcd636363cd636363; ++ *((unsigned long *)&__m128i_result[1]) = 0xf359f359f359f359; ++ *((unsigned long *)&__m128i_result[0]) = 0xf359f359f359f359; ++ __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0x12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000016; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000016; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_b_h (__m128i_op0, __m128i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_w_d (__m128i_op0, __m128i_op1, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x01533b5e7489ae24; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffab7e71e33848; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xce9135c49ffff570; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_d_q (__m128i_op0, __m128i_op1, 0x23); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000807bf0a1f80; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000800ecedee68; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0005840100000005; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0005847b00000000; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0001f0a20001cedf; ++ *((unsigned long *)&__m128i_result[0]) = 0x0058000000580000; ++ __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffb1fb1000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xf2c97aaa7d8fa270; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0b73e427f7cfcb88; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_w_d (__m128i_op0, __m128i_op1, 0x3f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0a545374471b7070; ++ *((unsigned long *)&__m128i_op0[0]) = 0x274f4f0648145f50; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4f4f4f4f4f4f4f4f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4f4f4f4f4f4f4f4f; ++ *((unsigned long *)&__m128i_result[1]) = 0xa8a736e19e9e28bf; ++ *((unsigned long *)&__m128i_result[0]) = 0x9e9f9e9f9e9f9e9f; ++ __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +-- +2.33.0 + diff --git a/LoongArch-Add-tests-for-SX-vector-vsll-vslli-vsrl-vs.patch b/LoongArch-Add-tests-for-SX-vector-vsll-vslli-vsrl-vs.patch new file mode 100644 index 0000000000000000000000000000000000000000..e0e8d11e59b8ec0056f56f7988ad005681a242b5 --- /dev/null +++ b/LoongArch-Add-tests-for-SX-vector-vsll-vslli-vsrl-vs.patch @@ -0,0 +1,4023 @@ +From 64d3c9507fdf2829659affdb7d0490e7b2888787 Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Tue, 12 Sep 2023 10:55:35 +0800 +Subject: [PATCH 089/124] LoongArch: Add tests for SX vector + vsll/vslli/vsrl/vsrli/vsrln/vsrlni/vsrlr /vsrlri/vslrlrn/vsrlrni + instructions. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lsx/lsx-vsll.c: New test. 
+ * gcc.target/loongarch/vector/lsx/lsx-vslli.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vsllwil-1.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vsllwil-2.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vsrl.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vsrli.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vsrln.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vsrlni.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vsrlr.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vsrlri.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vsrlrn.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vsrlrni.c: New test. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lsx/lsx-vsll.c | 254 +++++++ + .../loongarch/vector/lsx/lsx-vslli.c | 293 ++++++++ + .../loongarch/vector/lsx/lsx-vsllwil-1.c | 244 +++++++ + .../loongarch/vector/lsx/lsx-vsllwil-2.c | 189 +++++ + .../loongarch/vector/lsx/lsx-vsrl.c | 389 ++++++++++ + .../loongarch/vector/lsx/lsx-vsrli.c | 328 +++++++++ + .../loongarch/vector/lsx/lsx-vsrln.c | 335 +++++++++ + .../loongarch/vector/lsx/lsx-vsrlni.c | 281 +++++++ + .../loongarch/vector/lsx/lsx-vsrlr.c | 434 +++++++++++ + .../loongarch/vector/lsx/lsx-vsrlri.c | 300 ++++++++ + .../loongarch/vector/lsx/lsx-vsrlrn.c | 164 +++++ + .../loongarch/vector/lsx/lsx-vsrlrni.c | 686 ++++++++++++++++++ + 12 files changed, 3897 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsll.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslli.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrl.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrli.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrln.c + create mode 
100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlni.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlr.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlri.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrn.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrni.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsll.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsll.c +new file mode 100644 +index 000000000..7b8ad7d5a +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsll.c +@@ -0,0 +1,254 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long *)&__m128i_result[1]) = 0xb9884ab93b0b80a0; ++ *((unsigned long *)&__m128i_result[0]) = 0xf11e970c68000000; ++ __m128i_out = __lsx_vsll_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0100000100010001; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_result[0]) = 0x0100000100010001; ++ __m128i_out = __lsx_vsll_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsll_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsll_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsll_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsll_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsll_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsll_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00307028003f80b0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0040007fff800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffc0ffffff81; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff008000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0060e050007f0160; ++ *((unsigned long *)&__m128i_result[0]) = 0x0040007fff800000; ++ __m128i_out = __lsx_vsll_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000401000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000401000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsll_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3fffffff80000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00003ffd000a4000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffcffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000fffd000a0000; ++ *((unsigned long *)&__m128i_result[1]) = 0xf000800080000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000a00028004000; ++ __m128i_out = __lsx_vsll_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6b9fe3649c9d6363; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6363bc9e8b696363; ++ *((unsigned long *)&__m128i_op1[1]) = 0x6b9fe3649c9d6363; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6363bc9e8b696363; ++ *((unsigned long *)&__m128i_result[1]) = 0xb9fe3640e4eb1b18; ++ *((unsigned long *)&__m128i_result[0]) = 0x800000005b4b1b18; ++ __m128i_out = __lsx_vsll_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x80001b155b4b0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00006c82; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00009b140000917b; ++ *((unsigned long *)&__m128i_result[1]) = 0x80000000fffffffc; ++ *((unsigned long *)&__m128i_result[0]) = 0xb150000000000000; ++ __m128i_out = __lsx_vsll_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsll_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffff7e00000081; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000000000008000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsll_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x03f1e3d28b1a8a1a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x03f1e3d28b1a8a1a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x18e2184858682868; ++ __m128i_out = __lsx_vsll_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsll_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; 
++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsll_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ff02d060; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ff02d060; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xff02d06000000000; ++ __m128i_out = __lsx_vsll_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vsll_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000000000000000; ++ __m128i_out = __lsx_vsll_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vsll_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsll_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000000; ++ __m128i_out = __lsx_vsll_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000200000001c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000200000001c; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000200000001c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000200000001c; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000020000000c0; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000020000000c0; ++ __m128i_out = __lsx_vsll_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vsll_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslli.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslli.c +new file mode 100644 +index 000000000..7a77e80c0 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslli.c +@@ -0,0 +1,293 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_d (__m128i_op0, 0x35); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_b (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_w (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xaaaaffebcfb748e0; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfd293eab528e7ebe; ++ *((unsigned long *)&__m128i_result[1]) = 0xf6e91c0000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x51cfd7c000000000; ++ __m128i_out = __lsx_vslli_d (__m128i_op0, 0x25); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffff0ffe04000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_d (__m128i_op0, 0x3f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffcfffcfffcfffc; ++ __m128i_out = __lsx_vslli_h (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xc39fffff007fffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000fe00fd; ++ *((unsigned long *)&__m128i_result[1]) = 0x0e7ffffc01fffffc; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000003f803f4; ++ __m128i_out = __lsx_vslli_w (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslli_h (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0000080000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000080000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000800000000000; ++ __m128i_out = __lsx_vslli_b (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000040; ++ __m128i_out = __lsx_vslli_h (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_w (__m128i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_d (__m128i_op0, 0x3c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffff00ffff00ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m128i_result[1]) = 0xfcfcfc00fcfc00fc; ++ *((unsigned long *)&__m128i_result[0]) = 0xfcfcfcfcfcfcfc00; ++ __m128i_out = __lsx_vslli_b (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000007b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000060; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_b (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_d (__m128i_op0, 0x38); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_w (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000f00f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000f00f; ++ __m128i_out = __lsx_vslli_b (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000d46cdc13; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000060000000; ++ __m128i_out = __lsx_vslli_w (__m128i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x61608654a2d4f6da; ++ *((unsigned long *)&__m128i_result[1]) = 0xfee0000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xc2c00ca844a8ecb4; ++ __m128i_out = __lsx_vslli_b (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_h (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0004000400040004; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0004000400040004; ++ *((unsigned long *)&__m128i_result[1]) = 0x0100000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0100000000000000; ++ __m128i_out = __lsx_vslli_d (__m128i_op0, 0x36); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_w (__m128i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_b (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_d (__m128i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vslli_b (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff010300ff0103; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000002000; ++ *((unsigned long *)&__m128i_result[0]) = 0xf0003000f0003000; ++ __m128i_out = __lsx_vslli_h (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_w (__m128i_op0, 0x12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000000080000000; ++ __m128i_out = __lsx_vslli_w (__m128i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff800fff01; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff001ffe02; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_d (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xd78cfd70b5f65d76; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5779108fdedda7e4; ++ *((unsigned long *)&__m128i_result[1]) = 0x67eb85afb2ebb000; ++ *((unsigned long *)&__m128i_result[0]) = 0xc8847ef6ed3f2000; ++ __m128i_out = __lsx_vslli_d (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffff7fffffff7; ++ 
*((unsigned long *)&__m128i_op0[0]) = 0xfffffff7fffffff7; ++ *((unsigned long *)&__m128i_result[1]) = 0xfcfcfcdcfcfcfcdc; ++ *((unsigned long *)&__m128i_result[0]) = 0xfcfcfcdcfcfcfcdc; ++ __m128i_out = __lsx_vslli_b (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xc0c0c0c0c0c0c0c0; ++ *((unsigned long *)&__m128i_result[0]) = 0xc0c0c0c0c0c0c0c0; ++ __m128i_out = __lsx_vslli_b (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_h (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xe2560afe9c001a18; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe2560afe9c001a18; ++ *((unsigned long *)&__m128i_result[1]) = 0x89582bf870006860; ++ *((unsigned long *)&__m128i_result[0]) = 0x89582bf870006860; ++ __m128i_out = __lsx_vslli_w (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x841f000fc28f801f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x107c003c083c007c; ++ __m128i_out = __lsx_vslli_b (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff9727ffff9727; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffe79ffffba5f; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff972700000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffba5f00000000; ++ 
__m128i_out = __lsx_vslli_d (__m128i_op0, 0x20); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x101b0330eb022002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x030220020310edc0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0080800080000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000000080008000; ++ __m128i_out = __lsx_vslli_b (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x317fce80317fce80; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xf0000000f0000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_h (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0177fff0fffffff0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000011ff8bc; ++ *((unsigned long *)&__m128i_result[1]) = 0x05dfffc3ffffffc0; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000047fe2f0; ++ __m128i_out = __lsx_vslli_d (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-1.c +new file mode 100644 +index 000000000..796e88cad +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-1.c +@@ -0,0 +1,244 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, 
lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0020002000200020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0020002000200020; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000002000000020; ++ __m128i_out = __lsx_vsllwil_h_b (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000e0000000e0; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000fc00; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000fc00; ++ __m128i_out = __lsx_vsllwil_h_b (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsllwil_h_b (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vsllwil_h_b (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffeb48e03eab7ebe; ++ *((unsigned long *)&__m128i_result[1]) = 0xffc0fac01200f800; ++ *((unsigned long *)&__m128i_result[0]) = 0x0f80eac01f80ef80; ++ __m128i_out = __lsx_vsllwil_h_b (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000e7e20468; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc2fac2fa53e7db29; ++ *((unsigned long *)&__m128i_result[1]) = 0xff84fff4ff84fff4; ++ *((unsigned long *)&__m128i_result[0]) = 0x00a6ffceffb60052; ++ __m128i_out = __lsx_vsllwil_h_b (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsllwil_h_b (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsllwil_h_b (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x002e0059003b0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000005c000000b2; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000007600000000; ++ __m128i_out = __lsx_vsllwil_h_b (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x2e34594c3b000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x017001a002c80260; ++ *((unsigned long *)&__m128i_result[0]) = 0x01d8000000000000; ++ __m128i_out = __lsx_vsllwil_h_b (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4f4f4f4f4f4f4f4f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4f4f4f4f4f4f4f4f; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x09e009e009e009e0; ++ *((unsigned long *)&__m128i_result[0]) = 0x09e009e009e009e0; ++ __m128i_out = __lsx_vsllwil_h_b (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsllwil_w_h (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000001000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000040000000400; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsllwil_w_h (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000005050000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0505000005050505; ++ *((unsigned long *)&__m128i_result[1]) = 0x0028280000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0028280000282800; ++ __m128i_out = __lsx_vsllwil_w_h (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffff800; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffc0000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffc0000000000000; ++ __m128i_out = __lsx_vsllwil_w_h (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffff00ffffff00; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffff00ffffff00; ++ __m128i_out = __lsx_vsllwil_w_h (__m128i_op0, 0x8); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xf10cf508f904fd01; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf10cf508f904fd01; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffe218ffffea10; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffff208fffffa02; ++ __m128i_out = __lsx_vsllwil_w_h (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x21201f1e1d001b1a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1918171615141312; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001918000017160; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001514000013120; ++ __m128i_out = __lsx_vsllwil_w_h (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffff60ca7104649; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffff790a15db63d; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffc00ffde4000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfe857400fed8f400; ++ __m128i_out = __lsx_vsllwil_w_h (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1c6c80007fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0038d800ff000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00fffe00fffffe00; ++ __m128i_out = __lsx_vsllwil_w_h (__m128i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fc000007fc00000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fc000007fc00000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff800000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff800000000000; ++ __m128i_out = __lsx_vsllwil_w_h (__m128i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsllwil_w_h (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001fffe00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000007fff800000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsllwil_d_w (__m128i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x007fffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsllwil_d_w (__m128i_op0, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff80ff0000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff80000000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000001fffe; ++ __m128i_out = __lsx_vsllwil_d_w (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsllwil_d_w (__m128i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000104000800; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000040004000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0010002000000000; ++ __m128i_out = __lsx_vsllwil_d_w (__m128i_op0, 
0x1a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000007b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsllwil_d_w (__m128i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsllwil_d_w (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000017fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000800000; ++ *((unsigned long *)&__m128i_result[0]) = 0x003fffffff800000; ++ __m128i_out = __lsx_vsllwil_d_w (__m128i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsllwil_d_w (__m128i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x57f160c4a1750eda; ++ *((unsigned long *)&__m128i_result[1]) = 0x000002bf8b062000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffd0ba876d000; ++ __m128i_out = __lsx_vsllwil_d_w (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-2.c 
b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-2.c +new file mode 100644 +index 000000000..5f46293dc +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-2.c +@@ -0,0 +1,189 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000007f7f02; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00003f803f800100; ++ __m128i_out = __lsx_vsllwil_hu_bu (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsllwil_hu_bu (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsllwil_hu_bu (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0014000100000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f807f807f807f80; ++ __m128i_out = __lsx_vsllwil_hu_bu (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000001030103; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0020006000200060; ++ __m128i_out = __lsx_vsllwil_hu_bu (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0808080808080805; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0808080808080805; ++ *((unsigned long *)&__m128i_result[1]) = 0x0020002000200020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0020002000200014; ++ __m128i_out = __lsx_vsllwil_hu_bu (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001fffe0001fffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000201fe01fc; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000201fe01fc; ++ __m128i_out = __lsx_vsllwil_hu_bu (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff1affff01001fe0; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff1aff6d02834d70; ++ *((unsigned long *)&__m128i_result[1]) = 0x7f800d007f803680; ++ *((unsigned long *)&__m128i_result[0]) = 0x0100418026803800; ++ __m128i_out = __lsx_vsllwil_hu_bu (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsllwil_wu_hu (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3e2b34ca59fa4c88; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3b2c8aefd44be966; ++ *((unsigned long *)&__m128i_result[1]) = 0x0007658000115de0; ++ *((unsigned long *)&__m128i_result[0]) = 0x001a8960001d2cc0; ++ __m128i_out = __lsx_vsllwil_wu_hu (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffff000000ff00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ff00; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ff00; ++ __m128i_out = __lsx_vsllwil_wu_hu (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000040600000406; ++ *((unsigned long *)&__m128i_op0[0]) = 0x020202020202fe02; ++ *((unsigned long *)&__m128i_result[1]) = 0x0020200000202000; ++ *((unsigned long *)&__m128i_result[0]) = 0x002020000fe02000; ++ __m128i_out = __lsx_vsllwil_wu_hu (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsllwil_wu_hu (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsllwil_wu_hu (__m128i_op0, 0x4); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000001ffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000002; ++ __m128i_out = __lsx_vsllwil_wu_hu (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3131313131313131; ++ *((unsigned long *)&__m128i_result[1]) = 0x0313100003131000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0313100003131000; ++ __m128i_out = __lsx_vsllwil_wu_hu (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000900000009; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000900000009; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000090; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000090; ++ __m128i_out = __lsx_vsllwil_wu_hu (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000020000007d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000800000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000001f400000; ++ __m128i_out = __lsx_vsllwil_du_wu (__m128i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000280000; ++ __m128i_out = __lsx_vsllwil_du_wu (__m128i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x000fef01000e27ca; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000001fde020000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001c4f940000; ++ __m128i_out = __lsx_vsllwil_du_wu (__m128i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000ffffffff00; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000ffffffff00; ++ __m128i_out = __lsx_vsllwil_du_wu (__m128i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fff010181010102; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff81010102; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000fffffffe000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000102020204000; ++ __m128i_out = __lsx_vsllwil_du_wu (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000008000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsllwil_du_wu (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x0); ++ *((unsigned long *)&__m128i_op0[1]) = 0x8d78336c83652b86; ++ *((unsigned long *)&__m128i_op0[0]) = 0x39c51f389c0d6112; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000001ce28f9c0; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000004e06b0890; ++ __m128i_out = __lsx_vsllwil_du_wu (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrl.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrl.c +new file mode 100644 +index 000000000..f9c789855 +--- /dev/null ++++ 
b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrl.c +@@ -0,0 +1,389 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffefffffffef; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffefffffffef; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffefffffffef; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffefffffffef; ++ __m128i_out = __lsx_vsrl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101010100000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000005555555554; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000005555555554; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000001000f000e; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000fff1000ffffe; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x0000002a55005501; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000002a55000001; ++ __m128i_out = __lsx_vsrl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x80000000fff8fff8; ++ *((unsigned long *)&__m128i_op0[0]) = 0x80000000fff80000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7f800000fff8fff8; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7f800000fff80000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m128i_result[0]) = 0x80000000fff80000; ++ __m128i_out = __lsx_vsrl_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0004000000040000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0004000000040000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000750500006541; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000100fffffefd; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00f900d7003d00e4; ++ *((unsigned long *)&__m128i_op1[0]) = 0x003e00d100de002b; ++ *((unsigned long *)&__m128i_result[1]) = 
0x7f4000007f040000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f0200007f020000; ++ __m128i_out = __lsx_vsrl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffe000000f6; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x01010101ffffff00; ++ *((unsigned long *)&__m128i_result[0]) = 0x01010101000000f6; ++ __m128i_out = __lsx_vsrl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ffffff00ff00ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000049000000c0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000001ffffff29; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ffff7f00ff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff007f0101017f; ++ __m128i_out = __lsx_vsrl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffff2900000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000401000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffff2900000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vsrl_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc2f9bafac2fac2fa; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0101080408040804; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0804080407040804; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000010a000b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101080408040804; ++ *((unsigned long *)&__m128i_result[0]) = 0x000100810080e081; ++ __m128i_out = __lsx_vsrl_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4688500046f6a000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4f8000004f7fff02; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ffffff03ffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00013fff; 
++ __m128i_out = __lsx_vsrl_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffe; ++ __m128i_out = __lsx_vsrl_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000021ffffffdf; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000e60; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0002000200020002; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0202fe02fd020102; ++ *((unsigned long *)&__m128i_result[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m128i_result[0]) = 0x0400040004000400; ++ __m128i_out = __lsx_vsrl_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0101fe870101fe87; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0101fe8700000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_d (__m128i_op0, 
__m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x61608654a2d4f6da; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000fb01; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000007000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0002000000000007; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000fb01; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000000e0000; ++ __m128i_out = __lsx_vsrl_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ff0000ff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ff0000ff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ff0000000000; ++ __m128i_out = __lsx_vsrl_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ff000100ff00fe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff003000ff00a0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff000100ff00fe; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff003000ff00a0; ++ __m128i_out = __lsx_vsrl_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5d7f5d807fea807f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100010100; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000080000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000080000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000080000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000080000000; ++ __m128i_out = __lsx_vsrl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffe0000000; ++ __m128i_out = __lsx_vsrl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000ff00ff; ++ __m128i_out = __lsx_vsrl_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffe7fffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000001fd02; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffe1fffffff; ++ __m128i_out = __lsx_vsrl_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000900000009; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffff7fffffff7f; ++ __m128i_out = __lsx_vsrl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff007fff810001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000400530050ffa6; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff800fff01; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000007ff000ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsrl_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000f3040705; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000f3040705; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4399d3221a29d3f2; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000056f64adb9464; ++ *((unsigned long *)&__m128i_op1[0]) = 0x29ca096f235819c2; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000004399d32; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffffffffffff; ++ 
*((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffffffffffff; ++ __m128i_out = __lsx_vsrl_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0002000200020002; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0002000200020002; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrli.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrli.c +new file mode 100644 +index 000000000..7b5e9a7bf +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrli.c +@@ -0,0 +1,328 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0001ffff0001ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001ffff0001ffff; ++ __m128i_out = __lsx_vsrli_w (__m128i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_h (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_w (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000020000000002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000010000000100; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000100000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000080000; ++ __m128i_out = __lsx_vsrli_d (__m128i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000017fda829; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000017f0a82; ++ __m128i_out = __lsx_vsrli_h (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x03ff03ff03ff03ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_h (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000400000004000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000400000204010; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000020000000200; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000020000010200; ++ __m128i_out = __lsx_vsrli_b (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000006; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_h (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000003fffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000003fffffff; ++ __m128i_out = __lsx_vsrli_w (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_w (__m128i_op0, 0x12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_d (__m128i_op0, 0x37); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_h (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_h (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_w (__m128i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000007b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_d (__m128i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0020002000200020; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0020002000200020; ++ __m128i_out = __lsx_vsrli_w (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffefffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffefffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0007000700070007; ++ *((unsigned long *)&__m128i_result[0]) = 0x0007000700070007; ++ __m128i_out = __lsx_vsrli_h (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ 
++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000c000c000c000c; ++ __m128i_out = __lsx_vsrli_h (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000003d0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000003d0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000030000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000030000; ++ __m128i_out = __lsx_vsrli_b (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000010000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00fe00ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_h (__m128i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001000000010; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000001000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000000; ++ __m128i_out = __lsx_vsrli_b (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_d (__m128i_op0, 0x3d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_h (__m128i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_h (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000001000100; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000001000100; ++ __m128i_out = __lsx_vsrli_d (__m128i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000400000000; ++ __m128i_out = __lsx_vsrli_w (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_d (__m128i_op0, 0x3a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_b (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vsrli_w (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xaa14efac3bb62636; ++ *((unsigned long *)&__m128i_op0[0]) = 0xd6c22c8353a80d2c; ++ *((unsigned long *)&__m128i_result[1]) = 0x0002000300000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0003000000010000; ++ __m128i_out = __lsx_vsrli_h (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_h (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_b (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_w (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0080000700000014; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fffbffda; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000001010101; ++ __m128i_out = __lsx_vsrli_b (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x000001fffdfffdff; ++ *((unsigned long *)&__m128i_result[0]) = 0x000001fffdfffdff; ++ __m128i_out = __lsx_vsrli_d (__m128i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_b (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_w (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xd3259a2984048c23; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf9796558e39953fd; ++ *((unsigned long *)&__m128i_result[1]) = 0x001a64b345308091; ++ *((unsigned long *)&__m128i_result[0]) = 0x001f2f2cab1c732a; ++ __m128i_out = __lsx_vsrli_d (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000290; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000290; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000002; ++ __m128i_out = __lsx_vsrli_b (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00020000ffff0001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000003030000; ++ __m128i_out = __lsx_vsrli_b (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000002345454; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000c0dec4ca; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000060006; ++ __m128i_out = __lsx_vsrli_h (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000200000000d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_b (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000eefff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf8e1a03affffe3e2; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000003e0000003f; ++ __m128i_out = __lsx_vsrli_w (__m128i_op0, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0fffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_d (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrln.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrln.c +new file mode 100644 +index 000000000..5a8f4f70a +--- /dev/null ++++ 
b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrln.c +@@ -0,0 +1,335 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000c77c000047cd; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000c0f100006549; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffe000ffffe000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffe000ffffe000; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffdfff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffdfff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffe00001ffe200; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000001ffffdfff; ++ __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff35cab978; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff35cab978; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000010035; ++ __m128i_out = __lsx_vsrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000200010; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000200010; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000020; ++ __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x80307028ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8040007fffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff0101ff010101; ++ __m128i_out = __lsx_vsrln_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000003; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000001000100; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000001000100; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0141010101410101; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0141010101410101; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x4180418041804180; ++ __m128i_out = __lsx_vsrln_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x001ffff0003ffff0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000fffefffefffef; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00000000; ++ __m128i_out = __lsx_vsrln_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00008bf700017052; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000f841000091aa; ++ *((unsigned long *)&__m128i_op1[1]) = 0xe6d4572c8a5835bc; ++ *((unsigned long *)&__m128i_op1[0]) = 0xe5017c2ac9ca9fd0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x00000000f8410000; ++ __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000001010001; ++ __m128i_out = __lsx_vsrln_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000100000001000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0ed5ced7e51023e5; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000010000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00001000e51023e5; ++ __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffbfff8; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000010001; ++ __m128i_out = __lsx_vsrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000020002; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000020002; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vsrln_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsrln_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000017ffeffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000017ffeffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ 
__m128i_out = __lsx_vsrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x379674c000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3789f68000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfefeff00fefeff00; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfefeff00fefeff00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00c0000000800000; ++ __m128i_out = __lsx_vsrln_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x9c7c266e71768fa4; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000071768fa4; ++ __m128i_out = __lsx_vsrln_w_d 
(__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffdfdc0d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffdfdc0d; ++ __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000246d9755; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000002427c2ee; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_b_h (__m128i_op0, __m128i_op1); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlni.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlni.c +new file mode 100644 +index 000000000..ca462c834 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlni.c +@@ -0,0 +1,281 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fc000007fc00000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1e801ffc7fc00000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00003fe00ffe3fe0; ++ __m128i_out = __lsx_vsrlni_w_d (__m128i_op0, __m128i_op1, 0x11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff00ff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlni_b_h (__m128i_op0, __m128i_op1, 0x0); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000001f; ++ __m128i_out = __lsx_vsrlni_d_q (__m128i_op0, __m128i_op1, 0x7b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlni_w_d (__m128i_op0, __m128i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xc39fffff007fffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000fe00fd; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x78c00000ff000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x61cf003f0000007f; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000003c607f80; ++ __m128i_out = __lsx_vsrlni_h_w (__m128i_op0, __m128i_op1, 0x11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff7f01ff01; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff7f01ff01; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffe03; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffe03; ++ __m128i_out = __lsx_vsrlni_h_w (__m128i_op0, __m128i_op1, 0xf); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff8001ffff8001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x001ffff0003ffff0; ++ *((unsigned long *)&__m128i_result[0]) = 0x000fffefffefffef; ++ __m128i_out = __lsx_vsrlni_d_q (__m128i_op0, __m128i_op1, 0x4b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6363797c63990099; ++ *((unsigned long *)&__m128i_op0[0]) = 0x171f0a1f6376441f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x6363797c63990099; ++ *((unsigned long *)&__m128i_op1[0]) = 0x171f0a1f6376441f; ++ *((unsigned long *)&__m128i_result[1]) = 0x181e180005021811; ++ *((unsigned long *)&__m128i_result[0]) = 0x181e180005021811; ++ __m128i_out = __lsx_vsrlni_b_h (__m128i_op0, __m128i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00003fff00003fff; ++ __m128i_out = __lsx_vsrlni_h_w (__m128i_op0, __m128i_op1, 0x12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xf0fd800080000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000a00028004000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000f000800000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x000f000000000000; ++ __m128i_out = __lsx_vsrlni_h_w (__m128i_op0, __m128i_op1, 0x1c); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlni_w_d (__m128i_op0, __m128i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xaeaeaeaeaeaeae35; ++ *((unsigned long *)&__m128i_op0[0]) = 0xaeaeaeaeaeaeae35; ++ *((unsigned long *)&__m128i_op1[1]) = 0xaeaeaeaeaeaeae35; ++ *((unsigned long *)&__m128i_op1[0]) = 0xaeaeaeaeaeaeae35; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000200000002; ++ __m128i_out = __lsx_vsrlni_w_d (__m128i_op0, __m128i_op1, 0x3e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlni_h_w (__m128i_op0, __m128i_op1, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00000000; ++ __m128i_out = __lsx_vsrlni_b_h (__m128i_op0, __m128i_op1, 0x0); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000008140c80; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000008140c80; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000002050320; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000002050320; ++ __m128i_out = __lsx_vsrlni_w_d (__m128i_op0, __m128i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000002050320; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000002050320; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_op1[0]) = 0x010101017f010101; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000040600000406; ++ *((unsigned long *)&__m128i_result[0]) = 0x020202020202fe02; ++ __m128i_out = __lsx_vsrlni_b_h (__m128i_op0, __m128i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffe000ffffe000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xe364525335ede000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000fff00000e36; ++ __m128i_out = __lsx_vsrlni_w_d (__m128i_op0, __m128i_op1, 0x34); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x601fbfbeffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffb00fdfdf7ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfff8000000000000; ++ __m128i_out = __lsx_vsrlni_d_q (__m128i_op0, __m128i_op1, 0xd); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlni_h_w (__m128i_op0, __m128i_op1, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsrlni_h_w (__m128i_op0, __m128i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000455555555; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000008; ++ __m128i_out = __lsx_vsrlni_d_q (__m128i_op0, __m128i_op1, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlni_w_d (__m128i_op0, __m128i_op1, 0x1f); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7c7c000000007176; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000f3040705; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000001f1f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlni_d_q (__m128i_op0, __m128i_op1, 0x32); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000000bffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000040001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlni_d_q (__m128i_op0, __m128i_op1, 0x6d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xe4c8b96e2560afe9; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc001a1867fffa207; ++ *((unsigned long *)&__m128i_op1[1]) = 0xe4c8b96e2560afe9; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc001a1867fffa207; ++ *((unsigned long *)&__m128i_result[1]) = 0xe2560afe9c001a18; ++ *((unsigned long *)&__m128i_result[0]) = 0xe2560afe9c001a18; ++ __m128i_out = __lsx_vsrlni_d_q (__m128i_op0, __m128i_op1, 0x24); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000042ab41; ++ *((unsigned long *)&__m128i_op0[0]) = 0xb1b1b1b1b16f0670; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000084d12ce; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000044470000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000202020200; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000100; ++ __m128i_out = __lsx_vsrlni_b_h (__m128i_op0, __m128i_op1, 0xe); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x020310edc003023d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000080c43b700; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlni_d_q (__m128i_op0, __m128i_op1, 0x56); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x30eb022002101b20; ++ *((unsigned long *)&__m128i_op0[0]) = 0x020310edc003023d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x30eb022002101b20; ++ *((unsigned long *)&__m128i_op1[0]) = 0x020310edc003023d; ++ *((unsigned long *)&__m128i_result[1]) = 0x022002101b200203; ++ *((unsigned long *)&__m128i_result[0]) = 0x022002101b200203; ++ __m128i_out = __lsx_vsrlni_d_q (__m128i_op0, __m128i_op1, 0x30); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlni_b_h (__m128i_op0, __m128i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlr.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlr.c +new file mode 100644 +index 000000000..211339bb8 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlr.c +@@ -0,0 +1,434 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ 
__m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x43e092728266beba; ++ *((unsigned long *)&__m128i_op1[0]) = 0x43d8969cc4afbf2d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsrlr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f8000007f800000; ++ __m128i_out = __lsx_vsrlr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffc001fffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0010000200020002; ++ __m128i_out = __lsx_vsrlr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000200010; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffff0ffe04000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000200010; ++ __m128i_out = __lsx_vsrlr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffff01ff01; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000101fd01fe; ++ *((unsigned long *)&__m128i_result[1]) = 0xff80ff80ff80ff80; ++ *((unsigned long *)&__m128i_result[0]) = 0xff80ff8080008000; ++ __m128i_out = __lsx_vsrlr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xf51cf8dad6040188; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0982e2daf234ed87; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffff51cf8da; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffd6040188; ++ *((unsigned long *)&__m128i_result[1]) = 0x00020002000d0000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000020f2300ee; ++ __m128i_out = __lsx_vsrlr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00007f8000007f80; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00007f8000007f80; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffe5; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffe5; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000000003fc; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000000003fc; ++ __m128i_out = __lsx_vsrlr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000006; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000000; ++ __m128i_out = __lsx_vsrlr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0040000000400000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0040000000400000; ++ __m128i_out = __lsx_vsrlr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0020808100000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffe218ffffea10; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffff208fffffa02; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffe218ffffea10; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffff208fffffa02; ++ __m128i_out = __lsx_vsrlr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x111110ff11111141; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1111113111111121; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000f00f; ++ *((unsigned long *)&__m128i_result[1]) = 0x111110ff11111141; ++ *((unsigned long *)&__m128i_result[0]) = 0x1111113111111100; ++ __m128i_out = __lsx_vsrlr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1f54e0ab00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsrlr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000003fbf3fbf; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fff7fff7fff7ff8; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000200000100; ++ __m128i_out = __lsx_vsrlr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x370bdfecffecffec; ++ *((unsigned long *)&__m128i_op0[0]) = 0x370bdfeca2eb9931; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00d3007c014e00bd; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000200020002; ++ *((unsigned long *)&__m128i_result[0]) = 0x06e1000e00030005; ++ __m128i_out = __lsx_vsrlr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0202020202020202; ++ *((unsigned long *)&__m128i_op0[0]) = 0x363d753d50155c0a; ++ *((unsigned long *)&__m128i_op1[1]) = 0xe500c085c000c005; ++ *((unsigned long *)&__m128i_op1[0]) = 0xe5c1a185c48004c5; ++ *((unsigned long *)&__m128i_result[1]) = 0x0002020002020200; ++ *((unsigned long *)&__m128i_result[0]) = 0x021f3b0205150600; ++ __m128i_out = __lsx_vsrlr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffe000ffdf; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x0000200000002000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffe000ffdf; ++ __m128i_out = __lsx_vsrlr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffe080f6efc100f7; ++ *((unsigned long *)&__m128i_op1[0]) = 0xefd32176ffe100f7; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000040000000200; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000040000000000; ++ __m128i_out = __lsx_vsrlr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000001fffdfe01; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffdfe0200000002; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x4000000000000000; ++ __m128i_out = __lsx_vsrlr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xd70b30c96ea9f4e8; ++ *((unsigned long *)&__m128i_op0[0]) = 0xa352bfac9269e0aa; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xd70b30c96ea9f4e8; ++ *((unsigned long *)&__m128i_result[0]) = 0xa352bfac9269e0aa; ++ __m128i_out = __lsx_vsrlr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000005; ++ __m128i_out = __lsx_vsrlr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000158; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000158; ++ __m128i_out = __lsx_vsrlr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsrlr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00009c7c00007176; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00009c7c00007176; ++ __m128i_out = __lsx_vsrlr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000001fffeff98; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0014ffe4ff76ffc4; ++ *((unsigned long *)&__m128i_op1[1]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000010; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000010; ++ __m128i_out = __lsx_vsrlr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4399d3221a29d3f2; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4399d3221a29d3f2; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x084d1a0907151a3d; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfff9fffefff9ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0280000000000000; ++ __m128i_out = __lsx_vsrlr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0700f8ff0700f8ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0700f8ff0700f8ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3a8000003a800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3bc000003a800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000f50000000900; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000090a00000998; ++ *((unsigned long *)&__m128i_result[1]) = 
0x3a8000003a800000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000ef0000000003b; ++ __m128i_out = __lsx_vsrlr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0005847b00011005; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0005847b00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000807bf0a1f80; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000800ecedee68; ++ *((unsigned long *)&__m128i_result[1]) = 0x0005840100000005; ++ *((unsigned long *)&__m128i_result[0]) = 0x0005847b00000000; ++ __m128i_out = __lsx_vsrlr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00c2758000bccf42; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00a975be00accf03; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00250023001c001d; ++ *((unsigned long *)&__m128i_op1[0]) = 0x309d2f342a5d2b34; ++ *((unsigned long *)&__m128i_result[1]) = 0x00060eb000000006; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000075c00000cf0; ++ __m128i_out = __lsx_vsrlr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlri.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlri.c +new file mode 100644 +index 000000000..2c3a53416 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlri.c +@@ -0,0 +1,300 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, 
lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000800000000000; ++ __m128i_out = __lsx_vsrlri_w (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_w (__m128i_op0, 0x10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_w (__m128i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_w (__m128i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x52527d7d52527d7d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x52527d7d52527d7d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0005252800052528; ++ *((unsigned long *)&__m128i_result[0]) = 0x0005252800052528; ++ __m128i_out = __lsx_vsrlri_w (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_d (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0200020002000200; ++ *((unsigned long *)&__m128i_result[0]) = 0x0200020002000200; ++ __m128i_out = __lsx_vsrlri_h (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffc001fffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000200000; ++ *((unsigned long *)&__m128i_result[0]) = 0x001fff8004000000; ++ __m128i_out = __lsx_vsrlri_d (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_w (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0010001000030000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00060001fffe8003; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000200010; ++ __m128i_out = __lsx_vsrlri_h (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000078c00000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x0000000078c00000; ++ __m128i_out = __lsx_vsrlri_h (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x4000400000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000040004000; ++ __m128i_out = __lsx_vsrlri_h (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x001800390049ffaa; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0029ff96005cff88; ++ *((unsigned long *)&__m128i_result[1]) = 0x001800390049ffaa; ++ *((unsigned long *)&__m128i_result[0]) = 0x0029ff96005cff88; ++ __m128i_out = __lsx_vsrlri_b (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_h (__m128i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_b (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000800000008; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_h (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_d (__m128i_op0, 0x28); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x03c0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x03c0038000000380; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_h (__m128i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_d (__m128i_op0, 0x25); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_w (__m128i_op0, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_d (__m128i_op0, 0x28); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xa2a2a2a3a2a2a2a3; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc605c000aedd0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000005151515; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000006302e00; ++ __m128i_out = __lsx_vsrlri_d (__m128i_op0, 0x25); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x2000200000013fa0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000013fa0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000001000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_w (__m128i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_b (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000020000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000020000000; ++ __m128i_out = __lsx_vsrlri_d (__m128i_op0, 0x23); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x370bdfecffecffec; ++ *((unsigned long *)&__m128i_op0[0]) = 0x370bdfecffecffec; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000dc300003ffb; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000dc300003ffb; ++ __m128i_out = __lsx_vsrlri_w (__m128i_op0, 0x12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8080808000000035; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0002000200000000; ++ __m128i_out = __lsx_vsrlri_h (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00018d8e00018d8e; ++ __m128i_out = __lsx_vsrlri_w (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f801fe000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3fc03fc000000004; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000003fc00ff00; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000001fe01fe00; ++ __m128i_out = __lsx_vsrlri_d (__m128i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0002000200020002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0002000200020002; ++ *((unsigned long *)&__m128i_result[1]) = 0x0002000200020002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0002000200020002; ++ __m128i_out = __lsx_vsrlri_b (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000800080008000; ++ __m128i_out = __lsx_vsrlri_h (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000158; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_d (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x045340a628404044; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_w (__m128i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_w (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000003ddc5dac; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000001030103; ++ __m128i_out = __lsx_vsrlri_b (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_h (__m128i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_h (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x86dd8341b164f12b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x9611c3985b3159f5; ++ *((unsigned long *)&__m128i_result[1]) = 0x0021b761002c593c; ++ *((unsigned long *)&__m128i_result[0]) = 0x002584710016cc56; ++ __m128i_out = __lsx_vsrlri_w (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) 
= 0xc0b4d1a5f8babad3; ++ *((unsigned long *)&__m128i_op0[0]) = 0xbbc8ecc5f3ced5f3; ++ *((unsigned long *)&__m128i_result[1]) = 0xc0b4d1a5f8babad3; ++ *((unsigned long *)&__m128i_result[0]) = 0xbbc8ecc5f3ced5f3; ++ __m128i_out = __lsx_vsrlri_d (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000feff23560000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000fd1654860000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000080801030000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000080103040000; ++ __m128i_out = __lsx_vsrlri_b (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrn.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrn.c +new file mode 100644 +index 000000000..c630b4261 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrn.c +@@ -0,0 +1,164 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lsxintrin.h> ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001ffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001ffff0001ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000efffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vsrlrn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000040400000383; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffe000ffff1fff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) 
= 0x00000383ffff1fff; ++ __m128i_out = __lsx_vsrlrn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000003fc; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000000003fc; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ 
__m128i_out = __lsx_vsrlrn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x002affd600000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0xcbc2723a4f12a5f8; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffd60001723aa5f8; ++ __m128i_out = __lsx_vsrlrn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffe000ffffe000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x467f6080467d607f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8080808080808081; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xe000e0006080b040; ++ __m128i_out = __lsx_vsrlrn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vsrlrn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0101010101030101; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0101010101030101; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000fffa0000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000fffa0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101000101010001; ++ __m128i_out = __lsx_vsrlrn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff80ffffffffff80; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ff80ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x6a5d5b056f2f4978; ++ *((unsigned long *)&__m128i_op1[0]) = 0x17483c07141b5971; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0800010001ff8000; ++ __m128i_out = __lsx_vsrlrn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff01ff01ac025c87; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff01ff01ac465ca1; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrni.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrni.c +new file mode 100644 +index 000000000..468a17c15 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrni.c +@@ -0,0 +1,686 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include 
"../simd_correctness_check.h" ++#include <lsxintrin.h> ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff8969ffffd7e2; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000d688ffffbd95; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xf12dfafc1ad1f7b3; ++ __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x34); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000200000002000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000200000002000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000010000000100; ++ __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x25); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000001000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x2f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000c0002000c0002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000400c600700153; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000c0002000c0002; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000400c600700153; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000010000007f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000fffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0800000400000800; ++ __m128i_out = 
__lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000001515151500; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000001515151500; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001515000015150; ++ __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fdfd0404; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3fffffff3fffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3fffffff3fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000fc08; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000800080008000; ++ __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000fc08; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000800080008000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffba420000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x000007e044000400; ++ *((unsigned long *)&__m128i_result[0]) = 0xfdd2100000000000; ++ __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x25); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000081e003f3f3f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3f3f3f0e00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000081e003f3f3f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3f3f3f0e00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000103c007e7e8; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000103c007e7e8; ++ __m128i_out = 
__lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x43); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0202022302023212; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0202ff3f02022212; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000002100003010; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ff3f00002010; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x79); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffff7fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xe2bb5ff00e20aceb; ++ *((unsigned long *)&__m128i_op1[0]) = 0xe2bb5ff00e20aceb; ++ *((unsigned long *)&__m128i_result[1]) = 0x0100010000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00e3000e00e3000e; ++ __m128i_out = 
__lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x18); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xf58df7841423142a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3f7477f8ff4e2152; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x3d3e0505101e4008; ++ __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x2bd5d429e34a1efb; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfc0203fccbedbba7; ++ *((unsigned long *)&__m128i_op1[1]) = 0xc9f66947f077afd0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x89fed7c07fdf5d00; ++ *((unsigned long *)&__m128i_result[1]) = 0x14f1a50ffe65f6de; ++ *((unsigned long *)&__m128i_result[0]) = 0xa3f83bd8e03fefaf; ++ __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x6ed694e00e0355db; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000010600000106; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xe00e035606000001; ++ __m128i_out = 
__lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xe739e7ade77ae725; ++ *((unsigned long *)&__m128i_op0[0]) = 0xbb9013bd049bc9ec; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x56aca41400000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7ade77ae3bd049bd; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000041400000; ++ __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1010101010101010; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1010101010101010; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8081808180818081; ++ __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000006ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0037f80000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x69); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0020202020202020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0080808080c04040; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0101010001808080; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000202000008081; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001010100010101; ++ __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x28); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0010000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00fff00000001000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x28); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x6b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000adf0000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000001e00; ++ __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0040000000400040; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000020002020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8080808080808102; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000001010102; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x001000100010000b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x03fc03fc03fc03fc; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x04000400ff01ff01; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x1010101010101010; ++ __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000fff800000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000001ed68; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1ff6a09e667f3bd8; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000007b5a; ++ *((unsigned long *)&__m128i_result[0]) = 0x999fcef600000000; ++ __m128i_out = 
__lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffe5c8000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x91f80badc162a0c4; ++ *((unsigned long *)&__m128i_op1[0]) = 0x99d1ffff0101ff01; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff400000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x905d0b06cf0008f8; ++ __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3802f4fd025800f7; ++ *((unsigned long *)&__m128i_op1[1]) = 0xc8ff0bffff00ffae; ++ *((unsigned long *)&__m128i_op1[0]) = 0x91ff40fffff8ff50; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000200000000700; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000192000001240; ++ __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x33); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff0ffd0ffd; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff0ffc0001; ++ *((unsigned long *)&__m128i_op1[1]) = 0xbb7743ca4c78461f; ++ *((unsigned long *)&__m128i_op1[0]) = 0xd9743eb5fb4deb3a; ++ *((unsigned long *)&__m128i_result[1]) = 0x003fffffffc3ff44; ++ *((unsigned long *)&__m128i_result[0]) = 0x002eddd0f2931e12; ++ __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x4a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xbb7743ca4c78461f; ++ *((unsigned long *)&__m128i_op0[0]) = 0xd9743eb5fb4deb3a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x22445e1ad9c3e4f0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1b43e8a30a570a63; ++ *((unsigned long *)&__m128i_result[1]) = 0x743ca4c843eb5fb5; ++ *((unsigned long *)&__m128i_result[0]) = 0x45e1ad9c3e8a30a5; ++ __m128i_out = 
__lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1204900f62f72565; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x4901725600000000; ++ __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x6a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000400000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000300000003; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x32); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3f3f3f7fbf3fffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x47); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000040804080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000020100000000; ++ __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffe8ffff28fc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00007fff0000803e; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000006ffff81e1; ++ *((unsigned long *)&__m128i_result[1]) = 0x0ffffffe8ffff290; ++ *((unsigned long *)&__m128i_result[0]) = 0x000007fff0000804; ++ __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x44); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000418200000008e; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000002100047; ++ __m128i_out = 
__lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6363636363636362; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636362; ++ *((unsigned long *)&__m128i_op1[1]) = 0x6363636363636362; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6363636363636362; ++ *((unsigned long *)&__m128i_result[1]) = 0x0032003200320032; ++ *((unsigned long *)&__m128i_result[0]) = 0x0032003200320032; ++ __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff01010102; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7ffdf87f0b0c7f7f; ++ *((unsigned long *)&__m128i_op1[1]) = 0xf6b3eb63f6b3f6b3; ++ *((unsigned long *)&__m128i_op1[0]) = 0x363953e42b56432e; ++ *((unsigned long *)&__m128i_result[1]) = 0x010000010080000b; ++ *((unsigned long *)&__m128i_result[0]) = 0x00f700f70036002b; ++ __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x18); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xed67d6c7ed67ed67; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6c72a7c856ac865c; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000700000003; ++ __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x3d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffff40ff83; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x1010101010101010; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000003030103; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000003030103; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000006060; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000006060; ++ __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000002408beb26c8; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000706e; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000028c27; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000070; ++ __m128i_out = 
__lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x80000b0b80000b0b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000101080001010; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffefefffffeff0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0061006100020002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000fe00fe; ++ __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000078087f08; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000078087f08; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000e0fc0000e0fc; ++ __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x17); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ff0bff76; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x75); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x33); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000ff00ff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000ff00ffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8282828282828282; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000828282828282; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0008000800000008; ++ __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00f7000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000005150; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000005150; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000000f7000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x24); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x41afddcb1c000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xd09e1bd99a2c6eb1; ++ *((unsigned long *)&__m128i_op1[0]) = 0xe82f7c27bb0778af; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000040002; ++ *((unsigned long *)&__m128i_result[0]) = 0x000d000a000f000c; ++ __m128i_out = 
__lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffff8000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffdff0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0144329880000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x007fffc0007ffff0; ++ *((unsigned long *)&__m128i_result[0]) = 0x004000004c400000; ++ __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x17); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000001e0000001e; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffafff0fff9ff01; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000d800cff8; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000002000007d7; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000300000ff1; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x000007d700000ff1; ++ __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fc000007fc00000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffff00ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffff00ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000ff8; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000001000; ++ __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x74); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000f08; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x2020202020202020; ++ __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +-- +2.33.0 + diff --git a/LoongArch-Add-tests-for-SX-vector-vssran-vssrani-vss.patch b/LoongArch-Add-tests-for-SX-vector-vssran-vssrani-vss.patch new file mode 100644 index 0000000000000000000000000000000000000000..3618a4f6ef2c6c02f2bec7134d0b2e9af440f90b --- /dev/null +++ b/LoongArch-Add-tests-for-SX-vector-vssran-vssrani-vss.patch @@ -0,0 +1,4954 @@ +From 
1009120c617c050d02a6d2abe786728dccf5cb5b Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Tue, 12 Sep 2023 11:17:38 +0800 +Subject: [PATCH 091/124] LoongArch: Add tests for SX vector + vssran/vssrani/vssrarn/vssrarni/vssrln /vssrlni/vssrlrn/vssrlrni + instructions. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lsx/lsx-vssran.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vssrani.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vssrarn.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vssrarni.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vssrln.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vssrlni.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vssrlrn.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vssrlrni.c: New test. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lsx/lsx-vssran.c | 390 ++++++++ + .../loongarch/vector/lsx/lsx-vssrani.c | 679 ++++++++++++++ + .../loongarch/vector/lsx/lsx-vssrarn.c | 669 ++++++++++++++ + .../loongarch/vector/lsx/lsx-vssrarni.c | 848 ++++++++++++++++++ + .../loongarch/vector/lsx/lsx-vssrln.c | 543 +++++++++++ + .../loongarch/vector/lsx/lsx-vssrlni.c | 668 ++++++++++++++ + .../loongarch/vector/lsx/lsx-vssrlrn.c | 470 ++++++++++ + .../loongarch/vector/lsx/lsx-vssrlrni.c | 597 ++++++++++++ + 8 files changed, 4864 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssran.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrani.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarn.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarni.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrln.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlni.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrn.c + create mode 100644 
gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrni.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssran.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssran.c +new file mode 100644 +index 000000000..e45ca36f0 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssran.c +@@ -0,0 +1,390 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xbf8000000000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xcf00000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x003f00000000003f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x003f000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000017fff9000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000210011084; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000001000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffc000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000007fff0000; ++ __m128i_out = __lsx_vssran_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffefffffffeff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffcff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssran_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000000080000000; ++ __m128i_out = __lsx_vssran_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x02b504f305a5c091; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x02b504f305a5c091; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000005602d2; ++ __m128i_out = __lsx_vssran_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x00000000003f80b0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ff800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xb327b9363c992b2e; ++ *((unsigned long *)&__m128i_op1[0]) = 0xa1e7b475d925730f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000001ff00; ++ __m128i_out = __lsx_vssran_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0060e050007f0160; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0040007fff800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssran_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ffffff00ff00ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1268f057137a0267; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0048137ef886fae0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000000; ++ __m128i_out = __lsx_vssran_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0141010101410101; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0141010101410101; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_op1[1]) = 0x75b043c4d17db125; ++ *((unsigned long *)&__m128i_op1[0]) = 0xeef8227b4f8017b1; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x027c027c000027c0; ++ __m128i_out = __lsx_vssran_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000006f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000006f00000000; ++ __m128i_out = __lsx_vssran_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffd000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vssran_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff994db09c; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffc7639d96; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4040404040404040; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vssran_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0fff0fff0fff0fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0fff0fff0fff0fff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000010; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssran_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ unsigned_int_out = __lsx_vpickve2gr_bu (__m128i_op0, 0x9); ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f80000080000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x800080007f008000; ++ __m128i_out = __lsx_vssran_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000695d00009b8f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000074f20000d272; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00001f5400000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff000000ff0000; ++ __m128i_out = __lsx_vssran_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00010000fffffffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00010000fffffffc; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff00000000; ++ __m128i_out = __lsx_vssran_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x31b1777777777776; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6eee282828282829; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000006362ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vssran_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ff801c9e; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000810000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x40eff02383e383e4; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000800000007fff; ++ __m128i_out = __lsx_vssran_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffb00fdfdf7ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfff8000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000c0c00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000a74aa8a55ab; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x6adeb5dfcb000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0a7480007fff8000; ++ __m128i_out = __lsx_vssran_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000fe00fe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00fe00fe00fe00fe; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000f50000007500; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00007e1600007d98; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00fe00fe7fffffff; ++ __m128i_out = __lsx_vssran_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4f4f4f4f4f4f0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4f4f4f4f4f4f0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f7f7f007f7f7f00; ++ __m128i_out = __lsx_vssran_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrani.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrani.c +new file mode 100644 +index 000000000..7ffcecde7 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrani.c +@@ -0,0 +1,679 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, 
__m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00001802041b0013; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000007f7f02; ++ __m128i_out = __lsx_vssrani_b_h (__m128i_op0, __m128i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff7fffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff7fffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffff7ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_d_q (__m128i_op0, __m128i_op1, 0x64); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000010000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000007fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_h_w (__m128i_op0, __m128i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_h_w (__m128i_op0, __m128i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_d_q (__m128i_op0, __m128i_op1, 0x47); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0004007c00fc0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vssrani_wu_d (__m128i_op0, __m128i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffefffefffffffc; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrani_b_h (__m128i_op0, __m128i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f7f7f7f00107f04; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x7f0000fd7f0000fd; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_w_d (__m128i_op0, __m128i_op1, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00cf01fe01fe01fe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000301de01fe01fe; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffc002000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0f00000000000000; ++ __m128i_out = __lsx_vssrani_bu_h (__m128i_op0, __m128i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe31c86e90cda86f7; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000000000e3; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_du_q (__m128i_op0, __m128i_op1, 0x38); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xc39fffff007fffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000fe00fd; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffff0e700000000; ++ __m128i_out = __lsx_vssrani_w_d (__m128i_op0, __m128i_op1, 0x32); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) 
= 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffff0000010000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_hu_w (__m128i_op0, __m128i_op1, 0x12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfc01fd1300000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfe00fd1400010000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7f0000007f000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8080000180800100; ++ __m128i_out = __lsx_vssrani_b_h (__m128i_op0, __m128i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fff7fc01; ++ *((unsigned long *)&__m128i_op1[1]) = 0x82c539ffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc72df14afbfafdf9; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000000000000000; ++ __m128i_out = __lsx_vssrani_d_q (__m128i_op0, __m128i_op1, 0x23); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_hu_w (__m128i_op0, __m128i_op1, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000000000c0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000001ffffff29; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000020000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000183fffffe5; ++ __m128i_out = __lsx_vssrani_w_d (__m128i_op0, __m128i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000080000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000080000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff000000ff0000; ++ __m128i_out = __lsx_vssrani_bu_h (__m128i_op0, __m128i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffff0000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrani_d_q (__m128i_op0, __m128i_op1, 0x18); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_w_d (__m128i_op0, __m128i_op1, 0x2a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x00000000fefefe6a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000fbf9; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_b_h (__m128i_op0, __m128i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000000007f8; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_du_q (__m128i_op0, __m128i_op1, 0x2d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0a000a000a000a00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_d_q (__m128i_op0, __m128i_op1, 0x4d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f007f007f007f00; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000030000003f; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff0003003f; ++ __m128i_out = __lsx_vssrani_hu_w (__m128i_op0, __m128i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_hu_w (__m128i_op0, __m128i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_d_q (__m128i_op0, __m128i_op1, 0x4c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x11000f2010000e20; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0f000d200e000c20; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrani_w_d (__m128i_op0, __m128i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x007b01ec007b3a9e; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vssrani_b_h (__m128i_op0, __m128i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000001fff9fff9; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000001fff9fffa; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x007ffe7ffe400000; ++ __m128i_out = __lsx_vssrani_du_q (__m128i_op0, __m128i_op1, 0x2a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_b_h (__m128i_op0, __m128i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc485edbcc0000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000c485; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000010000; ++ __m128i_out = __lsx_vssrani_du_q (__m128i_op0, __m128i_op1, 0x30); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x21011f3f193d173b; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff39ff37ff35ff33; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000015d926c7; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000e41b; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000000000ff; ++ __m128i_out = __lsx_vssrani_b_h (__m128i_op0, __m128i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000007f7f7f7f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_b_h (__m128i_op0, __m128i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000c0c0c0c0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0014000100000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_w_d (__m128i_op0, __m128i_op1, 0x35); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00003f80000000ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_wu_d (__m128i_op0, __m128i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffff46; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrani_d_q (__m128i_op0, __m128i_op1, 0x4c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000001; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0xffffffee00000004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3a3a3a3b3a3a3a3a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3a3a00003a3a0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000003a0000003a; ++ __m128i_out = __lsx_vssrani_wu_d (__m128i_op0, __m128i_op1, 0x38); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000080000068; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000038003; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000040033; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_bu_h (__m128i_op0, __m128i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00001fff00001fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000007ffc000; ++ __m128i_out = __lsx_vssrani_du_q (__m128i_op0, __m128i_op1, 0x12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffe0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000fff0; ++ __m128i_out = __lsx_vssrani_b_h (__m128i_op0, __m128i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000040; ++ *((unsigned 
long *)&__m128i_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000004000000040; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_h_w (__m128i_op0, __m128i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrani_w_d (__m128i_op0, __m128i_op1, 0x28); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00fe000100cf005f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrani_du_q (__m128i_op0, __m128i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000005e94; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00005e96ffffb402; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00fe000100cf005f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000000000bd; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001fc0000fffeff; ++ __m128i_out = __lsx_vssrani_w_d (__m128i_op0, __m128i_op1, 0x27); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_w_d (__m128i_op0, __m128i_op1, 0x13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000002fffffffb; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000010000fffb; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000bffffffe; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_du_q (__m128i_op0, __m128i_op1, 0x42); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_b_h (__m128i_op0, __m128i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffc0ff80ff800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffff0000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_b_h (__m128i_op0, __m128i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m128i_op0[0]) = 0x0000000000000158; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_du_q (__m128i_op0, __m128i_op1, 0x79); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x0); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000777777777777; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff7777ffff7777; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000003bbbbbbbbbb; ++ __m128i_out = __lsx_vssrani_d_q (__m128i_op0, __m128i_op1, 0x45); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_h_w (__m128i_op0, __m128i_op1, 0x15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_wu_d (__m128i_op0, __m128i_op1, 0x13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_b_h (__m128i_op0, __m128i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0002000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0007fff800000000; ++ __m128i_out = __lsx_vssrani_w_d (__m128i_op0, __m128i_op1, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x6a5d5b056f2f4978; ++ *((unsigned long *)&__m128i_op1[0]) = 0x17483c07141b5971; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xd4bade5e2e902836; ++ __m128i_out = __lsx_vssrani_hu_w (__m128i_op0, __m128i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0010001000000010; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x1000000010001000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_hu_w (__m128i_op0, __m128i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x00680486ffffffda; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff913bb9951901; ++ *((unsigned long *)&__m128i_op1[1]) = 0x67157b5100005000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x387c7e0a133f2000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m128i_result[0]) = 0x0c0f000a070f0204; ++ __m128i_out = __lsx_vssrani_bu_h (__m128i_op0, __m128i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x98147a504d145000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x377b810912c0e000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x98147a504d145000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x377b810912c0e000; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000000000000000; ++ __m128i_out = __lsx_vssrani_d_q (__m128i_op0, __m128i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_out = __lsx_vpickve2gr_b (__m128i_op0, 0xe); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x9c7c266e3faa293c; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000f3040705; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_b_h (__m128i_op0, __m128i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_du_q (__m128i_op0, __m128i_op1, 0x30); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_d_q (__m128i_op0, __m128i_op1, 0x2e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x86dd8341b164f12b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x9611c3985b3159f5; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xff86dd83ff9611c3; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_w_d (__m128i_op0, __m128i_op1, 0x28); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xd3259a2984048c23; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf9796558e39953fd; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1010111105050000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4040000041410101; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000808000020200; ++ __m128i_out = __lsx_vssrani_wu_d (__m128i_op0, __m128i_op1, 0x2d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x2e2b34ca59fa4c88; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3b2c8aefd44be966; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x2e34594c3b000000; ++ __m128i_out = __lsx_vssrani_bu_h (__m128i_op0, __m128i_op1, 0x8); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff1afffefec0ec85; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff1aff6d48ce567f; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff80c400000148; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff80c1ffffe8de; ++ *((unsigned long *)&__m128i_result[1]) = 0xffe3ffd8ffe30919; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff0000ffffffff; ++ __m128i_out = __lsx_vssrani_h_w (__m128i_op0, __m128i_op1, 0x13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1313131313131313; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1313131313131313; ++ *((unsigned long *)&__m128i_op1[1]) = 0x34947b4b11684f92; ++ *((unsigned long *)&__m128i_op1[0]) = 0xd73691661e5b68b4; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vssrani_wu_d (__m128i_op0, __m128i_op1, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x084d1a0907151a3d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000007d07fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000000000ff; ++ __m128i_out = __lsx_vssrani_b_h (__m128i_op0, __m128i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000014eb54ab; ++ *((unsigned long *)&__m128i_op1[0]) = 0x14eb6a002a406a00; ++ *((unsigned long *)&__m128i_result[1]) = 0xe0001fffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffffffffffff; ++ __m128i_out = __lsx_vssrani_d_q (__m128i_op0, __m128i_op1, 0x3); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff80000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_d_q (__m128i_op0, __m128i_op1, 0x60); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffaf1500000fffa; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000f8a40000f310; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000000003e2; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_wu_d (__m128i_op0, __m128i_op1, 0x26); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_h_w (__m128i_op0, __m128i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xf654ad7447e59090; ++ *((unsigned long *)&__m128i_op0[0]) = 0x27b1b106b8145f50; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_wu_d (__m128i_op0, __m128i_op1, 0x3f); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfff8ffa2fffdffb0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ff800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_du_q (__m128i_op0, __m128i_op1, 0x50); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_bu_h (__m128i_op0, __m128i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarn.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarn.c +new file mode 100644 +index 000000000..a23ad7cd2 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarn.c +@@ -0,0 +1,669 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int 
unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffd24271c4; ++ *((unsigned long *)&__m128i_op1[0]) = 0x2711bad1e8e309ed; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xbf8000000000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xcf00000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x52527d7d52527d7d; ++ *((unsigned long *)&__m128i_op1[0]) = 0x52527d7d52527d7d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000017fff9000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000210011084; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vssrarn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000017fda829; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0403cfcf01c1595e; ++ *((unsigned long *)&__m128i_op0[0]) = 0x837cd5db43fc55d4; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0404038383838404; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0404038383838404; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fff7fff80007fff; ++ __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffcb410000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffeb827ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000800000008; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000800000008; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000000080000000; ++ __m128i_out = __lsx_vssrarn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xc1bdceee242070db; ++ *((unsigned long *)&__m128i_op1[0]) = 0xe8c7b756d76aa478; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfefd7f7f7f7f7f7e; ++ *((unsigned long *)&__m128i_op0[0]) = 0xdffdbffeba6f5543; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0080000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0080000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ffffff000000ff; ++ __m128i_out = __lsx_vssrarn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ffffff000000ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000200010; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000002010; ++ __m128i_out = __lsx_vssrarn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xff00000000000001; ++ __m128i_out = __lsx_vssrarn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xc1bdceee242070db; ++ *((unsigned long *)&__m128i_op1[0]) = 0xe8c7b756d76aa478; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000000000f0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000003fffff00; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000003fffff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ff000000ff00; ++ __m128i_out = __lsx_vssrarn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000007ae567a3e; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000700ff00000000; ++ __m128i_out = __lsx_vssrarn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0bd80bd80bdfffff; 
++ *((unsigned long *)&__m128i_op0[0]) = 0x0bd80bd80bd80000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x006f0efe258ca851; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffff00010000fff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffff00010000fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000ffff00; ++ __m128i_out = __lsx_vssrarn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000f00f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0032000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000007fff; ++ __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x2020202020207f7f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000007fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fff7fff7fff0000; ++ __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1111311111114111; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x1111311111110000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x2020202020207fff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000003f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f417f417f027e03; ++ *((unsigned long *)&__m128i_op1[1]) = 0x9780697084f07dd7; ++ *((unsigned long *)&__m128i_op1[0]) = 0x87e3285243051cf3; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vssrarn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fea8ff44; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000fea8ff44; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000008000; ++ __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffbfffffffbf; ++ *((unsigned long *)&__m128i_op0[0]) = 
0xffffffbfffffffbf; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0008000000000000; 
++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0008000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffff00; ++ __m128i_out = __lsx_vssrarn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x13f9c5b60028a415; ++ *((unsigned long *)&__m128i_op0[0]) = 0x545cab1d81a83bea; ++ *((unsigned long *)&__m128i_op1[1]) = 0x13f9c5b60028a415; ++ *((unsigned long *)&__m128i_op1[0]) = 0x545cab1d81a83bea; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff0015172b; ++ __m128i_out = __lsx_vssrarn_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x14ccc631eb3339ce; ++ *((unsigned long *)&__m128i_op0[0]) = 0x685670d197a98f2e; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000010000; ++ __m128i_out = __lsx_vssrarn_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0003c853c843c87e; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0003c853c843c87e; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff0000ffff0000; ++ __m128i_out = __lsx_vssrarn_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000e36400015253; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000035ed0001e000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000e36400015253; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000035ed0001e000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x1c6c80007fffffff; ++ __m128i_out = __lsx_vssrarn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000b4a00008808; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0808080800000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrarn_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xc2fc0000c3040000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc2fc0000c3040000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000060000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x0000060000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0600000100000001; ++ __m128i_out = __lsx_vssrarn_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0080008000800080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0080006b00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000500000000; ++ __m128i_out = __lsx_vssrarn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; 
++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7efefefe82010201; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ff0000ff; ++ __m128i_out = __lsx_vssrarn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffc0ff80ff800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffff00; ++ __m128i_out = __lsx_vssrarn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff010300ff0103; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000002ffffffff; ++ __m128i_out = __lsx_vssrarn_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000045340a6; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000028404044; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000fffffffe000; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x0000102020204000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x045340a628404044; ++ __m128i_out = __lsx_vssrarn_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001400000014; ++ __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op1[0]) = 
0x8080808080808080; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000adad0000adad; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000052520000adad; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xd6a09e662ab46b31; ++ *((unsigned long *)&__m128i_op0[0]) = 0x34b8122ef4054bb3; ++ *((unsigned long *)&__m128i_op1[1]) = 0x9c9c9c9b509be72f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3513f2e3a1774d2c; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000501ffff0005; ++ __m128i_out = __lsx_vssrarn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0021b761002c593c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x002584710016cc56; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000202020200; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000ff0000ffff; ++ __m128i_out = __lsx_vssrarn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0002000200000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00020000ffff0001; ++ __m128i_out = __lsx_vssrarn_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x004001be00dc008e; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vssrarn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1f3f06d4fcba4e98; ++ *((unsigned long *)&__m128i_op0[0]) = 0x2e1135681fa8d951; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4399d3221a29d3f2; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000007d07fffffff; ++ __m128i_out = __lsx_vssrarn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x0); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000008686; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00008e5680008685; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00007fff7fff8000; ++ __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffc7f100004000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000c7f14000; ++ __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4500000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4400000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xff000000ff000000; ++ __m128i_out = __lsx_vssrarn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8a8a8a8a8a8a8a8a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8a8a8a8a8a8a8a8a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarni.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarni.c +new file mode 100644 +index 000000000..76fac97be +--- /dev/null ++++ 
b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarni.c +@@ -0,0 +1,848 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_du_q (__m128i_op0, __m128i_op1, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0020002000200020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0020002000200020; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff60090958; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0fa96b88d9944d42; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00001802041b0013; ++ __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000003; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_du_q (__m128i_op0, __m128i_op1, 0x72); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_b_h (__m128i_op0, __m128i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0200020002000200; ++ __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_wu_d (__m128i_op0, __m128i_op1, 0x3f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0101010100000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_h_w (__m128i_op0, __m128i_op1, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000017fda829; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_du_q (__m128i_op0, __m128i_op1, 0x5c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0002000000020000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0002000000020000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xda4643d5301c4000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc1fc0d3bf55c4000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7505853d654185f5; ++ 
*((unsigned long *)&__m128i_op1[0]) = 0x01010000fefe0101; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vssrarni_w_d (__m128i_op0, __m128i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_wu_d (__m128i_op0, __m128i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000fe00ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_h_w (__m128i_op0, __m128i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffff02; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; 
++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00020002000d0000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000020f2300ee; ++ *((unsigned long *)&__m128i_result[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_b_h (__m128i_op0, __m128i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000000f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_d_q (__m128i_op0, __m128i_op1, 0x79); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000073; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000010000002b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000400000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_w_d (__m128i_op0, __m128i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x01ff01ff01ff01ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x01ff01ff01ff01ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffffffffffff; ++ __m128i_out = __lsx_vssrarni_d_q (__m128i_op0, __m128i_op1, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_d_q (__m128i_op0, __m128i_op1, 0x59); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000000f0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x00000000000000f0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_h_w (__m128i_op0, __m128i_op1, 0x11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x001800390049ffaa; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0029ff96005cff88; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_d_q (__m128i_op0, __m128i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x03c0000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x03c0038000000380; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0f0000000f000000; ++ __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0bef0b880bd80bd8; ++ *((unsigned 
long *)&__m128i_op1[1]) = 0x0bd80bd80bdfffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0bd80bd80bd80000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000017b017b01; ++ __m128i_out = __lsx_vssrarni_d_q (__m128i_op0, __m128i_op1, 0x5b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffe0001fffe0001; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffe0001fffe0001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_h_w (__m128i_op0, __m128i_op1, 0x15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_w_d (__m128i_op0, __m128i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_w_d (__m128i_op0, __m128i_op1, 0x32); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffcfffcfffcfffc; ++ 
*((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_du_q (__m128i_op0, __m128i_op1, 0x30); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xf0800320fff1fa20; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0032000000000000; ++ __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1111113111111141; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1111113111111121; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0032000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffff0000; ++ __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000003f; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x7f417f417f027e03; ++ *((unsigned long *)&__m128i_op1[1]) = 0xe93d0bd19ff0c170; ++ *((unsigned long *)&__m128i_op1[0]) = 0x5237c1bac9eadf55; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_du_q (__m128i_op0, __m128i_op1, 0x60); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000000065a0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_w_d (__m128i_op0, __m128i_op1, 0x2e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x9941d155f43a9d08; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000080000000; ++ __m128i_out = __lsx_vssrarni_w_d (__m128i_op0, __m128i_op1, 0x11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0c0c8b8a8b8b0b0a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8b8a8a898a8a8909; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1817161517161514; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1615141315141312; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrarni_wu_d (__m128i_op0, __m128i_op1, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc0fffff000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffe00000; ++ __m128i_out = __lsx_vssrarni_h_w (__m128i_op0, __m128i_op1, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_w_d (__m128i_op0, __m128i_op1, 0x29); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0010001000000010; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000080000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_h_w (__m128i_op0, __m128i_op1, 0x11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_h_w (__m128i_op0, __m128i_op1, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_du_q (__m128i_op0, __m128i_op1, 0x58); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000100fe000100fe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000002000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000002000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_w_d (__m128i_op0, __m128i_op1, 0x31); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_b_h (__m128i_op0, __m128i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0d1202e19235e2bc; ++ 
*((unsigned long *)&__m128i_op0[0]) = 0xea38e0f75f6e56d1; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffe500ffffc085; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffc000ffffc005; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff00000000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000008; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100080000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_h_w (__m128i_op0, __m128i_op1, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0400400204004002; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000010000000000; ++ __m128i_out = __lsx_vssrarni_w_d (__m128i_op0, __m128i_op1, 0x32); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffe080f6efc100f7; ++ *((unsigned long *)&__m128i_op0[0]) = 0xefd32176ffe100f7; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffe080f6efc100f7; ++ *((unsigned long *)&__m128i_op1[0]) = 0xefd32176ffe100f7; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_wu_d (__m128i_op0, __m128i_op1, 0x2c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000005452505; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000004442403e4; ++ *((unsigned long *)&__m128i_op1[1]) = 0x03fc03fc03fc03fc; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000b4a00008808; ++ *((unsigned long *)&__m128i_result[0]) = 0x0808080800000000; ++ __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffff01; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0x11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_du_q (__m128i_op0, __m128i_op1, 0x71); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x2ea268972ea2966a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4026f4ffbc175bff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x5d7f5d807fea807f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_b_h (__m128i_op0, __m128i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0xfff0fffffff00001; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfff0fffffff09515; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffff53d9; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff0001ffff9515; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000ff00000000; ++ __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000003000000d612; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000bfffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffff53d9; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff0001ffff9515; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000500000000; ++ __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x80808080806b000b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000c0c0c000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_b_h (__m128i_op0, __m128i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffe1fffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fffffff7ffffffb; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000080008; ++ __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned 
long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_du_q (__m128i_op0, __m128i_op1, 0x25); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_b_h (__m128i_op0, __m128i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x1ab6021f72496458; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7750af4954c29940; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1ab6021f72496458; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7750af4954c29940; ++ *((unsigned long *)&__m128i_result[1]) = 0x6ad8ffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x6ad8ffffffffffff; ++ __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0002008300500088; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000088; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_h_w (__m128i_op0, __m128i_op1, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_b_h (__m128i_op0, __m128i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_d_q (__m128i_op0, __m128i_op1, 0x12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ 
++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0x13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000020000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000010000000000; ++ __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_h_w (__m128i_op0, __m128i_op1, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_w_d (__m128i_op0, __m128i_op1, 0x2d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1200091212121212; ++ *((unsigned long *)&__m128i_result[1]) = 0x0002000200020002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000008000000080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000008000000080; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_h_w (__m128i_op0, __m128i_op1, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_d_q (__m128i_op0, __m128i_op1, 0x51); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_du_q (__m128i_op0, __m128i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_h_w (__m128i_op0, __m128i_op1, 0x13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000c6c6c6c6; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000c6c6c6c6; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000001fffeff98; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0014ffe4ff76ffc4; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000011; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000016; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000011; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000016; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_w_d (__m128i_op0, __m128i_op1, 0x2b); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff86dd83ff9611c3; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000035697d4e; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000013ecaadf2; ++ *((unsigned long *)&__m128i_result[1]) = 0xe280e67f00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00007f7f00007f80; ++ __m128i_out = __lsx_vssrarni_b_h (__m128i_op0, __m128i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xd3259a2984048c23; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf9796558e39953fd; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000000080000000; ++ __m128i_out = __lsx_vssrarni_w_d (__m128i_op0, __m128i_op1, 0x14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0xe); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x017001a002c80260; ++ *((unsigned long *)&__m128i_op0[0]) = 0x01d8000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x2e34594c3b000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vssrarni_wu_d (__m128i_op0, __m128i_op1, 0x10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00060fbf02596848; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00020fbf04581ec0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x010169d9010169d9; ++ *((unsigned long *)&__m128i_op1[0]) = 0x01010287010146a1; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0002000200000001; ++ __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5252525252525252; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5252525252525252; ++ *((unsigned long *)&__m128i_op1[1]) = 0x004d004d004d004d; ++ *((unsigned long *)&__m128i_op1[0]) = 0x004d004d004d004d; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffffffffffff; ++ __m128i_out = __lsx_vssrarni_d_q (__m128i_op0, __m128i_op1, 0x13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x06d9090909090909; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_d_q (__m128i_op0, __m128i_op1, 
0x48); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0039d21e3229d4e8; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6d339b4f3b439885; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffff000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000d00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffc0000000000000; ++ __m128i_out = __lsx_vssrarni_d_q (__m128i_op0, __m128i_op1, 0x2e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000100000001000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x37b951002d81a921; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vssrarni_w_d (__m128i_op0, __m128i_op1, 0x3e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000075dbe982; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000071e48cca; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0ebb7d300e3c9199; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vssrarni_w_d (__m128i_op0, __m128i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000930400008a10; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00006f9100007337; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00c2758000bccf42; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00a975be00accf03; ++ *((unsigned long *)&__m128i_result[1]) = 0x00250023001c001d; ++ *((unsigned long *)&__m128i_result[0]) = 0x309d2f342a5d2b34; ++ __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, 
__m128i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff01ffffe41f0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfff00000ffff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000155; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000002b; ++ __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff00e400ff00e400; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfee1f6f18800ff7f; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrln.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrln.c +new file mode 100644 +index 000000000..ed600c72d +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrln.c +@@ -0,0 +1,543 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lsxintrin.h> ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result;
++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8080808000008080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8080000080800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffff0000; ++ __m128i_out = __lsx_vssrln_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x5ff6a0a40ea8f47c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x5ff6a0a40e9da42a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_bu_h (__m128i_op0, __m128i_op1); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00003ff000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000fffc00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffff0000fffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001afffffff7; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000750500006541; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000100fffffefd; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff00000000; ++ __m128i_out = __lsx_vssrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x80000000fff6fc00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7f0000007f000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8080000180800100; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ff00ffff; ++ __m128i_out = __lsx_vssrln_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_out = __lsx_vpickve2gr_b (__m128i_op0, 0x7); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffefff6fff80002; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_bu_h 
(__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffe5; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffe5; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101017f0101017f; ++ __m128i_out = __lsx_vssrln_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00005a5a00005a5a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00005b5a00005b5a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00ff00ff; ++ __m128i_out = __lsx_vssrln_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x65b780a2ae3bf8ca; ++ *((unsigned long *)&__m128i_op1[0]) = 0x161d0c373c200827; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000000001ff; ++ __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xf10cf508f904fd01; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf10cf508f904fd01; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_b_h (__m128i_op0, 
__m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xf8f8e018f8f8e810; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf8f8f008f8f8f800; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x001ffff0003ffff0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000fffefffefffef; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffefffef; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrln_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000f00; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffff00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff00000000; ++ __m128i_out = __lsx_vssrln_w_d (__m128i_op0, __m128i_op1); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000c0000bd49; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000c7fff000c; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000f0009d3c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000016fff9d3d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000c000000060003; ++ __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000003a24; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003dbe88077c78c1; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000fffe0001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00003a247fff7fff; ++ __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrln_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000003fbf3fbf; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fff7fff7fff7ff8; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffff0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x3fbf3fbf00007fff; ++ __m128i_out = __lsx_vssrln_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000fff00000e36; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000fff0e36; ++ __m128i_out = __lsx_vssrln_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffe000ffdf; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fff7fff7fff7fff; ++ __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffff53d9; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff0001ffff9515; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vssrln_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vssrln_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffc0800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000000001b; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000001b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000007fff0018; ++ __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vssrln_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffefffefffefffef; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffefffefffefffef; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0080000700000014; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fffbffda; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3e25c8317394dae6; ++ *((unsigned long *)&__m128i_op1[0]) = 0xcda585aebbb2836a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x67eb85afb2ebb000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000ac00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vssrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000c6c6c6c6; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000c6c6c6c6; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f7f7f7f7f7f7f7f; ++ __m128i_out = __lsx_vssrln_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vssrln_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x64616462b76106dc; ++ *((unsigned long *)&__m128i_op1[0]) = 0x64616462b71d06c2; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00c0c000c0000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc0000000c000c000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00c0c000c0000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc0000000c000c000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fff7fff7fff7fff; ++ __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x001e001e001e001e; ++ *((unsigned long *)&__m128i_op0[0]) = 0x001e001e001e001e; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fff7fff7fff7fff; ++ __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001700000017; ++ *((unsigned long *)&__m128i_op0[0]) = 0x59f7fd8759f7fd87; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000001700000017; ++ *((unsigned long *)&__m128i_op1[0]) = 0x59f7fd8759f7fd87; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000007fff7fff; ++ __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffc0000000000000; ++ 
*((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff00000001; ++ __m128i_out = __lsx_vssrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00007fff7fff8000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000007f7f7f; ++ __m128i_out = __lsx_vssrln_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf589caff5605f2fa; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000a74aa8a55ab; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6adeb5dfcb000000; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vssrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vssrln_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlni.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlni.c +new file mode 100644 +index 000000000..613668143 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlni.c +@@ -0,0 +1,668 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lsxintrin.h> ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1])
= 0xffffffff80000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001ffff00000000; ++ __m128i_out = __lsx_vssrlni_w_d (__m128i_op0, __m128i_op1, 0x2f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_du_q (__m128i_op0, __m128i_op1, 0x4f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_b_h (__m128i_op0, __m128i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x004e005500060031; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff870068fff5ffb3; ++ *((unsigned long *)&__m128i_op1[1]) = 0x004e005500060031; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff870068fff5ffb3; ++ *((unsigned long *)&__m128i_result[1]) = 0x04e00060ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x04e00060ffffffff; ++ __m128i_out = __lsx_vssrlni_hu_w (__m128i_op0, __m128i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x52527d7d52527d7d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x52527d7d52527d7d; ++ *((unsigned long *)&__m128i_op1[1]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vssrlni_wu_d (__m128i_op0, __m128i_op1, 0x10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8080808000008080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8080000080800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001010100010100; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_wu_d (__m128i_op0, __m128i_op1, 0x2f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000080007f80800; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000001000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00047fff00007fff; ++ __m128i_out = __lsx_vssrlni_h_w (__m128i_op0, __m128i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ff0000ff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x01fc020000fe0100; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000003fc0003; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_d_q (__m128i_op0, __m128i_op1, 0x56); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000017fda829; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_du_q (__m128i_op0, __m128i_op1, 0x27); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0c03e17edd781b11; ++ *((unsigned long *)&__m128i_op0[0]) = 0x342caf9bffff1fff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000040000000400; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0c037fff342c7fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_h_w (__m128i_op0, __m128i_op1, 0x10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000fff8fff8; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000fff80000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_wu_d (__m128i_op0, __m128i_op1, 0x37); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffff100fffc; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff00000000; ++ __m128i_out = __lsx_vssrlni_w_d (__m128i_op0, __m128i_op1, 0x21); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffff100fffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m128i_op1[1]) = 0xfffffffff100fffc; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vssrlni_bu_h (__m128i_op0, __m128i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000020; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_bu_h (__m128i_op0, __m128i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_w_d (__m128i_op0, __m128i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000800080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_w_d (__m128i_op0, __m128i_op1, 0x38); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffff800; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000080000000; ++ *((unsigned 
long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x001fffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_d_q (__m128i_op0, __m128i_op1, 0x4b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0a000a000a000a00; ++ __m128i_out = __lsx_vssrlni_h_w (__m128i_op0, __m128i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xf2f2e5e5e5e5e5dc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffff7; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_w_d (__m128i_op0, __m128i_op1, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_b_h (__m128i_op0, __m128i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned 
long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_h_w (__m128i_op0, __m128i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_w_d (__m128i_op0, __m128i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ff000000ff00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000003fc0; ++ __m128i_out = __lsx_vssrlni_wu_d (__m128i_op0, __m128i_op1, 0x22); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_du_q (__m128i_op0, __m128i_op1, 0x35); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_b_h (__m128i_op0, __m128i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_hu_w (__m128i_op0, __m128i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffffffffffff; ++ __m128i_out = __lsx_vssrlni_d_q (__m128i_op0, __m128i_op1, 0x35); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrlni_hu_w (__m128i_op0, __m128i_op1, 0x10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0008000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0101010101010101; ++ *((unsigned 
long *)&__m128i_op1[1]) = 0x41dfffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000083b00000000; ++ __m128i_out = __lsx_vssrlni_w_d (__m128i_op0, __m128i_op1, 0x33); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000003; ++ __m128i_out = __lsx_vssrlni_d_q (__m128i_op0, __m128i_op1, 0x7e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1ff85ffe2ae5d973; ++ *((unsigned long *)&__m128i_op1[1]) = 0x403be000ffffe000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000000ffc2f; ++ *((unsigned long *)&__m128i_result[0]) = 0x00201df000000000; ++ __m128i_out = __lsx_vssrlni_wu_d (__m128i_op0, __m128i_op1, 0x29); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000005151515; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000006302e00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000000003f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7f417f417f027e03; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000001fd0; ++ __m128i_out = __lsx_vssrlni_w_d (__m128i_op0, __m128i_op1, 0x32); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000000; ++ 
*((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_h_w (__m128i_op0, __m128i_op1, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffbfffffffbf; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffbfffffffbf; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000001ffffff7f; ++ __m128i_out = __lsx_vssrlni_d_q (__m128i_op0, __m128i_op1, 0x5f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000202fe02; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000101; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_hu_w (__m128i_op0, __m128i_op1, 0x11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x01203f1e3d1c3b1a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3918371635143312; ++ *((unsigned long *)&__m128i_op1[1]) = 0x21201f1e1d1c1b1a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1918171615141312; ++ *((unsigned long *)&__m128i_result[1]) = 0x480f7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fff7fff7fff7fff; ++ __m128i_out = __lsx_vssrlni_h_w (__m128i_op0, __m128i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00005dcbe7e830c0; ++ 
*((unsigned long *)&__m128i_op1[1]) = 0xfffffacdb6dbecac; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1f5533a694f902c0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000001fffff59; ++ __m128i_out = __lsx_vssrlni_du_q (__m128i_op0, __m128i_op1, 0x63); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000007f41; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_h_w (__m128i_op0, __m128i_op1, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000002000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000002000; ++ __m128i_out = __lsx_vssrlni_du_q (__m128i_op0, __m128i_op1, 0x39); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x14ccc6320076a4d2; ++ *((unsigned long *)&__m128i_op0[0]) = 0x685670d27e00682a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x14ccc6320076a4d2; ++ *((unsigned long *)&__m128i_op1[0]) = 0x685670d27e00682a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100000000; ++ __m128i_out = __lsx_vssrlni_bu_h (__m128i_op0, __m128i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xc000000fc0003fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xbffffff0ffffc00f; 
++ *((unsigned long *)&__m128i_op1[1]) = 0x0000003f0000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffc3ffff003e; ++ *((unsigned long *)&__m128i_result[1]) = 0x00c0000000bfffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000ffffff; ++ __m128i_out = __lsx_vssrlni_wu_d (__m128i_op0, __m128i_op1, 0x28); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x800000810000807f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x808080010080007f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x800000810000807f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x808080010080007f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000020000020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000020000020; ++ __m128i_out = __lsx_vssrlni_du_q (__m128i_op0, __m128i_op1, 0x62); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0400400204004002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000002002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_du_q (__m128i_op0, __m128i_op1, 0x6d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x2a29282726252423; ++ *((unsigned long *)&__m128i_op1[0]) = 0x2221201f1e1d1c1b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrlni_du_q (__m128i_op0, __m128i_op1, 0x26); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000002002; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x2a29282726252423; ++ *((unsigned long *)&__m128i_op1[0]) = 0x2221201f1e1d1c1b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00a8009800880078; ++ __m128i_out = __lsx_vssrlni_h_w (__m128i_op0, __m128i_op1, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8000807f00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x80006b0080808080; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fff00007fff7fff; ++ __m128i_out = __lsx_vssrlni_h_w (__m128i_op0, __m128i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000001010101; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_bu_h (__m128i_op0, __m128i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_w_d (__m128i_op0, __m128i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000001fe01; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x000000000001fe01; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0f0f0f0f00000000; ++ __m128i_out = __lsx_vssrlni_b_h (__m128i_op0, __m128i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_b_h (__m128i_op0, __m128i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_hu_w (__m128i_op0, __m128i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff010300ff0103; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x555500adfffc5cab; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010100000100; ++ __m128i_out = __lsx_vssrlni_bu_h (__m128i_op0, __m128i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_hu_w (__m128i_op0, __m128i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x03ff0101fc010102; ++ *((unsigned long *)&__m128i_op0[0]) = 0x03fffffffc010102; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fff010181010102; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fffffff81010102; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vssrlni_w_d (__m128i_op0, __m128i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x003f0000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003f0000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000300037ff000ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0003000300a10003; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_wu_d (__m128i_op0, __m128i_op1, 0x3c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000007070707; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_bu_h (__m128i_op0, __m128i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_du_q (__m128i_op0, __m128i_op1, 0x45); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffdfffcfffdfffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffdfffcfffdfffc; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_d_q (__m128i_op0, __m128i_op1, 0x13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_bu_h (__m128i_op0, __m128i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000053a4f452; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000053a; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_w_d (__m128i_op0, __m128i_op1, 0x14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_b_h (__m128i_op0, __m128i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000b3a6000067da; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00004e420000c26a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_d_q (__m128i_op0, __m128i_op1, 0x7a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x67eb85afb2ebb000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffffffffffff; ++ __m128i_out = __lsx_vssrlni_d_q (__m128i_op0, __m128i_op1, 0x38); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7c7c000000007176; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vssrlni_du_q (__m128i_op0, __m128i_op1, 0x3e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000c6c7; ++ *((unsigned long *)&__m128i_op0[0]) 
= 0x8d8d8d8d8d8cc6c6; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_w_d (__m128i_op0, __m128i_op1, 0x3c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000aa822a8228222; ++ *((unsigned long *)&__m128i_op0[0]) = 0x03aa558ec8546eb6; ++ *((unsigned long *)&__m128i_op1[1]) = 0x001a64b345308091; ++ *((unsigned long *)&__m128i_op1[0]) = 0x001f2f2cab1c732a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0155ffff754affff; ++ *((unsigned long *)&__m128i_result[0]) = 0x034cffff03e5ffff; ++ __m128i_out = __lsx_vssrlni_hu_w (__m128i_op0, __m128i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xc1bdceee242070dc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe907b754d7eaa478; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_h_w (__m128i_op0, __m128i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_hu_w (__m128i_op0, __m128i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0002711350a27112; ++ *((unsigned long *)&__m128i_op0[0]) 
= 0x00d5701794027113; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_du_q (__m128i_op0, __m128i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000203000010d0; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffc00300000220; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_d_q (__m128i_op0, __m128i_op1, 0x27); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000f50000000900; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000090900000998; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffffffffffff; ++ __m128i_out = __lsx_vssrlni_d_q (__m128i_op0, __m128i_op1, 0x20); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000001000010f8; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfff8ffa2fffdffb0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ff800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0f0f0f0f00000f00; ++ __m128i_out = __lsx_vssrlni_bu_h (__m128i_op0, __m128i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrn.c 
b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrn.c +new file mode 100644 +index 000000000..ec688bb12 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrn.c +@@ -0,0 +1,470 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0002000200020002; ++ __m128i_out = __lsx_vssrlrn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x004200a000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x004200a000200001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fff00007fff7fff; ++ __m128i_out = __lsx_vssrlrn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000003; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00040003ff83ff84; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00040003ff4dffca; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffefffefffffffc; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000002020202; ++ __m128i_out = __lsx_vssrlrn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f7f7f7f7f7f7f7f; ++ __m128i_out = __lsx_vssrlrn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffbe6ed563; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0080000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0080000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrlrn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0100000001000100; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0100010000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffff732a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000fbf9; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000001000000010; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000001000000010; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000007f00000000; ++ __m128i_out = __lsx_vssrlrn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrlrn_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrlrn_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000004fc04f81; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000004fc04f80; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00007f7f00007f7f; ++ __m128i_out = __lsx_vssrlrn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffc1000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffc1000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fff000000007fff; ++ __m128i_out = __lsx_vssrlrn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000bd3d00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffff0000000ad3d; ++ 
*((unsigned long *)&__m128i_op1[0]) = 0xfffff000fffff000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000007fff0000; ++ __m128i_out = __lsx_vssrlrn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf001f0010101f002; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vssrlrn_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000dfa6e0c6; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000d46cdc13; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff80df00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00007f7f00007f7f; ++ __m128i_out = __lsx_vssrlrn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfff0000000000000; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff3fbfffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3fbf3fbf00007fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x007f7f7f01027f02; ++ __m128i_out = __lsx_vssrlrn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000400000004; ++ __m128i_out = __lsx_vssrlrn_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0002000000000007; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vssrlrn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 
0x00000000ffffffe0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3f413f4100000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7f801fe000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fff7fff7fff7fff; ++ __m128i_out = __lsx_vssrlrn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000010000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000010000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000100000000fc; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000100000000fc; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0100000001000000; ++ __m128i_out = __lsx_vssrlrn_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000208000002080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000208000002080; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrlrn_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0c0b0a090b0a0908; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0a09080709080706; 
++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000040a04000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000040a04000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00123fff00120012; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0012001200120012; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00003fff00010000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x1200091212121212; ++ __m128i_out = __lsx_vssrlrn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0800010001ff8000; ++ *((unsigned long *)&__m128i_result[1]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x2e9028362e902836; ++ *((unsigned long *)&__m128i_op1[0]) = 0x2e9028362e902836; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001000000010; ++ __m128i_out = __lsx_vssrlrn_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x33f5c2d7d975d7fe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vssrlrn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000084d12ce; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000024170000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0002711350a27112; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00d5701794027113; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; 
++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrlrn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4399d3221a29d3f2; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0674c886fcba4e98; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfdce8003090b0906; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff001a00000000; ++ __m128i_out = __lsx_vssrlrn_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000010; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000010; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001000000010; ++ __m128i_out = __lsx_vssrlrn_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001fffe00014b41; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001fffe0001ffde; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0002000100020002; ++ __m128i_out = __lsx_vssrlrn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrni.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrni.c +new file mode 100644 +index 000000000..02f7ca08b +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrni.c +@@ -0,0 +1,597 @@ ++/* { dg-do run } 
*/ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_w_d (__m128i_op0, __m128i_op1, 0x3d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x8080808000008080; ++ *((unsigned long *)&__m128i_result[0]) = 0x8080000080800000; ++ __m128i_out = __lsx_vssrlrni_bu_h (__m128i_op0, __m128i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000080000000; ++ *((unsigned 
long *)&__m128i_result[0]) = 0x0000000080000000; ++ __m128i_out = __lsx_vssrlrni_wu_d (__m128i_op0, __m128i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000017fff9000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000210011084; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_bu_h (__m128i_op0, __m128i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fff000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_h_w (__m128i_op0, __m128i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000007f0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000100000004; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000007f00; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000001000000; ++ __m128i_out = __lsx_vssrlrni_h_w (__m128i_op0, __m128i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0101010400100203; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0103010301020109; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000110000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000007f00000004; ++ *((unsigned long *)&__m128i_result[1]) = 0x0202000402020202; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x0000200000010000; ++ __m128i_out = __lsx_vssrlrni_b_h (__m128i_op0, __m128i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_du_q (__m128i_op0, __m128i_op1, 0x56); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_du_q (__m128i_op0, __m128i_op1, 0x6d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff0001ffff8002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0010000400020004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffff20ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffc0020ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x07fff80000008000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000007ffe001; ++ __m128i_out = __lsx_vssrlrni_w_d (__m128i_op0, __m128i_op1, 0x25); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_d_q (__m128i_op0, __m128i_op1, 0x7c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x03574e3b94f2ca31; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000001f807b89; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000005050000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0505000005050505; ++ *((unsigned long *)&__m128i_result[1]) = 0x000d02540000007e; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001400140014; ++ __m128i_out = __lsx_vssrlrni_h_w (__m128i_op0, __m128i_op1, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_du_q (__m128i_op0, __m128i_op1, 0x41); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_du_q (__m128i_op0, __m128i_op1, 0x3b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x56a09e662ab46b31; ++ *((unsigned long *)&__m128i_op1[0]) = 0xb4b8122ef4054bb3; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x02b504f305a5c091; ++ __m128i_out = __lsx_vssrlrni_w_d (__m128i_op0, __m128i_op1, 0x25); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_w_d (__m128i_op0, __m128i_op1, 0x37); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000d000d000d000d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000d000d000d000d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000680000006800; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_w_d (__m128i_op0, __m128i_op1, 0x25); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000400; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000400; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_b_h (__m128i_op0, __m128i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00005555aaabfffe; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x003fffffff000000; ++ *((unsigned 
long *)&__m128i_result[1]) = 0x00000000000000ab; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000000000ff; ++ __m128i_out = __lsx_vssrlrni_h_w (__m128i_op0, __m128i_op1, 0x18); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_du_q (__m128i_op0, __m128i_op1, 0x43); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00ff000000ff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000007fff7fff; ++ __m128i_out = __lsx_vssrlrni_h_w (__m128i_op0, __m128i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffff0000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000080; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrlrni_wu_d (__m128i_op0, __m128i_op1, 0x18); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000080000000000; ++ __m128i_out = __lsx_vssrlrni_wu_d (__m128i_op0, __m128i_op1, 0x34); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x027c027c000027c0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000004f804f81; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000004f804f80; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000010000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001400000014; ++ __m128i_out = __lsx_vssrlrni_h_w (__m128i_op0, __m128i_op1, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ff81007c; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffb7005f0070007c; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff80007e028401; ++ *((unsigned long *)&__m128i_op1[0]) = 0x9a10144000400000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001ffff00010; ++ __m128i_out = __lsx_vssrlrni_du_q (__m128i_op0, __m128i_op1, 0x5b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_wu_d (__m128i_op0, __m128i_op1, 0x29); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000040000; ++ *((unsigned long *)&__m128i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000080000000000; ++ __m128i_out = __lsx_vssrlrni_hu_w (__m128i_op0, __m128i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffff9cff05; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffff9cfebd; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000002; ++ __m128i_out = __lsx_vssrlrni_h_w (__m128i_op0, __m128i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff7ffffef77fffdd; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf77edf9cffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vssrlrni_w_d (__m128i_op0, __m128i_op1, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x001fffff001fffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x001fffff001fffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x21201f1e1d1c1b1a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1918171615141312; ++ *((unsigned long *)&__m128i_result[1]) = 0x10ff10ff10ff10ff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrlrni_bu_h (__m128i_op0, __m128i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffa6ff91fdd8ef77; ++ *((unsigned long *)&__m128i_op0[0]) = 0x061202bffb141c38; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0101010101010101; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x010101fe0101fe87; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000004000000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_wu_d (__m128i_op0, __m128i_op1, 0x3a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffd60001723aa5f8; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000007f007f7f; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f7f7f7f7f7f7f7f; ++ __m128i_out = __lsx_vssrlrni_b_h (__m128i_op0, __m128i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x808080e280808080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8080636380806363; ++ *((unsigned long *)&__m128i_op1[1]) = 0x808080e280808080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8080636380806363; ++ *((unsigned long *)&__m128i_result[1]) = 0x0004000400040004; ++ *((unsigned long *)&__m128i_result[0]) = 0x0004000400040004; ++ __m128i_out = __lsx_vssrlrni_h_w (__m128i_op0, __m128i_op1, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000d0000000d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000dffff000d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000070007; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000007ffff; ++ __m128i_out = __lsx_vssrlrni_hu_w (__m128i_op0, __m128i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000800c00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000800000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_hu_w (__m128i_op0, __m128i_op1, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000007fff7fff; ++ __m128i_out = __lsx_vssrlrni_h_w (__m128i_op0, __m128i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffff0100ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0607060700000807; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0707f8f803e8157e; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrlrni_du_q (__m128i_op0, __m128i_op1, 0x31); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_b_h (__m128i_op0, __m128i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_du_q (__m128i_op0, __m128i_op1, 0x21); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_b_h (__m128i_op0, __m128i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5252525252525252; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5252525252525252; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_bu_h (__m128i_op0, __m128i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc0808000c0808000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000003020302; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_h_w (__m128i_op0, __m128i_op1, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffc0ff80ff800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_wu_d (__m128i_op0, __m128i_op1, 0x15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffff0000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_bu_h (__m128i_op0, __m128i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_bu_h (__m128i_op0, __m128i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7ffffffe00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7ffffffe00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrlrni_du_q (__m128i_op0, __m128i_op1, 0x3a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffc0800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000008080600; ++ __m128i_out = __lsx_vssrlrni_b_h (__m128i_op0, __m128i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x52525252adadadad; ++ *((unsigned long *)&__m128i_op0[0]) = 0x52525252adadadad; ++ *((unsigned long *)&__m128i_op1[1]) = 0x800000007fffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x800000007fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrlrni_hu_w (__m128i_op0, __m128i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x003ef89df07f0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003ec0fc0fbfe001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3ff800ff2fe6c00d; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfff40408ece0e0de; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vssrlrni_w_d (__m128i_op0, __m128i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x4000400040004000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_h_w (__m128i_op0, __m128i_op1, 0x12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_h_w (__m128i_op0, __m128i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ff960001005b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffa500010003; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffff7ffffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0020000000000000; ++ __m128i_out = __lsx_vssrlrni_w_d (__m128i_op0, __m128i_op1, 0x2b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1748c4f9ed1a5870; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffffffffffff; ++ __m128i_out = __lsx_vssrlrni_d_q (__m128i_op0, __m128i_op1, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffff7ffffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfcfcfcdcfcfcfcdc; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfcfcfcdcfcfcfcdc; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010001000000010; ++ *((unsigned long *)&__m128i_result[0]) = 0x0010001000100010; ++ __m128i_out = __lsx_vssrlrni_hu_w (__m128i_op0, __m128i_op1, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000001f0000001f; ++ 
*((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x4000000040000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_du_q (__m128i_op0, __m128i_op1, 0x27); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_w_d (__m128i_op0, __m128i_op1, 0x28); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_du_q (__m128i_op0, __m128i_op1, 0x26); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x117d7f7b093d187f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000034; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfe1bfefe00011ee1; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfe1bfe6c03824c60; ++ *((unsigned long *)&__m128i_result[1]) = 0x7f7f7f7f0000001a; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f7f017f7f7f7f7f; ++ __m128i_out = __lsx_vssrlrni_b_h (__m128i_op0, __m128i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff3a81ffff89fd; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffb3c3ffff51ba; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0802080408060803; ++ __m128i_out = __lsx_vssrlrni_b_h (__m128i_op0, __m128i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff00ffffff00ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00ff000900ffff98; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vssrlrni_w_d (__m128i_op0, __m128i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ unsigned_int_out = __lsx_vpickve2gr_bu (__m128i_op0, 0xc); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000056000056; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3a8000003a800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000efffefff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xa03aa03ae3e2e3e2; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_d_q (__m128i_op0, __m128i_op1, 0x75); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000760151; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003e0021009a009a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000246d9755; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000003e2427c2ee; ++ *((unsigned long *)&__m128i_result[1]) = 0x00001e5410082727; ++ *((unsigned long *)&__m128i_result[0]) = 0x00007f7f00107f7f; ++ __m128i_out = __lsx_vssrlrni_b_h (__m128i_op0, __m128i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000000f1384; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000000004ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vssrlrni_bu_h (__m128i_op0, __m128i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x10f8000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_h_w (__m128i_op0, __m128i_op1, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +-- +2.33.0 + diff --git a/LoongArch-Add-tests-for-the-SX-vector-multiplication.patch b/LoongArch-Add-tests-for-the-SX-vector-multiplication.patch new file mode 100644 index 0000000000000000000000000000000000000000..b39a8e32e2adc8e6ba6d07cc6fe7f4a76a4d9004 --- /dev/null +++ b/LoongArch-Add-tests-for-the-SX-vector-multiplication.patch @@ -0,0 +1,2990 @@ +From 239d4bdbbc72f83efba3830203443b0b2ba4f2ca Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Mon, 11 Sep 2023 10:15:12 +0800 +Subject: [PATCH 083/124] LoongArch: Add tests for the SX vector multiplication + instruction. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lsx/lsx-vmuh-1.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vmuh-2.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vmul.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vmulwev-1.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vmulwev-2.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vmulwev-3.c: New test. 
+ * gcc.target/loongarch/vector/lsx/lsx-vmulwod-1.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vmulwod-2.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vmulwod-3.c: New test. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/lsx/lsx-vmuh-1.c | 353 ++++++++++++++ + .../loongarch/vector/lsx/lsx-vmuh-2.c | 372 +++++++++++++++ + .../loongarch/vector/lsx/lsx-vmul.c | 282 ++++++++++++ + .../loongarch/vector/lsx/lsx-vmulwev-1.c | 434 ++++++++++++++++++ + .../loongarch/vector/lsx/lsx-vmulwev-2.c | 344 ++++++++++++++ + .../loongarch/vector/lsx/lsx-vmulwev-3.c | 245 ++++++++++ + .../loongarch/vector/lsx/lsx-vmulwod-1.c | 272 +++++++++++ + .../loongarch/vector/lsx/lsx-vmulwod-2.c | 282 ++++++++++++ + .../loongarch/vector/lsx/lsx-vmulwod-3.c | 308 +++++++++++++ + 9 files changed, 2892 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmul.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-3.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-1.c +new file mode 100644 +index 000000000..ab650a024 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-1.c +@@ -0,0 +1,353 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ 
++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x059a35ef139a8e00; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m128i_result[0]) = 0x4040404040404040; ++ __m128i_out = __lsx_vmuh_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xc0c00000c0c00000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc0c00c01c2cd0009; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000800; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc0fffff000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffe00000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff0000ac26; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00ff000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000ffffff81fe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffff00ffff7e01; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000fffe01fd02; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000fe86; ++ __m128i_out = __lsx_vmuh_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0xff8000010f800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000fff80000; ++ __m128i_out = __lsx_vmuh_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xbf3efff536d5169b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7ebdfffffddf3f40; ++ 
*((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x3f5ec0a0feefa0b0; ++ __m128i_out = __lsx_vmuh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fffffff7ffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff7ffffffe; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fffffff7ffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fffffff7ffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x3fffffff3ffffffe; ++ *((unsigned long *)&__m128i_result[0]) = 0x3fffffff3ffffffe; ++ __m128i_out = __lsx_vmuh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x003f0000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003f0000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fff0101ffffe000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fffffffa0204000; ++ *((unsigned long *)&__m128i_result[1]) = 0x001f7fc100000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x001f7fff00000000; ++ __m128i_out = __lsx_vmuh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000ff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000000010000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000cd630000cd63; ++ *((unsigned long *)&__m128i_op1[1]) = 0xcd636363cd636363; ++ *((unsigned long *)&__m128i_op1[0]) = 0xcd636363cd636363; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffcd63ffffcd63; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffd765ffffd765; ++ __m128i_out = __lsx_vmuh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff7fffffffffffff; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0040000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000015516a768038; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffff9ed2e1c000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x007ffd0001400840; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x007ffd0001400840; ++ *((unsigned long *)&__m128i_result[1]) = 0x3fffffff80000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00003ffd000a4000; ++ __m128i_out = __lsx_vmuh_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0032000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0032000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000009c400000000; ++ __m128i_out = __lsx_vmuh_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0002000200020002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0202fe02fd020102; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000202fe02; ++ __m128i_out = __lsx_vmuh_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0002000000000007; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000006362ffff; ++ __m128i_out = __lsx_vmuh_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000fffe0002; ++ __m128i_out = __lsx_vmuh_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-2.c +new file mode 100644 +index 000000000..60b6e3503 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-2.c +@@ -0,0 +1,372 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000011; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000011; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000011; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000011; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vmuh_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000c5ac01015b; ++ *((unsigned long *)&__m128i_op0[0]) = 0xaaacac88a3a9a96a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000001300000013; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000001300000013; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) 
= 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000038003; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000040033; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000080000068; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000014; ++ __m128i_out = __lsx_vmuh_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vmuh_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x10f881a20ffd02b0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ff800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0ff780a10efc01af; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000fe7f0000; ++ __m128i_out = __lsx_vmuh_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_hu (__m128i_op0, 
__m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000efffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001001100110068; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1d8000001d800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1d8000001d800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1d8000001d800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1d8000001d800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0366000003660000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0366000003660000; ++ __m128i_out = __lsx_vmuh_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xbfd10d0d7b6b6b73; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc5c534920000c4ed; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000800; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7ef400ad21fc7081; ++ *((unsigned long *)&__m128i_op0[0]) = 0x28bf0351ec69b5f2; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffb96bffff57c9; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff6080ffff4417; ++ *((unsigned long *)&__m128i_result[1]) = 0x7ef3ddac21fc5a2c; ++ *((unsigned long *)&__m128i_result[0]) = 0x28bee9edec690869; ++ __m128i_out = __lsx_vmuh_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_out = __lsx_vpickve2gr_h (__m128i_op0, 0x0); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000200000002000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffe000ffdf; ++ *((unsigned long *)&__m128i_result[1]) = 0x00001fff00001fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000214f; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc31b63d846ebc810; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00ff0000800000ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffff941d; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000000010a7; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000046ebaa2c; ++ __m128i_out = __lsx_vmuh_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000cf4f4f00; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000cf4f4f00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x000000000000007c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000005f0003e000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000897957687; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000408; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000003397dd140; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000004bd7cdd20; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0016ffb00016ffb0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0016ffb00016ffb0; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000004a294b; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000006d04bc; ++ __m128i_out = __lsx_vmuh_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x007ffe7ffe400000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x007ffd0001400840; ++ __m128i_out = __lsx_vmuh_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffe; ++ __m128i_out = __lsx_vmuh_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffa800000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000158; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000157; ++ __m128i_out = __lsx_vmuh_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x001a64b345308091; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x001f2f2cab1c732a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1baf8eabd26bc629; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1c2640b9a8e9fb49; ++ *((unsigned long *)&__m128i_result[1]) = 0x0002dab8746acf8e; ++ *((unsigned long *)&__m128i_result[0]) = 0x00036dd1c5c15856; ++ __m128i_out = __lsx_vmuh_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3a8000003a800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000ef0000000003b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00003a7fc58074ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000eeff1100e; ++ __m128i_out = __lsx_vmuh_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmul.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmul.c +new file mode 100644 +index 000000000..8ba666275 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmul.c +@@ -0,0 +1,282 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmul_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x54feed87bc3f2be1; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8064d8f6a494afcb; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmul_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmul_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fc000007fc00000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1e801ffc7fc00000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmul_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffe003c1f0077; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffff0074230438; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ff0000000438; ++ __m128i_out = __lsx_vmul_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ unsigned_int_out = __lsx_vpickve2gr_bu (__m128i_op0, 0x2); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000800800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000800800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000004000000000; ++ __m128i_out = __lsx_vmul_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfff5fff4002ffff5; ++ *((unsigned long *)&__m128i_op1[1]) = 0xaa858644fb8b3d49; ++ *((unsigned long *)&__m128i_op1[0]) = 0x18499e2cee2cc251; ++ *((unsigned long *)&__m128i_result[1]) = 0x8644000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xaed495f03343a685; ++ __m128i_out = __lsx_vmul_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7505443065413aed; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0100d6effefd0498; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7505443065413aed; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0100d6effefd0498; ++ *((unsigned long *)&__m128i_result[1]) = 0xb71289fdfbea3f69; ++ *((unsigned long *)&__m128i_result[0]) = 0x4e17c2ffb4851a40; ++ __m128i_out = __lsx_vmul_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfc01fcfefc02fdf7; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfe00fcfffe01fd01; 
++ *((unsigned long *)&__m128i_op1[1]) = 0xfc01fd1300000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfe00fd1400010000; ++ *((unsigned long *)&__m128i_result[1]) = 0xc72ef153fc02fdf7; ++ *((unsigned long *)&__m128i_result[0]) = 0xca31bf15fd010000; ++ __m128i_out = __lsx_vmul_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vmul_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmul_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmul_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc000c000c000ff81; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x5d5d5d5d5d5d5d5d; ++ *((unsigned long *)&__m128i_op1[0]) = 0x5d5d5d5d5d5d0000; ++ *((unsigned long *)&__m128i_result[1]) = 0xa2a2a2a3a2a2a2a3; ++ *((unsigned long *)&__m128i_result[0]) = 0xc605c000aedd0000; ++ __m128i_out = __lsx_vmul_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xacc8c794af2caf01; ++ *((unsigned long *)&__m128i_op0[0]) = 0xa91e2048938c40f0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmul_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xeeb1e4f43c3763f3; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff5a6fe3d7; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000021e79364; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000718ea657431b; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000006ca193ec; ++ *((unsigned long *)&__m128i_result[0]) = 0x00008e72b5b94cad; ++ __m128i_out = __lsx_vmul_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmul_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmul_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffe000ffffe000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x467f6080467d607f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmul_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000001000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000001000; ++ __m128i_out = __lsx_vmul_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x007f008000ea007f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmul_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0042003e0042002f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001fffc0001fffc; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffe0004fffe0004; ++ __m128i_out = __lsx_vmul_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xc1bdceee242070db; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe8c7b756d76aa478; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x3f433212dce09025; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmul_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmul_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmul_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xf359f359f359f359; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf359f359f359f359; ++ *((unsigned long *)&__m128i_op1[1]) = 0xd3259a2984048c23; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf9796558e39953fd; ++ *((unsigned long *)&__m128i_result[1]) = 
0x86dd8341b164f12b; ++ *((unsigned long *)&__m128i_result[0]) = 0x9611c3985b3159f5; ++ __m128i_out = __lsx_vmul_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffd27db010d20fbf; ++ *((unsigned long *)&__m128i_op1[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffd27db010d20fbf; ++ *((unsigned long *)&__m128i_result[1]) = 0x9727b8499727b849; ++ *((unsigned long *)&__m128i_result[0]) = 0x12755900b653f081; ++ __m128i_out = __lsx_vmul_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0303030303030303; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0303030303030303; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x02f3030303030303; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x06d9090909090909; ++ __m128i_out = __lsx_vmul_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffff81ffff7f03; ++ *((unsigned long *)&__m128i_op0[0]) = 0x04ffff8101ff81ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0a0000000a000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0a0000000a000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0a0000001e000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0a000000f6000000; ++ __m128i_out = __lsx_vmul_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x317fce80317fce80; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmul_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-1.c +new file mode 100644 +index 000000000..8357f4e80 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-1.c +@@ -0,0 +1,434 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000158; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ 
__m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x001f7fc100000404; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000002a000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fff0101ffffe000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fffffffa0204000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffe1ffc100000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000400000; ++ __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000009000900; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000009000900; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x30eb022002101b20; ++ *((unsigned long *)&__m128i_op0[0]) = 0x020310edc003023d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffc3; ++ __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vmulwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xff9dff9dff9dff9d; ++ __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000efffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_w_h 
(__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffe50000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffe020; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3fc00000010a000b; ++ *((unsigned long *)&__m128i_result[1]) = 0x00001b0000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ff81007c; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffb7005f0070007c; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000104000800; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000007c; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000005f0003e000; ++ __m128i_out = __lsx_vmulwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4040404040404040; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffbfc0ffffbfc0; ++ __m128i_out = __lsx_vmulwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000ffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000ffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffff0100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffff0100000001; ++ __m128i_out = __lsx_vmulwev_w_h (__m128i_op0, 
__m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000208000002080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000208000002080; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000004870ba0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x478b478b38031779; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6b769e690fa1e119; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000fe98c2a0; ++ __m128i_out = __lsx_vmulwev_w_h (__m128i_op0, __m128i_op1); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000004000000040; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00007770ffff9411; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000004000000040; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00007770ffff9411; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000100000001000; ++ *((unsigned long *)&__m128i_result[0]) = 0x37b951002d81a921; ++ __m128i_out = __lsx_vmulwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000e0000000e0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000e0000000e0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000c400; ++ __m128i_out = __lsx_vmulwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vmulwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000ffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff80000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffb4ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffb4ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000016; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffb4ff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffff98dea; ++ __m128i_out = __lsx_vmulwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0002000000000007; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000f80007; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000006c80031; ++ __m128i_out = __lsx_vmulwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000101010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000001010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000010001; ++ __m128i_out = __lsx_vmulwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000202020200; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0004280808080808; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010203030201000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000808080800; ++ __m128i_out = __lsx_vmulwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000b5207f80; ++ *((unsigned long *)&__m128i_op1[1]) = 0x2000000020000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000200200000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x6a57a30ff0000000; ++ __m128i_out = __lsx_vmulwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffff7; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffff700000009; ++ __m128i_out = __lsx_vmulwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000104000800; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8001000180010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8001000184000800; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff80007e028401; ++ *((unsigned long *)&__m128i_result[0]) = 0x9a10144000400000; ++ __m128i_out = __lsx_vmulwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000bd003d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000fffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0010000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000077af9450; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000400000004c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000047404f4f040d; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000214f; ++ *((unsigned long *)&__m128i_result[0]) = 0xc31b63d846ebc810; ++ __m128i_out = __lsx_vmulwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000002000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000002000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git 
a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-2.c +new file mode 100644 +index 000000000..e4afc8247 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-2.c +@@ -0,0 +1,344 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfe01fe01fe01fe01; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000fe01fe01; ++ __m128i_out = __lsx_vmulwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000200020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6a5d5b056f2f4978; ++ *((unsigned long *)&__m128i_op0[0]) = 0x17483c07141b5971; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xd4bade5e2e902836; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x345002920f3017d6; ++ __m128i_out = __lsx_vmulwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000c0010000a186; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00067fff0002a207; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffff0002; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000ff0000857a; ++ *((unsigned long *)&__m128i_result[0]) = 0x05fafe0101fe000e; ++ __m128i_out = __lsx_vmulwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc1f03e1042208410; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x00f0001000000010; ++ __m128i_out = __lsx_vmulwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000eefff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf8e1a03affffe3e2; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3a80613fda5dcb4a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x93f0b81a914c003b; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000051649b6; ++ *((unsigned long *)&__m128i_result[0]) = 0xd2f005e44bb43416; ++ __m128i_out = __lsx_vmulwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000001fc0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fffffff7fffffff; 
++ *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001fffe00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffff000f0008d3c; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffff0016fff8d3d; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffff000f0008d3c; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffff0016fff8d3d; ++ *((unsigned long *)&__m128i_result[1]) = 0xe10000004deb2610; ++ *((unsigned long *)&__m128i_result[0]) = 0xe101e0014dec4089; ++ __m128i_out = __lsx_vmulwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x111110ff11111141; ++ *((unsigned long *)&__m128i_op1[0]) = 0x11111131111116a6; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x2028000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001001100110068; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x67eb85afb2ebb000; ++ *((unsigned long *)&__m128i_op0[0]) 
= 0xc8847ef6ed3f2000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xd400c02000002acf; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf4000020c4000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x6453f5e01d6e5000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000fdec000000000; ++ __m128i_out = __lsx_vmulwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xbfd10d0d7b6b6b73; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc5c534920000c4ed; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001700000017; ++ *((unsigned long *)&__m128i_op0[0]) = 0x59f7fd8759f7fd87; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000001700000017; ++ *((unsigned long *)&__m128i_op1[0]) = 0x59f7fd8759f7fd87; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000021100000211; ++ *((unsigned long *)&__m128i_result[0]) = 0xfb141d31fb141d31; ++ __m128i_out = __lsx_vmulwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7f800000976801fe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x837c1ae57f8012ed; ++ *((unsigned long *)&__m128i_result[1]) = 0x976801fd6897fe02; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f8012ec807fed13; ++ __m128i_out = __lsx_vmulwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000008000000080; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0080000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000100010001fffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000800080; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0002ffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0909090900000909; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0909090909090909; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; 
++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000100; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3a80613fda5dcb4a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x93f0b81a914c003b; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x0000feff23560000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000fd1654860000; ++ *((unsigned long *)&__m128i_result[1]) = 0x1e242e4d68dc0000; ++ *((unsigned long *)&__m128i_result[0]) = 0x2ff8fddb7ae20000; ++ __m128i_out = __lsx_vmulwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000060000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) 
= 0x0000060000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7ff000ff6220c0c1; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffe8081000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000007ff000ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-3.c +new file mode 100644 +index 000000000..346f0316a +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-3.c +@@ -0,0 +1,245 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000017fda829; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_result[1]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m128i_result[0]) = 0xff01ff01ff01fc10; ++ __m128i_out = __lsx_vmulwev_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0042003e0042002f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001fffc0001fffc; ++ *((unsigned long *)&__m128i_op1[1]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffbeffc2ffbeffd1; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000003f80; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x80000000fff80000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x80000000fff80000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000004000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfff8004000000000; ++ __m128i_out = __lsx_vmulwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffff8607db959f; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff0cff78ff96ff14; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000008a0000008a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000008900000009; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000043c5ea7b6; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000008fc4ef7b4; ++ __m128i_out = __lsx_vmulwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffff46; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffe00000002; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffff46000000ba; ++ __m128i_out = __lsx_vmulwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 
0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf8f8372f752402ee; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffc0000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x80044def00000001; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ff000000ff00; ++ *((unsigned long *)&__m128i_result[1]) = 0x00007f8449a19084; ++ *((unsigned long *)&__m128i_result[0]) = 0x49a210000000ff00; ++ __m128i_out = __lsx_vmulwev_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffcfd000000fb00; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001fe00f8000700; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0xfdfef9ff0efff900; ++ __m128i_out = __lsx_vmulwev_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7efefefe82010201; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x7afafaf88a050a05; ++ __m128i_out = __lsx_vmulwev_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xcda585aebbb2836a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xcda585aebbb2836a; ++ *((unsigned long *)&__m128i_result[1]) = 0xd78cfd70b5f65d76; ++ *((unsigned long *)&__m128i_result[0]) = 0x5779108fdedda7e4; ++ __m128i_out = __lsx_vmulwev_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x67eb85afb2ebb000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x67eb85afb2ebb000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long *)&__m128i_result[1]) = 0xd48acbfe13102acf; ++ *((unsigned long *)&__m128i_result[0]) = 0xf4af70d0c4000000; ++ __m128i_out = __lsx_vmulwev_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3a8000003a800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000ef0000000003b; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x0000000000000056; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffff86; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000000eefff; ++ *((unsigned long *)&__m128i_result[0]) = 0xf8e1a03affffe3e2; ++ __m128i_out = __lsx_vmulwev_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-1.c +new file mode 100644 +index 000000000..6eea49a61 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-1.c +@@ -0,0 +1,272 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000006; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x0100010000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0100010000010000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000003ddc5dac; ++ *((unsigned long *)&__m128i_op1[1]) = 0x67157b5100005000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x387c7e0a133f2000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000004870ba0; ++ __m128i_out = __lsx_vmulwod_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfefe000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000155; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff8001ffff8001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7ff0000000000000; ++ 
*((unsigned long *)&__m128i_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x3ff0010000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x3ff0010000000000; ++ __m128i_out = __lsx_vmulwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00009c7c00007176; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00009c7c00007176; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x440ef000440ef000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4400000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3a8000003a800000; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x000ef0000000003b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0f8d33000f8d3300; ++ *((unsigned long *)&__m128i_result[0]) = 0x0003b80000000000; ++ __m128i_out = __lsx_vmulwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0018001800180018; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0018001800180018; ++ *((unsigned long *)&__m128i_op1[1]) = 0x85bd6b0e94d89998; ++ *((unsigned long *)&__m128i_op1[0]) = 0xd83c8081ffff808f; ++ *((unsigned long *)&__m128i_result[1]) = 0xfff489b693120950; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffc45a851c40c18; ++ __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffe5; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffe5; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3e1f321529232736; ++ *((unsigned long *)&__m128i_op1[0]) = 
0x161d0c373c200826; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000000010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000000010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x00003f8000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00003f8000000000; ++ __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fff000000007fff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffffffdfffdf; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffe000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000c6fde000; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000fef01000f27ca; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000010000010101; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0101000001000100; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000ffef0010000; ++ __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffe0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000005452505; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000044525043c; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3fc03fc000000004; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffc03fc040; ++ __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000000a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000a; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00fe000100cf005f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fff7fff7fff7f00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000400028000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xc110000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc00d060000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x40f3fa0000000000; ++ *((unsigned long *)&__m128i_result[1]) = 
0xf047ef0000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00007fff7fff8000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff100000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000f0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-2.c +new file mode 100644 +index 000000000..f3e4e0390 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-2.c +@@ -0,0 +1,282 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, 
lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x004e005500060031; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff870068fff5ffb3; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000200010; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfa31dfa21672e711; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1304db85e468073a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000150000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffeffff001effff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000fffff1a0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000f00f; ++ __m128i_out = __lsx_vmulwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x4); ++ *((unsigned long *)&__m128i_op0[1]) = 0xe2560afe9c001a18; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe2560afe9c001a18; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000ff0000857a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x05fafe0101fe000e; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000d82; ++ *((unsigned long *)&__m128i_result[0]) = 0x046a09ec009c0000; ++ __m128i_out = __lsx_vmulwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfe80ffffffffff02; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fff7fff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f3f018000000000; ++ __m128i_out = __lsx_vmulwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xf0fd800080000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000a00028004000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000005a00000228; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffff9ee000004ec; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffacdb6dbecac; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1f5533a694f902c0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x1f54e0ab00000000; ++ __m128i_out = __lsx_vmulwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00e4880080000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0080810080808100; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff011fb11181d8ea; ++ *((unsigned long *)&__m128i_op0[0]) = 0x80ff800000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00fe00fe000200fe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00fe00fe000200fe; ++ *((unsigned long *)&__m128i_result[1]) = 0x00fd02fe00002302; ++ *((unsigned long *)&__m128i_result[0]) = 0x007ffd0200000000; ++ __m128i_out = __lsx_vmulwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffe0001fffe0001; ++ __m128i_out = __lsx_vmulwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x14ccc6320076a4d2; ++ *((unsigned long *)&__m128i_op0[0]) = 0x685670d27e00682a; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x14ccc631eb3339ce; ++ *((unsigned long *)&__m128i_result[0]) = 0x685670d197a98f2e; ++ __m128i_out = __lsx_vmulwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x003fffc0ffc0003f; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffc0ffc0003f003f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000400000004c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00007770ffff941d; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000ffff000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000077529b522400; ++ __m128i_out = __lsx_vmulwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001000000010; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1111113111111141; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1111113111111121; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000111111312; ++ *((unsigned long *)&__m128i_result[0]) = 0x2222272111111410; ++ __m128i_out = __lsx_vmulwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000001c88bf0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000001c88bf0; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffff800000003; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff0015172b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003fffffff800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000600000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000c6c6c6c6; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000c6c6c6c6; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x000000000000c6c7; ++ *((unsigned long *)&__m128i_result[0]) = 0x8d8d8d8d8d8cc6c6; ++ __m128i_out = __lsx_vmulwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0a0000000a000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0a0000000a000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7f7f00007f7f7500; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3b42017f3a7f7f01; ++ *((unsigned long *)&__m128i_result[1]) = 0x04faf60009f5f092; ++ *((unsigned long *)&__m128i_result[0]) = 0x04fafa9200000000; ++ __m128i_out = __lsx_vmulwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-3.c +new file mode 100644 +index 000000000..9f5702e2c +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-3.c +@@ -0,0 +1,308 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xbf8000000000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xcf00000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000002; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ff020000fff4; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fc000007fc00000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1e801ffc7fc00000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00001ee100000000; ++ __m128i_out = __lsx_vmulwod_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000010; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000010; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3f5ec0a0feefa0b0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ff02d060; ++ __m128i_out = __lsx_vmulwod_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x80000000fff8fff8; ++ *((unsigned long *)&__m128i_op0[0]) = 0x80000000fff80000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001000000010; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000001000000010; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000001000100; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000001000100; ++ __m128i_out = __lsx_vmulwod_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000004a294b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000006d04bc; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x55aa55aa55aa55ab; ++ *((unsigned long *)&__m128i_op0[0]) = 0xaa55555655aaaaa8; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7ef4002d21fc7001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x28bf02d1ec6a35b2; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x2a7b7c9260f90ee2; ++ *((unsigned long *)&__m128i_result[0]) = 0x1b1c6cdfd57f5736; ++ __m128i_out = __lsx_vmulwod_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000004040504; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000004040504; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000010100000101; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000010100000101; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000feff01; ++ *((unsigned long *)&__m128i_result[0]) = 0x00feff0100000000; ++ __m128i_out = __lsx_vmulwod_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0101010202050120; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0101010102020202; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xf51cf8dad6040188; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0982e2daf234ed87; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0ae3072529fbfe78; ++ __m128i_out = __lsx_vmulwod_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000000007f8; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000007f8; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ff000000ff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x030804010d090107; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1313131313131313; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1313131313131313; ++ *((unsigned long *)&__m128i_result[1]) = 0x0039d21e3229d4e8; ++ *((unsigned long *)&__m128i_result[0]) = 0x6d339b4f3b439885; ++ __m128i_out = __lsx_vmulwod_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +-- +2.33.0 + diff --git a/LoongArch-Add-tests-of-mstrict-align-option.patch b/LoongArch-Add-tests-of-mstrict-align-option.patch new file mode 100644 index 0000000000000000000000000000000000000000..f35b146de43582e6c50d7da0195625a3a86ecd25 --- /dev/null +++ 
b/LoongArch-Add-tests-of-mstrict-align-option.patch @@ -0,0 +1,37 @@ +From f07b91862055533d779fbf76c12cb7c0ae75b53d Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Mon, 11 Sep 2023 09:35:24 +0800 +Subject: [PATCH 076/124] LoongArch: Add tests of -mstrict-align option. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/strict-align.c: New test. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/testsuite/gcc.target/loongarch/strict-align.c | 12 ++++++++++++ + 1 file changed, 12 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/strict-align.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/strict-align.c b/gcc/testsuite/gcc.target/loongarch/strict-align.c +new file mode 100644 +index 000000000..040d84958 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/strict-align.c +@@ -0,0 +1,12 @@ ++/* { dg-do compile } */ ++/* { dg-options "-Ofast -mstrict-align -mlasx" } */ ++/* { dg-final { scan-assembler-not "vfadd.s" } } */ ++ ++void ++foo (float *restrict x, float *restrict y) ++{ ++ x[0] = x[0] + y[0]; ++ x[1] = x[1] + y[1]; ++ x[2] = x[2] + y[2]; ++ x[3] = x[3] + y[3]; ++} +-- +2.33.0 + diff --git a/LoongArch-Add-testsuite-framework-for-Loongson-SX-AS.patch b/LoongArch-Add-testsuite-framework-for-Loongson-SX-AS.patch new file mode 100644 index 0000000000000000000000000000000000000000..23e5f38a96c08b93ac5363cc030c57391fda7987 --- /dev/null +++ b/LoongArch-Add-testsuite-framework-for-Loongson-SX-AS.patch @@ -0,0 +1,131 @@ +From aebd03c944312be767f03d129eeebc0c4cdf5b4a Mon Sep 17 00:00:00 2001 +From: Xiaolong Chen +Date: Mon, 11 Sep 2023 09:36:35 +0800 +Subject: [PATCH 077/124] LoongArch: Add testsuite framework for Loongson + SX/ASX. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/loongarch-vector.exp: New test. + * gcc.target/loongarch/vector/simd_correctness_check.h: New test. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../loongarch/vector/loongarch-vector.exp | 42 +++++++++++++++ + .../loongarch/vector/simd_correctness_check.h | 54 +++++++++++++++++++ + 2 files changed, 96 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/loongarch-vector.exp + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/simd_correctness_check.h + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/loongarch-vector.exp b/gcc/testsuite/gcc.target/loongarch/vector/loongarch-vector.exp +new file mode 100644 +index 000000000..2c37aa91d +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/loongarch-vector.exp +@@ -0,0 +1,42 @@ ++#Copyright(C) 2023 Free Software Foundation, Inc. ++ ++#This program is free software; you can redistribute it and / or modify ++#it under the terms of the GNU General Public License as published by ++#the Free Software Foundation; either version 3 of the License, or ++#(at your option) any later version. ++# ++#This program is distributed in the hope that it will be useful, ++#but WITHOUT ANY WARRANTY; without even the implied warranty of ++#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the ++#GNU General Public License for more details. ++# ++#You should have received a copy of the GNU General Public License ++#along with GCC; see the file COPYING3.If not see ++# . ++ ++#GCC testsuite that uses the `dg.exp' driver. ++ ++#Exit immediately if this isn't a LoongArch target. ++if ![istarget loongarch*-*-*] then { ++ return ++} ++ ++#Load support procs. ++load_lib gcc-dg.exp ++ ++#If a testcase doesn't have special options, use these. ++global DEFAULT_CFLAGS ++if ![info exists DEFAULT_CFLAGS] then { ++ set DEFAULT_CFLAGS " " ++} ++ ++#Initialize `dg'. ++dg-init ++ ++#Main loop. 
++dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/lsx/*.\[cS\]]] \ ++ " -mlsx" $DEFAULT_CFLAGS ++dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/lasx/*.\[cS\]]] \ ++ " -mlasx" $DEFAULT_CFLAGS ++# All done. ++dg-finish +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/simd_correctness_check.h b/gcc/testsuite/gcc.target/loongarch/vector/simd_correctness_check.h +new file mode 100644 +index 000000000..eb7fbd59c +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/simd_correctness_check.h +@@ -0,0 +1,54 @@ ++#include ++#include ++#include ++ ++#define ASSERTEQ_64(line, ref, res) \ ++ do \ ++ { \ ++ int fail = 0; \ ++ for (size_t i = 0; i < sizeof (res) / sizeof (res[0]); ++i) \ ++ { \ ++ long *temp_ref = &ref[i], *temp_res = &res[i]; \ ++ if (abs (*temp_ref - *temp_res) > 0) \ ++ { \ ++ printf (" error: %s at line %ld , expected " #ref \ ++ "[%ld]:0x%lx, got: 0x%lx\n", \ ++ __FILE__, line, i, *temp_ref, *temp_res); \ ++ fail = 1; \ ++ } \ ++ } \ ++ if (fail == 1) \ ++ abort (); \ ++ } \ ++ while (0) ++ ++#define ASSERTEQ_32(line, ref, res) \ ++ do \ ++ { \ ++ int fail = 0; \ ++ for (size_t i = 0; i < sizeof (res) / sizeof (res[0]); ++i) \ ++ { \ ++ int *temp_ref = &ref[i], *temp_res = &res[i]; \ ++ if (abs (*temp_ref - *temp_res) > 0) \ ++ { \ ++ printf (" error: %s at line %ld , expected " #ref \ ++ "[%ld]:0x%x, got: 0x%x\n", \ ++ __FILE__, line, i, *temp_ref, *temp_res); \ ++ fail = 1; \ ++ } \ ++ } \ ++ if (fail == 1) \ ++ abort (); \ ++ } \ ++ while (0) ++ ++#define ASSERTEQ_int(line, ref, res) \ ++ do \ ++ { \ ++ if (ref != res) \ ++ { \ ++ printf (" error: %s at line %ld , expected %d, got %d\n", __FILE__, \ ++ line, ref, res); \ ++ } \ ++ } \ ++ while (0) +-- +2.33.0 + diff --git a/LoongArch-Adjust-C-multilib-header-layout.patch b/LoongArch-Adjust-C-multilib-header-layout.patch new file mode 100644 index 0000000000000000000000000000000000000000..8fc16d8c6e505784cf7b95ca7d5e9e10687bcea1 --- /dev/null +++ 
b/LoongArch-Adjust-C-multilib-header-layout.patch @@ -0,0 +1,53 @@ +From a4bf17e87a965ed7f2bb1d2921fb9dd820c79a96 Mon Sep 17 00:00:00 2001 +From: Yang Yujie +Date: Thu, 7 Sep 2023 14:50:10 +0800 +Subject: [PATCH 069/124] LoongArch: Adjust C++ multilib header layout. + +For LoongArch, the toplevel library build is always aliased to +one of the multilib variants. This patch installs it with the +actual MULTISUBDIR (instead of ".") so that the headers can be +reached by the compiler. + +This patch is an update of +https://gcc.gnu.org/pipermail/gcc-patches/2023-September/629435.html + +libstdc++-v3/ChangeLog: + + * configure.host: Register t-loongarch in tmake_file. + * config/cpu/loongarch/t-loongarch: New file. Manually refresh + MULTISUBDIR with $(shell $(CXX) --print-multi-directory). + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + libstdc++-v3/config/cpu/loongarch/t-loongarch | 1 + + libstdc++-v3/configure.host | 5 ++++- + 2 files changed, 5 insertions(+), 1 deletion(-) + create mode 100644 libstdc++-v3/config/cpu/loongarch/t-loongarch + +diff --git a/libstdc++-v3/config/cpu/loongarch/t-loongarch b/libstdc++-v3/config/cpu/loongarch/t-loongarch +new file mode 100644 +index 000000000..adfc8ebb9 +--- /dev/null ++++ b/libstdc++-v3/config/cpu/loongarch/t-loongarch +@@ -0,0 +1 @@ ++AM_MAKEFLAGS += " MULTISUBDIR=/$(shell $(CXX) --print-multi-directory)" +diff --git a/libstdc++-v3/configure.host b/libstdc++-v3/configure.host +index ec32980aa..592160e6d 100644 +--- a/libstdc++-v3/configure.host ++++ b/libstdc++-v3/configure.host +@@ -315,7 +315,10 @@ esac + # Set any OS-dependent and CPU-dependent bits. + # THIS TABLE IS SORTED. KEEP IT THAT WAY. 
+ case "${host}" in +- *-*-linux* | *-*-uclinux*) ++ loongarch*) ++ tmake_file="cpu/loongarch/t-loongarch" ++ ;; ++ *-*-linux* | *-*-uclinux*) + case "${host_cpu}" in + i[567]86) + abi_baseline_pair=i486-linux-gnu +-- +2.33.0 + diff --git a/LoongArch-Avoid-RTL-flag-check-failure-in-loongarch_.patch b/LoongArch-Avoid-RTL-flag-check-failure-in-loongarch_.patch new file mode 100644 index 0000000000000000000000000000000000000000..d034f41c68d9b8cfe65f7ef95afb71c6b842ce0f --- /dev/null +++ b/LoongArch-Avoid-RTL-flag-check-failure-in-loongarch_.patch @@ -0,0 +1,55 @@ +From e82403e918e18fa8e8ecd0c9e26f2657cc814e12 Mon Sep 17 00:00:00 2001 +From: Xi Ruoyao +Date: Wed, 24 Aug 2022 21:31:34 +0800 +Subject: [PATCH 013/124] LoongArch: Avoid RTL flag check failure in + loongarch_classify_symbol + +SYMBOL_REF_TLS_MODEL invokes SYMBOL_REF_FLAGS, and SYMBOL_REF_FLAGS +invokes RTL_FLAG_CHECK1 and aborts when RTL code is not SYMBOL_REF. + +r13-1833 removed "gcc_assert (SYMBOL_REF_P (x))" before invoking +"SYMBOL_REF_TLS_MODEL (x)", indicating that it's now possible that "x" +is not a SYMBOL_REF. So we need to check if "x" is SYMBOL_REF first. + +This fixes a test failure happening with r13-2173 with RTL flag +checking enabled: + + pr106096.C:26:1: internal compiler error: RTL flag check: + SYMBOL_REF_FLAGS used with unexpected rtx code 'const' in + loongarch_classify_symbol + +gcc/ChangeLog: + + * config/loongarch/loongarch.cc (loongarch_classify_symbol): + Return early if the rtx is not SYMBOL_REF. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/loongarch.cc | 5 ++--- + 1 file changed, 2 insertions(+), 3 deletions(-) + +diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc +index 04c4ddaed..452aba9d4 100644 +--- a/gcc/config/loongarch/loongarch.cc ++++ b/gcc/config/loongarch/loongarch.cc +@@ -1633,14 +1633,13 @@ loongarch_rtx_constant_in_small_data_p (machine_mode mode) + static enum loongarch_symbol_type + loongarch_classify_symbol (const_rtx x) + { +- if (LABEL_REF_P (x)) ++ if (!SYMBOL_REF_P (x)) + return SYMBOL_PCREL; + + if (SYMBOL_REF_TLS_MODEL (x)) + return SYMBOL_TLS; + +- if (SYMBOL_REF_P (x) +- && !loongarch_symbol_binds_local_p (x)) ++ if (!loongarch_symbol_binds_local_p (x)) + return SYMBOL_GOT_DISP; + + return SYMBOL_PCREL; +-- +2.33.0 + diff --git a/LoongArch-Avoid-non-returning-indirect-jumps-through.patch b/LoongArch-Avoid-non-returning-indirect-jumps-through.patch new file mode 100644 index 0000000000000000000000000000000000000000..3fa6556c964758a3b0ce09f0ae207dbcb521a741 --- /dev/null +++ b/LoongArch-Avoid-non-returning-indirect-jumps-through.patch @@ -0,0 +1,62 @@ +From 7e759740048ee6f24c1055c32868fa21cabb4f75 Mon Sep 17 00:00:00 2001 +From: Lulu Cheng +Date: Wed, 7 Jun 2023 10:21:58 +0800 +Subject: [PATCH 048/124] LoongArch: Avoid non-returning indirect jumps through + $ra [PR110136] + +Micro-architecture unconditionally treats a "jr $ra" as "return from subroutine", +hence doing "jr $ra" would interfere with both subroutine return prediction and +the more general indirect branch prediction. + +Therefore, a problem like PR110136 can cause a significant increase in branch error +prediction rate and affect performance. The same problem exists with "indirect_jump". + +gcc/ChangeLog: + + PR target/110136 + * config/loongarch/loongarch.md: Modify the register constraints for template + "jumptable" and "indirect_jump" from "r" to "e". 
+ +Co-authored-by: Andrew Pinski +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/loongarch.md | 8 ++++++-- + 1 file changed, 6 insertions(+), 2 deletions(-) + +diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md +index b23248c33..c79951c1d 100644 +--- a/gcc/config/loongarch/loongarch.md ++++ b/gcc/config/loongarch/loongarch.md +@@ -2895,6 +2895,10 @@ + } + [(set_attr "type" "branch")]) + ++;; Micro-architecture unconditionally treats a "jr $ra" as "return from subroutine", ++;; non-returning indirect jumps through $ra would interfere with both subroutine ++;; return prediction and the more general indirect branch prediction. ++ + (define_expand "indirect_jump" + [(set (pc) (match_operand 0 "register_operand"))] + "" +@@ -2905,7 +2909,7 @@ + }) + + (define_insn "@indirect_jump" +- [(set (pc) (match_operand:P 0 "register_operand" "r"))] ++ [(set (pc) (match_operand:P 0 "register_operand" "e"))] + "" + "jr\t%0" + [(set_attr "type" "jump") +@@ -2928,7 +2932,7 @@ + + (define_insn "@tablejump" + [(set (pc) +- (match_operand:P 0 "register_operand" "r")) ++ (match_operand:P 0 "register_operand" "e")) + (use (label_ref (match_operand 1 "" "")))] + "" + "jr\t%0" +-- +2.33.0 + diff --git a/LoongArch-Change-the-default-value-of-LARCH_CALL_RAT.patch b/LoongArch-Change-the-default-value-of-LARCH_CALL_RAT.patch new file mode 100644 index 0000000000000000000000000000000000000000..f18f741064c68082620c224da9595d7b1f736c3c --- /dev/null +++ b/LoongArch-Change-the-default-value-of-LARCH_CALL_RAT.patch @@ -0,0 +1,41 @@ +From 59824f1062d77d0e02ea82d47415bf95c235de87 Mon Sep 17 00:00:00 2001 +From: chenxiaolong +Date: Thu, 15 Jun 2023 02:46:24 +0000 +Subject: [PATCH 046/124] LoongArch: Change the default value of + LARCH_CALL_RATIO to 6. + +During the regression testing of the LoongArch architecture GCC, it was found +that the tests in the pr90883.C file failed. 
The problem was modulated and +found that the error was caused by setting the macro LARCH_CALL_RATIO to a too +large value. Combined with the actual LoongArch architecture, the different +thresholds for meeting the test conditions were tested using the engineering method +(SPEC CPU 2006), and the results showed that its optimal threshold should be set +to 6. + +gcc/ChangeLog: + + * config/loongarch/loongarch.h (LARCH_CALL_RATIO): Modify the value + of macro LARCH_CALL_RATIO on LoongArch to make it perform optimally. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/loongarch.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h +index 44ebadfaa..0e35d4dec 100644 +--- a/gcc/config/loongarch/loongarch.h ++++ b/gcc/config/loongarch/loongarch.h +@@ -1073,7 +1073,7 @@ typedef struct { + /* The base cost of a memcpy call, for MOVE_RATIO and friends. These + values were determined experimentally by benchmarking with CSiBE. + */ +-#define LARCH_CALL_RATIO 8 ++#define LARCH_CALL_RATIO 6 + + /* Any loop-based implementation of cpymemsi will have at least + LARCH_MAX_MOVE_OPS_PER_LOOP_ITER memory-to-memory +-- +2.33.0 + diff --git a/LoongArch-Change-the-value-of-branch_cost-from-2-to-.patch b/LoongArch-Change-the-value-of-branch_cost-from-2-to-.patch new file mode 100644 index 0000000000000000000000000000000000000000..7b92012534ad57a905f10ffc49236411d5c279e4 --- /dev/null +++ b/LoongArch-Change-the-value-of-branch_cost-from-2-to-.patch @@ -0,0 +1,69 @@ +From 7e843ed8da168a05eb04eee0b14cbe681bf798fe Mon Sep 17 00:00:00 2001 +From: Lulu Cheng +Date: Wed, 13 Sep 2023 11:01:34 +0800 +Subject: [PATCH 123/124] LoongArch: Change the value of branch_cost from 2 to + 6. + +gcc/ChangeLog: + + * config/loongarch/loongarch-def.c: Modify the default value of + branch_cost. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/cmov_ii.c: New test. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/loongarch-def.c | 4 ++-- + gcc/testsuite/gcc.target/loongarch/cmov_ii.c | 15 +++++++++++++++ + 2 files changed, 17 insertions(+), 2 deletions(-) + create mode 100644 gcc/testsuite/gcc.target/loongarch/cmov_ii.c + +diff --git a/gcc/config/loongarch/loongarch-def.c b/gcc/config/loongarch/loongarch-def.c +index d29d5f001..eeb32dbf6 100644 +--- a/gcc/config/loongarch/loongarch-def.c ++++ b/gcc/config/loongarch/loongarch-def.c +@@ -85,7 +85,7 @@ loongarch_cpu_align[N_TUNE_TYPES] = { + .int_mult_di = COSTS_N_INSNS (1), \ + .int_div_si = COSTS_N_INSNS (4), \ + .int_div_di = COSTS_N_INSNS (6), \ +- .branch_cost = 2, \ ++ .branch_cost = 6, \ + .memory_latency = 4 + + /* The following properties cannot be looked up directly using "cpucfg". +@@ -118,7 +118,7 @@ loongarch_rtx_cost_optimize_size = { + .int_mult_di = 4, + .int_div_si = 4, + .int_div_di = 4, +- .branch_cost = 2, ++ .branch_cost = 6, + .memory_latency = 4, + }; + +diff --git a/gcc/testsuite/gcc.target/loongarch/cmov_ii.c b/gcc/testsuite/gcc.target/loongarch/cmov_ii.c +new file mode 100644 +index 000000000..21b468e8a +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/cmov_ii.c +@@ -0,0 +1,15 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2" } */ ++/* { dg-final { scan-assembler "test:.*xor.*maskeqz.*masknez.*or.*" } } */ ++ ++extern void foo_ii (int *, int *, int *, int *); ++ ++int ++test (void) ++{ ++ int a, b; ++ int c, d, out; ++ foo_ii (&a, &b, &c, &d); ++ out = a == b ? 
c : d; ++ return out; ++} +-- +2.33.0 + diff --git a/LoongArch-Change-the-value-of-macro-TRY_EMPTY_VM_SPA.patch b/LoongArch-Change-the-value-of-macro-TRY_EMPTY_VM_SPA.patch new file mode 100644 index 0000000000000000000000000000000000000000..c8840b82d9b2daa640fe13d5d9caf79203cd4efc --- /dev/null +++ b/LoongArch-Change-the-value-of-macro-TRY_EMPTY_VM_SPA.patch @@ -0,0 +1,49 @@ +From 6e9265e571a63deb2584704a0b088a6d67ec8af5 Mon Sep 17 00:00:00 2001 +From: Lulu Cheng +Date: Mon, 20 Feb 2023 16:47:11 +0800 +Subject: [PATCH 037/124] LoongArch: Change the value of macro + TRY_EMPTY_VM_SPACE from 0x8000000000 to 0x1000000000. + +The PCH mechanism first tries to map the .gch file to the virtual memory +space pointed to by TRY_EMPTY_VM_SPACE during the compilation process. + +The original value of TRY_EMPTY_VM_SPACE macro is 0x8000000000, +but like la464 only has 40 bits of virtual address space, this value +just exceeds the address range. + +If we want to support chips with less than 40 bits virtual addresses, +then the value of this macro needs to be set small. I think setting +this value small will increase the probability of virtual address +mapping failure. And the purpose of pch is to make compilation faster, +but I think we rarely compile on embedded systems. So this situation +may not be within our consideration. + +So change the value of this macro to 0x1000000000. + +gcc/ChangeLog: + + * config/host-linux.cc (TRY_EMPTY_VM_SPACE): Modify the value of + the macro to 0x1000000000. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/host-linux.cc | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/gcc/config/host-linux.cc b/gcc/config/host-linux.cc +index 817d3c087..d93cfc064 100644 +--- a/gcc/config/host-linux.cc ++++ b/gcc/config/host-linux.cc +@@ -99,7 +99,7 @@ + #elif defined(__riscv) && defined (__LP64__) + # define TRY_EMPTY_VM_SPACE 0x1000000000 + #elif defined(__loongarch__) && defined(__LP64__) +-# define TRY_EMPTY_VM_SPACE 0x8000000000 ++# define TRY_EMPTY_VM_SPACE 0x1000000000 + #else + # define TRY_EMPTY_VM_SPACE 0 + #endif +-- +2.33.0 + diff --git a/LoongArch-Define-the-macro-ASM_PREFERRED_EH_DATA_FOR.patch b/LoongArch-Define-the-macro-ASM_PREFERRED_EH_DATA_FOR.patch new file mode 100644 index 0000000000000000000000000000000000000000..f5b0e6135e2dee7a2ed137b6ada7c7445fac8d43 --- /dev/null +++ b/LoongArch-Define-the-macro-ASM_PREFERRED_EH_DATA_FOR.patch @@ -0,0 +1,139 @@ +From 05c1df09c70cd0ed48f0644890f69a0128b17a98 Mon Sep 17 00:00:00 2001 +From: Lulu Cheng +Date: Fri, 29 Jul 2022 09:44:52 +0800 +Subject: [PATCH 008/124] LoongArch: Define the macro + ASM_PREFERRED_EH_DATA_FORMAT by checking the assembler's support for eh_frame + encoding. + +.eh_frame DW_EH_PE_pcrel encoding format is not supported by gas <= 2.39. +Check if the assembler support DW_EH_PE_PCREL encoding and define .eh_frame +encoding type. + +gcc/ChangeLog: + + * config.in: Regenerate. + * config/loongarch/loongarch.h (ASM_PREFERRED_EH_DATA_FORMAT): + Select the value of the macro definition according to whether + HAVE_AS_EH_FRAME_PCREL_ENCODING_SUPPORT is defined. + * configure: Regenerate. + * configure.ac: Reinstate HAVE_AS_EH_FRAME_PCREL_ENCODING_SUPPORT test. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config.in | 8 +++++++- + gcc/config/loongarch/loongarch.h | 5 +++++ + gcc/configure | 34 ++++++++++++++++++++++++++++++++ + gcc/configure.ac | 8 ++++++++ + 4 files changed, 54 insertions(+), 1 deletion(-) + +diff --git a/gcc/config.in b/gcc/config.in +index 64c27c9cf..67ce422f2 100644 +--- a/gcc/config.in ++++ b/gcc/config.in +@@ -404,13 +404,19 @@ + #endif + + ++/* Define if your assembler supports eh_frame pcrel encoding. */ ++#ifndef USED_FOR_TARGET ++#undef HAVE_AS_EH_FRAME_PCREL_ENCODING_SUPPORT ++#endif ++ ++ + /* Define if your assembler supports the R_PPC64_ENTRY relocation. */ + #ifndef USED_FOR_TARGET + #undef HAVE_AS_ENTRY_MARKERS + #endif + + +-/* Define if your assembler supports explicit relocations. */ ++/* Define if your assembler supports explicit relocation. */ + #ifndef USED_FOR_TARGET + #undef HAVE_AS_EXPLICIT_RELOCS + #endif +diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h +index 12f209047..a52a81adf 100644 +--- a/gcc/config/loongarch/loongarch.h ++++ b/gcc/config/loongarch/loongarch.h +@@ -1130,8 +1130,13 @@ struct GTY (()) machine_function + }; + #endif + ++#ifdef HAVE_AS_EH_FRAME_PCREL_ENCODING_SUPPORT ++#define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL) \ ++ (((GLOBAL) ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | DW_EH_PE_sdata4) ++#else + #define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL) \ + (((GLOBAL) ? DW_EH_PE_indirect : 0) | DW_EH_PE_absptr) ++#endif + + /* Do emit .note.GNU-stack by default. 
*/ + #ifndef NEED_INDICATE_EXEC_STACK +diff --git a/gcc/configure b/gcc/configure +index 840eddc7c..3788e240a 100755 +--- a/gcc/configure ++++ b/gcc/configure +@@ -28857,6 +28857,40 @@ if test $gcc_cv_as_loongarch_explicit_relocs = yes; then + + $as_echo "#define HAVE_AS_EXPLICIT_RELOCS 1" >>confdefs.h + ++fi ++ ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler for eh_frame pcrel encoding support" >&5 ++$as_echo_n "checking assembler for eh_frame pcrel encoding support... " >&6; } ++if ${gcc_cv_as_loongarch_eh_frame_pcrel_encoding_support+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ gcc_cv_as_loongarch_eh_frame_pcrel_encoding_support=no ++ if test x$gcc_cv_as != x; then ++ $as_echo '.cfi_startproc ++ .cfi_personality 0x9b,a ++ .cfi_lsda 0x1b,b ++ .cfi_endproc' > conftest.s ++ if { ac_try='$gcc_cv_as $gcc_cv_as_flags -o conftest.o conftest.s >&5' ++ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 ++ test $ac_status = 0; }; } ++ then ++ gcc_cv_as_loongarch_eh_frame_pcrel_encoding_support=yes ++ else ++ echo "configure: failed program was" >&5 ++ cat conftest.s >&5 ++ fi ++ rm -f conftest.o conftest.s ++ fi ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gcc_cv_as_loongarch_eh_frame_pcrel_encoding_support" >&5 ++$as_echo "$gcc_cv_as_loongarch_eh_frame_pcrel_encoding_support" >&6; } ++if test $gcc_cv_as_loongarch_eh_frame_pcrel_encoding_support = yes; then ++ ++$as_echo "#define HAVE_AS_EH_FRAME_PCREL_ENCODING_SUPPORT 1" >>confdefs.h ++ + fi + + ;; +diff --git a/gcc/configure.ac b/gcc/configure.ac +index 975c852c6..1c376e0d4 100644 +--- a/gcc/configure.ac ++++ b/gcc/configure.ac +@@ -5324,6 +5324,14 @@ x: + [a:pcalau12i $t0,%pc_hi20(a)],, + [AC_DEFINE(HAVE_AS_EXPLICIT_RELOCS, 1, + [Define if your assembler supports explicit relocation.])]) ++ gcc_GAS_CHECK_FEATURE([eh_frame pcrel encoding support], ++ gcc_cv_as_loongarch_eh_frame_pcrel_encoding_support,, ++ [.cfi_startproc ++ .cfi_personality 0x9b,a ++ .cfi_lsda 0x1b,b ++ .cfi_endproc],, ++ [AC_DEFINE(HAVE_AS_EH_FRAME_PCREL_ENCODING_SUPPORT, 1, ++ [Define if your assembler supports eh_frame pcrel encoding.])]) + ;; + s390*-*-*) + gcc_GAS_CHECK_FEATURE([.gnu_attribute support], +-- +2.33.0 + diff --git a/LoongArch-Don-t-add-crtfastmath.o-for-shared.patch b/LoongArch-Don-t-add-crtfastmath.o-for-shared.patch new file mode 100644 index 0000000000000000000000000000000000000000..88250406e851e01279d00031d38cd7706dcc469a --- /dev/null +++ b/LoongArch-Don-t-add-crtfastmath.o-for-shared.patch @@ -0,0 +1,34 @@ +From 2e19311d1bf4f932f5e67f6866123b895b12c97f Mon Sep 17 00:00:00 2001 +From: Richard Biener +Date: Fri, 13 Jan 2023 09:01:12 +0100 +Subject: [PATCH 035/124] LoongArch: Don't add crtfastmath.o for -shared + +Don't add crtfastmath.o for -shared to avoid altering the FP +environment when loading a shared library. 
+ + PR target/55522 + * config/loongarch/gnu-user.h (GNU_USER_TARGET_MATHFILE_SPEC): + Don't add crtfastmath.o for -shared. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/gnu-user.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/gcc/config/loongarch/gnu-user.h b/gcc/config/loongarch/gnu-user.h +index c5b1afe53..1dc6add62 100644 +--- a/gcc/config/loongarch/gnu-user.h ++++ b/gcc/config/loongarch/gnu-user.h +@@ -49,7 +49,7 @@ along with GCC; see the file COPYING3. If not see + /* Similar to standard Linux, but adding -ffast-math support. */ + #undef GNU_USER_TARGET_MATHFILE_SPEC + #define GNU_USER_TARGET_MATHFILE_SPEC \ +- "%{Ofast|ffast-math|funsafe-math-optimizations:crtfastmath.o%s}" ++ "%{Ofast|ffast-math|funsafe-math-optimizations:%{!shared:crtfastmath.o%s}}" + + #undef LIB_SPEC + #define LIB_SPEC GNU_USER_TARGET_LIB_SPEC +-- +2.33.0 + diff --git a/LoongArch-Enable-free-starting-at-O2.patch b/LoongArch-Enable-free-starting-at-O2.patch new file mode 100644 index 0000000000000000000000000000000000000000..7e6bbfb7d69f2c44bf6d1b919a57f3e4d1c00ffb --- /dev/null +++ b/LoongArch-Enable-free-starting-at-O2.patch @@ -0,0 +1,71 @@ +From 0369836718ffb25ac64c135e748f409302068a56 Mon Sep 17 00:00:00 2001 +From: Lulu Cheng +Date: Mon, 28 Aug 2023 11:30:21 +0800 +Subject: [PATCH 052/124] LoongArch: Enable '-free' starting at -O2. + +gcc/ChangeLog: + + * common/config/loongarch/loongarch-common.cc: + Enable '-free' on O2 and above. + * doc/invoke.texi: Modify the description information + of the '-free' compilation option and add the LoongArch + description. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/sign-extend.c: New test. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../config/loongarch/loongarch-common.cc | 1 + + .../gcc.target/loongarch/sign-extend.c | 25 +++++++++++++++++++ + 2 files changed, 26 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/sign-extend.c + +diff --git a/gcc/common/config/loongarch/loongarch-common.cc b/gcc/common/config/loongarch/loongarch-common.cc +index f8b4660fa..309fcb280 100644 +--- a/gcc/common/config/loongarch/loongarch-common.cc ++++ b/gcc/common/config/loongarch/loongarch-common.cc +@@ -35,6 +35,7 @@ static const struct default_options loongarch_option_optimization_table[] = + { + { OPT_LEVELS_ALL, OPT_fasynchronous_unwind_tables, NULL, 1 }, + { OPT_LEVELS_1_PLUS, OPT_fsection_anchors, NULL, 1 }, ++ { OPT_LEVELS_2_PLUS, OPT_free, NULL, 1 }, + { OPT_LEVELS_NONE, 0, NULL, 0 } + }; + +diff --git a/gcc/testsuite/gcc.target/loongarch/sign-extend.c b/gcc/testsuite/gcc.target/loongarch/sign-extend.c +new file mode 100644 +index 000000000..3f339d06b +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/sign-extend.c +@@ -0,0 +1,25 @@ ++/* { dg-do compile } */ ++/* { dg-options "-mabi=lp64d -O2" } */ ++/* { dg-final { scan-assembler-times "slli.w" 1 } } */ ++ ++extern int PL_savestack_ix; ++extern int PL_regsize; ++extern int PL_savestack_max; ++void Perl_savestack_grow_cnt (int need); ++extern void Perl_croak (char *); ++ ++int ++S_regcppush(int parenfloor) ++{ ++ int retval = PL_savestack_ix; ++ int paren_elems_to_push = (PL_regsize - parenfloor) * 4; ++ int p; ++ ++ if (paren_elems_to_push < 0) ++ Perl_croak ("panic: paren_elems_to_push < 0"); ++ ++ if (PL_savestack_ix + (paren_elems_to_push + 6) > PL_savestack_max) ++ Perl_savestack_grow_cnt (paren_elems_to_push + 6); ++ ++ return retval; ++} +-- +2.33.0 + diff --git a/LoongArch-Enable-fsched-pressure-by-default-at-O1-an.patch b/LoongArch-Enable-fsched-pressure-by-default-at-O1-an.patch new file mode 100644 index 
0000000000000000000000000000000000000000..52641c5d12d5b675149903980745d9a2b7df8fb5 --- /dev/null +++ b/LoongArch-Enable-fsched-pressure-by-default-at-O1-an.patch @@ -0,0 +1,33 @@ +From a9f72e237d5c176e4ef8ba03a8b4ee5c5daa25fb Mon Sep 17 00:00:00 2001 +From: Guo Jie +Date: Fri, 8 Sep 2023 10:00:21 +0800 +Subject: [PATCH 071/124] LoongArch: Enable -fsched-pressure by default at -O1 + and higher. + +gcc/ChangeLog: + + * common/config/loongarch/loongarch-common.cc: + (default_options loongarch_option_optimization_table): + Default to -fsched-pressure. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/common/config/loongarch/loongarch-common.cc | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/gcc/common/config/loongarch/loongarch-common.cc b/gcc/common/config/loongarch/loongarch-common.cc +index 309fcb280..c8bc5718d 100644 +--- a/gcc/common/config/loongarch/loongarch-common.cc ++++ b/gcc/common/config/loongarch/loongarch-common.cc +@@ -36,6 +36,7 @@ static const struct default_options loongarch_option_optimization_table[] = + { OPT_LEVELS_ALL, OPT_fasynchronous_unwind_tables, NULL, 1 }, + { OPT_LEVELS_1_PLUS, OPT_fsection_anchors, NULL, 1 }, + { OPT_LEVELS_2_PLUS, OPT_free, NULL, 1 }, ++ { OPT_LEVELS_1_PLUS, OPT_fsched_pressure, NULL, 1 }, + { OPT_LEVELS_NONE, 0, NULL, 0 } + }; + +-- +2.33.0 + diff --git a/LoongArch-Enable-shrink-wrapping.patch b/LoongArch-Enable-shrink-wrapping.patch new file mode 100644 index 0000000000000000000000000000000000000000..7d8302559d8671b7f96b1d2d704722685440ca4f --- /dev/null +++ b/LoongArch-Enable-shrink-wrapping.patch @@ -0,0 +1,309 @@ +From e86c9ece7ae922fe80017ba2ffe22f6267531682 Mon Sep 17 00:00:00 2001 +From: Xi Ruoyao +Date: Sun, 23 Apr 2023 20:52:22 +0800 +Subject: [PATCH 045/124] LoongArch: Enable shrink wrapping + +This commit implements the target macros for shrink wrapping of function +prologues/epilogues shrink wrapping on LoongArch. + +Bootstrapped and regtested on loongarch64-linux-gnu. 
I don't have an +access to SPEC CPU so I hope the reviewer can perform a benchmark to see +if there is real benefit. + +gcc/ChangeLog: + + * config/loongarch/loongarch.h (struct machine_function): Add + reg_is_wrapped_separately array for register wrapping + information. + * config/loongarch/loongarch.cc + (loongarch_get_separate_components): New function. + (loongarch_components_for_bb): Likewise. + (loongarch_disqualify_components): Likewise. + (loongarch_process_components): Likewise. + (loongarch_emit_prologue_components): Likewise. + (loongarch_emit_epilogue_components): Likewise. + (loongarch_set_handled_components): Likewise. + (TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS): Define. + (TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB): Likewise. + (TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS): Likewise. + (TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS): Likewise. + (TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS): Likewise. + (TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS): Likewise. + (loongarch_for_each_saved_reg): Skip registers that are wrapped + separately. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/shrink-wrap.c: New test. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/loongarch.cc | 179 +++++++++++++++++- + gcc/config/loongarch/loongarch.h | 2 + + .../gcc.target/loongarch/shrink-wrap.c | 19 ++ + 3 files changed, 197 insertions(+), 3 deletions(-) + create mode 100644 gcc/testsuite/gcc.target/loongarch/shrink-wrap.c + +diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc +index d3c6f22ad..4c0f393b6 100644 +--- a/gcc/config/loongarch/loongarch.cc ++++ b/gcc/config/loongarch/loongarch.cc +@@ -64,6 +64,7 @@ along with GCC; see the file COPYING3. If not see + #include "builtins.h" + #include "rtl-iter.h" + #include "opts.h" ++#include "function-abi.h" + + /* This file should be included last. 
*/ + #include "target-def.h" +@@ -1014,19 +1015,23 @@ loongarch_for_each_saved_reg (HOST_WIDE_INT sp_offset, + for (int regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++) + if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST)) + { +- loongarch_save_restore_reg (word_mode, regno, offset, fn); ++ if (!cfun->machine->reg_is_wrapped_separately[regno]) ++ loongarch_save_restore_reg (word_mode, regno, offset, fn); ++ + offset -= UNITS_PER_WORD; + } + + /* This loop must iterate over the same space as its companion in + loongarch_compute_frame_info. */ + offset = cfun->machine->frame.fp_sp_offset - sp_offset; ++ machine_mode mode = TARGET_DOUBLE_FLOAT ? DFmode : SFmode; ++ + for (int regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++) + if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST)) + { +- machine_mode mode = TARGET_DOUBLE_FLOAT ? DFmode : SFmode; ++ if (!cfun->machine->reg_is_wrapped_separately[regno]) ++ loongarch_save_restore_reg (word_mode, regno, offset, fn); + +- loongarch_save_restore_reg (mode, regno, offset, fn); + offset -= GET_MODE_SIZE (mode); + } + } +@@ -6630,6 +6635,151 @@ loongarch_asan_shadow_offset (void) + return TARGET_64BIT ? (HOST_WIDE_INT_1 << 46) : 0; + } + ++static sbitmap ++loongarch_get_separate_components (void) ++{ ++ HOST_WIDE_INT offset; ++ sbitmap components = sbitmap_alloc (FIRST_PSEUDO_REGISTER); ++ bitmap_clear (components); ++ offset = cfun->machine->frame.gp_sp_offset; ++ ++ /* The stack should be aligned to 16-bytes boundary, so we can make the use ++ of ldptr instructions. */ ++ gcc_assert (offset % UNITS_PER_WORD == 0); ++ ++ for (unsigned int regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++) ++ if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST)) ++ { ++ /* We can wrap general registers saved at [sp, sp + 32768) using the ++ ldptr/stptr instructions. For large offsets a pseudo register ++ might be needed which cannot be created during the shrink ++ wrapping pass. 
++ ++ TODO: This may need a revise when we add LA32 as ldptr.w is not ++ guaranteed available by the manual. */ ++ if (offset < 32768) ++ bitmap_set_bit (components, regno); ++ ++ offset -= UNITS_PER_WORD; ++ } ++ ++ offset = cfun->machine->frame.fp_sp_offset; ++ for (unsigned int regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++) ++ if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST)) ++ { ++ /* We can only wrap FP registers with imm12 offsets. For large ++ offsets a pseudo register might be needed which cannot be ++ created during the shrink wrapping pass. */ ++ if (IMM12_OPERAND (offset)) ++ bitmap_set_bit (components, regno); ++ ++ offset -= UNITS_PER_FPREG; ++ } ++ ++ /* Don't mess with the hard frame pointer. */ ++ if (frame_pointer_needed) ++ bitmap_clear_bit (components, HARD_FRAME_POINTER_REGNUM); ++ ++ bitmap_clear_bit (components, RETURN_ADDR_REGNUM); ++ ++ return components; ++} ++ ++static sbitmap ++loongarch_components_for_bb (basic_block bb) ++{ ++ /* Registers are used in a bb if they are in the IN, GEN, or KILL sets. 
*/ ++ auto_bitmap used; ++ bitmap_copy (used, DF_LIVE_IN (bb)); ++ bitmap_ior_into (used, &DF_LIVE_BB_INFO (bb)->gen); ++ bitmap_ior_into (used, &DF_LIVE_BB_INFO (bb)->kill); ++ ++ sbitmap components = sbitmap_alloc (FIRST_PSEUDO_REGISTER); ++ bitmap_clear (components); ++ ++ function_abi_aggregator callee_abis; ++ rtx_insn *insn; ++ FOR_BB_INSNS (bb, insn) ++ if (CALL_P (insn)) ++ callee_abis.note_callee_abi (insn_callee_abi (insn)); ++ ++ HARD_REG_SET extra_caller_saves = ++ callee_abis.caller_save_regs (*crtl->abi); ++ ++ for (unsigned int regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++) ++ if (!fixed_regs[regno] ++ && !crtl->abi->clobbers_full_reg_p (regno) ++ && (TEST_HARD_REG_BIT (extra_caller_saves, regno) || ++ bitmap_bit_p (used, regno))) ++ bitmap_set_bit (components, regno); ++ ++ for (unsigned int regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++) ++ if (!fixed_regs[regno] ++ && !crtl->abi->clobbers_full_reg_p (regno) ++ && (TEST_HARD_REG_BIT (extra_caller_saves, regno) || ++ bitmap_bit_p (used, regno))) ++ bitmap_set_bit (components, regno); ++ ++ return components; ++} ++ ++static void ++loongarch_disqualify_components (sbitmap, edge, sbitmap, bool) ++{ ++ /* Do nothing. */ ++} ++ ++static void ++loongarch_process_components (sbitmap components, loongarch_save_restore_fn fn) ++{ ++ HOST_WIDE_INT offset = cfun->machine->frame.gp_sp_offset; ++ ++ for (unsigned int regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++) ++ if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST)) ++ { ++ if (bitmap_bit_p (components, regno)) ++ loongarch_save_restore_reg (word_mode, regno, offset, fn); ++ ++ offset -= UNITS_PER_WORD; ++ } ++ ++ offset = cfun->machine->frame.fp_sp_offset; ++ machine_mode mode = TARGET_DOUBLE_FLOAT ? 
DFmode : SFmode; ++ ++ for (unsigned int regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++) ++ if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST)) ++ { ++ if (bitmap_bit_p (components, regno)) ++ loongarch_save_restore_reg (mode, regno, offset, fn); ++ ++ offset -= UNITS_PER_FPREG; ++ } ++} ++ ++static void ++loongarch_emit_prologue_components (sbitmap components) ++{ ++ loongarch_process_components (components, loongarch_save_reg); ++} ++ ++static void ++loongarch_emit_epilogue_components (sbitmap components) ++{ ++ loongarch_process_components (components, loongarch_restore_reg); ++} ++ ++static void ++loongarch_set_handled_components (sbitmap components) ++{ ++ for (unsigned int regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++) ++ if (bitmap_bit_p (components, regno)) ++ cfun->machine->reg_is_wrapped_separately[regno] = true; ++ ++ for (unsigned int regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++) ++ if (bitmap_bit_p (components, regno)) ++ cfun->machine->reg_is_wrapped_separately[regno] = true; ++} ++ + /* Initialize the GCC target structure. 
*/ + #undef TARGET_ASM_ALIGNED_HI_OP + #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t" +@@ -6827,6 +6977,29 @@ loongarch_asan_shadow_offset (void) + #undef TARGET_ASAN_SHADOW_OFFSET + #define TARGET_ASAN_SHADOW_OFFSET loongarch_asan_shadow_offset + ++#undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS ++#define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS \ ++ loongarch_get_separate_components ++ ++#undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB ++#define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB loongarch_components_for_bb ++ ++#undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS ++#define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS \ ++ loongarch_disqualify_components ++ ++#undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS ++#define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS \ ++ loongarch_emit_prologue_components ++ ++#undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS ++#define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS \ ++ loongarch_emit_epilogue_components ++ ++#undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS ++#define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS \ ++ loongarch_set_handled_components ++ + struct gcc_target targetm = TARGET_INITIALIZER; + + #include "gt-loongarch.h" +diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h +index af24bfa01..44ebadfaa 100644 +--- a/gcc/config/loongarch/loongarch.h ++++ b/gcc/config/loongarch/loongarch.h +@@ -1147,6 +1147,8 @@ struct GTY (()) machine_function + /* The current frame information, calculated by loongarch_compute_frame_info. 
+ */ + struct loongarch_frame_info frame; ++ ++ bool reg_is_wrapped_separately[FIRST_PSEUDO_REGISTER]; + }; + #endif + +diff --git a/gcc/testsuite/gcc.target/loongarch/shrink-wrap.c b/gcc/testsuite/gcc.target/loongarch/shrink-wrap.c +new file mode 100644 +index 000000000..1431536c5 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/shrink-wrap.c +@@ -0,0 +1,19 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O -fshrink-wrap" } */ ++ ++/* We should not save anything before checking the value of x. */ ++/* { dg-final { scan-assembler-not "st(ptr)?\\\.\[dw\].*b(eq|ne)z" } } */ ++ ++int ++foo (int x) ++{ ++ __asm__ ("nop" :); ++ if (x) ++ { ++ __asm__ ("" ::: "s0", "s1"); ++ return x; ++ } ++ ++ __asm__ ("" ::: "s2", "s3"); ++ return 0; ++} +-- +2.33.0 + diff --git a/LoongArch-Fix-MUSL_DYNAMIC_LINKER.patch b/LoongArch-Fix-MUSL_DYNAMIC_LINKER.patch new file mode 100644 index 0000000000000000000000000000000000000000..53d5dfeabf1a806dd18f105b9742fb01b49e45ad --- /dev/null +++ b/LoongArch-Fix-MUSL_DYNAMIC_LINKER.patch @@ -0,0 +1,43 @@ +From 3db61acfbaa773568fad2bc31d950c6d9b3729b0 Mon Sep 17 00:00:00 2001 +From: Peng Fan +Date: Wed, 19 Apr 2023 16:23:42 +0800 +Subject: [PATCH 044/124] LoongArch: Fix MUSL_DYNAMIC_LINKER + +The system based on musl has no '/lib64', so change it. + +https://wiki.musl-libc.org/guidelines-for-distributions.html, +"Multilib/multi-arch" section of this introduces it. + +gcc/ + * config/loongarch/gnu-user.h (MUSL_DYNAMIC_LINKER): Redefine. + +Signed-off-by: Peng Fan +Suggested-by: Xi Ruoyao +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/gnu-user.h | 7 ++++++- + 1 file changed, 6 insertions(+), 1 deletion(-) + +diff --git a/gcc/config/loongarch/gnu-user.h b/gcc/config/loongarch/gnu-user.h +index 1dc6add62..44e4f2575 100644 +--- a/gcc/config/loongarch/gnu-user.h ++++ b/gcc/config/loongarch/gnu-user.h +@@ -33,9 +33,14 @@ along with GCC; see the file COPYING3. 
If not see + #define GLIBC_DYNAMIC_LINKER \ + "/lib" ABI_GRLEN_SPEC "/ld-linux-loongarch-" ABI_SPEC ".so.1" + ++#define MUSL_ABI_SPEC \ ++ "%{mabi=lp64d:-lp64d}" \ ++ "%{mabi=lp64f:-lp64f}" \ ++ "%{mabi=lp64s:-lp64s}" ++ + #undef MUSL_DYNAMIC_LINKER + #define MUSL_DYNAMIC_LINKER \ +- "/lib" ABI_GRLEN_SPEC "/ld-musl-loongarch-" ABI_SPEC ".so.1" ++ "/lib/ld-musl-loongarch" ABI_GRLEN_SPEC MUSL_ABI_SPEC ".so.1" + + #undef GNU_USER_TARGET_LINK_SPEC + #define GNU_USER_TARGET_LINK_SPEC \ +-- +2.33.0 + diff --git a/LoongArch-Fix-bug-in-loongarch_emit_stack_tie-PR1104.patch b/LoongArch-Fix-bug-in-loongarch_emit_stack_tie-PR1104.patch new file mode 100644 index 0000000000000000000000000000000000000000..23f46aec0fd6304c89dca9a6dc5fcaefd104273c --- /dev/null +++ b/LoongArch-Fix-bug-in-loongarch_emit_stack_tie-PR1104.patch @@ -0,0 +1,43 @@ +From 7c8fc6b414dc1718e71e0d05c7a78498e06eb499 Mon Sep 17 00:00:00 2001 +From: Lulu Cheng +Date: Thu, 29 Jun 2023 19:30:59 +0800 +Subject: [PATCH 053/124] LoongArch: Fix bug in loongarch_emit_stack_tie + [PR110484]. + +Which may result in implicit references to $fp when frame_pointer_needed is false, +causing regs_ever_live[$fp] to be true when $fp is not explicitly used, +resulting in $fp being used as the target replacement register in the rnreg pass. + +The bug originates from SPEC2017 541.leela_r(-flto). + +gcc/ChangeLog: + + PR target/110484 + * config/loongarch/loongarch.cc (loongarch_emit_stack_tie): Use the + frame_pointer_needed to determine whether to use the $fp register. 
+ +Co-authored-by: Guo Jie +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/loongarch.cc | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc +index caacfa8a3..7b48e3216 100644 +--- a/gcc/config/loongarch/loongarch.cc ++++ b/gcc/config/loongarch/loongarch.cc +@@ -1109,7 +1109,9 @@ loongarch_first_stack_step (struct loongarch_frame_info *frame) + static void + loongarch_emit_stack_tie (void) + { +- emit_insn (gen_stack_tie (Pmode, stack_pointer_rtx, hard_frame_pointer_rtx)); ++ emit_insn (gen_stack_tie (Pmode, stack_pointer_rtx, ++ frame_pointer_needed ? hard_frame_pointer_rtx ++ : stack_pointer_rtx)); + } + + #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP) +-- +2.33.0 + diff --git a/LoongArch-Fix-bug-of-optab-di3_fake.patch b/LoongArch-Fix-bug-of-optab-di3_fake.patch new file mode 100644 index 0000000000000000000000000000000000000000..df1874aec9377b934a031500be86e2e9e4714e93 --- /dev/null +++ b/LoongArch-Fix-bug-of-optab-di3_fake.patch @@ -0,0 +1,123 @@ +From df1df2e7b7e27bd9fba77f572d74d833aff4a202 Mon Sep 17 00:00:00 2001 +From: Lulu Cheng +Date: Mon, 11 Sep 2023 16:20:29 +0800 +Subject: [PATCH 122/124] LoongArch: Fix bug of 'di3_fake'. + + PR target/111334 + +gcc/ChangeLog: + + * config/loongarch/loongarch.md: Fix bug of 'di3_fake'. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/pr111334.c: New test. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/loongarch.md | 20 ++++++---- + gcc/testsuite/gcc.target/loongarch/pr111334.c | 39 +++++++++++++++++++ + 2 files changed, 52 insertions(+), 7 deletions(-) + create mode 100644 gcc/testsuite/gcc.target/loongarch/pr111334.c + +diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md +index 264cd325c..7746116e6 100644 +--- a/gcc/config/loongarch/loongarch.md ++++ b/gcc/config/loongarch/loongarch.md +@@ -72,6 +72,9 @@ + UNSPEC_LUI_H_HI12 + UNSPEC_TLS_LOW + ++ ;; Fake div.w[u] mod.w[u] ++ UNSPEC_FAKE_ANY_DIV ++ + UNSPEC_SIBCALL_VALUE_MULTIPLE_INTERNAL_1 + UNSPEC_CALL_VALUE_MULTIPLE_INTERNAL_1 + ]) +@@ -900,7 +903,7 @@ + (match_operand:GPR 2 "register_operand")))] + "" + { +- if (GET_MODE (operands[0]) == SImode) ++ if (GET_MODE (operands[0]) == SImode && TARGET_64BIT) + { + rtx reg1 = gen_reg_rtx (DImode); + rtx reg2 = gen_reg_rtx (DImode); +@@ -920,9 +923,9 @@ + }) + + (define_insn "*3" +- [(set (match_operand:GPR 0 "register_operand" "=r,&r,&r") +- (any_div:GPR (match_operand:GPR 1 "register_operand" "r,r,0") +- (match_operand:GPR 2 "register_operand" "r,r,r")))] ++ [(set (match_operand:X 0 "register_operand" "=r,&r,&r") ++ (any_div:X (match_operand:X 1 "register_operand" "r,r,0") ++ (match_operand:X 2 "register_operand" "r,r,r")))] + "" + { + return loongarch_output_division (".\t%0,%1,%2", operands); +@@ -938,9 +941,12 @@ + (define_insn "di3_fake" + [(set (match_operand:DI 0 "register_operand" "=r,&r,&r") + (sign_extend:DI +- (any_div:SI (match_operand:DI 1 "register_operand" "r,r,0") +- (match_operand:DI 2 "register_operand" "r,r,r"))))] +- "" ++ (unspec:SI ++ [(subreg:SI ++ (any_div:DI (match_operand:DI 1 "register_operand" "r,r,0") ++ (match_operand:DI 2 "register_operand" "r,r,r")) 0)] ++ UNSPEC_FAKE_ANY_DIV)))] ++ "TARGET_64BIT" + { + return loongarch_output_division (".w\t%0,%1,%2", operands); + } +diff --git a/gcc/testsuite/gcc.target/loongarch/pr111334.c 
b/gcc/testsuite/gcc.target/loongarch/pr111334.c +new file mode 100644 +index 000000000..47366afcb +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/pr111334.c +@@ -0,0 +1,39 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2" } */ ++ ++unsigned ++util_next_power_of_two (unsigned x) ++{ ++ return (1 << __builtin_clz (x - 1)); ++} ++ ++extern int create_vec_from_array (void); ++ ++struct ac_shader_args { ++ struct { ++ unsigned char offset; ++ unsigned char size; ++ } args[384]; ++}; ++ ++struct isel_context { ++ const struct ac_shader_args* args; ++ int arg_temps[384]; ++}; ++ ++ ++void ++add_startpgm (struct isel_context* ctx, unsigned short arg_count) ++{ ++ ++ for (unsigned i = 0, arg = 0; i < arg_count; i++) ++ { ++ unsigned size = ctx->args->args[i].size; ++ unsigned reg = ctx->args->args[i].offset; ++ ++ if (reg % ( 4 < util_next_power_of_two (size) ++ ? 4 : util_next_power_of_two (size))) ++ ctx->arg_temps[i] = create_vec_from_array (); ++ } ++} ++ +-- +2.33.0 + diff --git a/LoongArch-Fix-pr106828-by-define-hook-TARGET_ASAN_SH.patch b/LoongArch-Fix-pr106828-by-define-hook-TARGET_ASAN_SH.patch new file mode 100644 index 0000000000000000000000000000000000000000..20cbb0479f0349ba7a21826498b9a496b9230526 --- /dev/null +++ b/LoongArch-Fix-pr106828-by-define-hook-TARGET_ASAN_SH.patch @@ -0,0 +1,69 @@ +From a70fe51d9813d490a89cbc8da1ae4b040bf8b37e Mon Sep 17 00:00:00 2001 +From: Lulu Cheng +Date: Wed, 7 Sep 2022 11:25:45 +0800 +Subject: [PATCH 017/124] LoongArch: Fix pr106828 by define hook + TARGET_ASAN_SHADOW_OFFSET in loongarch backend [PR106828]. + +gcc/ChangeLog: + + PR target/106828 + * config/loongarch/loongarch.cc (loongarch_asan_shadow_offset): New. + (TARGET_ASAN_SHADOW_OFFSET): New. + +gcc/testsuite/ChangeLog: + + PR target/106828 + * g++.target/loongarch/pr106828.C: New test. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/loongarch.cc | 13 +++++++++++++ + gcc/testsuite/g++.target/loongarch/pr106828.C | 4 ++++ + 2 files changed, 17 insertions(+) + create mode 100644 gcc/testsuite/g++.target/loongarch/pr106828.C + +diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc +index c9187bf81..98c0e26cd 100644 +--- a/gcc/config/loongarch/loongarch.cc ++++ b/gcc/config/loongarch/loongarch.cc +@@ -6466,6 +6466,16 @@ loongarch_use_anchors_for_symbol_p (const_rtx symbol) + return default_use_anchors_for_symbol_p (symbol); + } + ++/* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */ ++ ++static unsigned HOST_WIDE_INT ++loongarch_asan_shadow_offset (void) ++{ ++ /* We only have libsanitizer support for LOONGARCH64 at present. ++ This value is taken from the file libsanitizer/asan/asan_mappint.h. */ ++ return TARGET_64BIT ? (HOST_WIDE_INT_1 << 46) : 0; ++} ++ + /* Initialize the GCC target structure. */ + #undef TARGET_ASM_ALIGNED_HI_OP + #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t" +@@ -6660,6 +6670,9 @@ loongarch_use_anchors_for_symbol_p (const_rtx symbol) + #undef TARGET_USE_ANCHORS_FOR_SYMBOL_P + #define TARGET_USE_ANCHORS_FOR_SYMBOL_P loongarch_use_anchors_for_symbol_p + ++#undef TARGET_ASAN_SHADOW_OFFSET ++#define TARGET_ASAN_SHADOW_OFFSET loongarch_asan_shadow_offset ++ + struct gcc_target targetm = TARGET_INITIALIZER; + + #include "gt-loongarch.h" +diff --git a/gcc/testsuite/g++.target/loongarch/pr106828.C b/gcc/testsuite/g++.target/loongarch/pr106828.C +new file mode 100644 +index 000000000..190c1db71 +--- /dev/null ++++ b/gcc/testsuite/g++.target/loongarch/pr106828.C +@@ -0,0 +1,4 @@ ++/* { dg-do-preprocess } */ ++/* { dg-options "-mabi=lp64d -fsanitize=address" } */ ++ ++/* Tests whether the compiler supports compile option '-fsanitize=address'. 
*/ +-- +2.33.0 + diff --git a/LoongArch-Fix-unintentional-bash-ism-in-r14-3665.patch b/LoongArch-Fix-unintentional-bash-ism-in-r14-3665.patch new file mode 100644 index 0000000000000000000000000000000000000000..7d6b160d5208cb79800a4e5f9e6035c6a3eb62c6 --- /dev/null +++ b/LoongArch-Fix-unintentional-bash-ism-in-r14-3665.patch @@ -0,0 +1,31 @@ +From 8e5c9f349877af07dde4804974d47625c1292956 Mon Sep 17 00:00:00 2001 +From: Yang Yujie +Date: Wed, 6 Sep 2023 17:57:47 +0800 +Subject: [PATCH 070/124] LoongArch: Fix unintentional bash-ism in r14-3665. + +gcc/ChangeLog: + + * config.gcc: remove non-POSIX syntax "<<<". + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config.gcc | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/gcc/config.gcc b/gcc/config.gcc +index 19f584344..57e724080 100644 +--- a/gcc/config.gcc ++++ b/gcc/config.gcc +@@ -5263,7 +5263,7 @@ case "${target}" in + if test x${parse_state} = x"abi-base"; then + # Base ABI type + case ${component} in +- lp64d | lp64f | lp64s) elem_tmp="ABI_BASE_$(tr a-z A-Z <<< ${component}),";; ++ lp64d | lp64f | lp64s) elem_tmp="ABI_BASE_$(echo ${component} | tr a-z A-Z),";; + *) + echo "Unknown base ABI \"${component}\" in --with-multilib-list." 1>&2 + exit 1 +-- +2.33.0 + diff --git a/LoongArch-Fix-unintentionally-breakage-in-r14-3665.patch b/LoongArch-Fix-unintentionally-breakage-in-r14-3665.patch new file mode 100644 index 0000000000000000000000000000000000000000..93c427d1ea4be4869e2417fd0180664f80e048da --- /dev/null +++ b/LoongArch-Fix-unintentionally-breakage-in-r14-3665.patch @@ -0,0 +1,34 @@ +From 8de6f5e1aad2a1ff85ff3a4b732055d625c61139 Mon Sep 17 00:00:00 2001 +From: Xi Ruoyao +Date: Tue, 5 Sep 2023 20:02:51 +0800 +Subject: [PATCH 067/124] LoongArch: Fix unintentionally breakage in r14-3665 + +Fix a build failure with no system assembler or system old assembler. + +gcc/ChangeLog: + + * config/loongarch/loongarch-opts.h (HAVE_AS_EXPLICIT_RELOCS): + Define to 0 if not defined yet. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/loongarch-opts.h | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/gcc/config/loongarch/loongarch-opts.h b/gcc/config/loongarch/loongarch-opts.h +index e3f9b6f99..0d148e43b 100644 +--- a/gcc/config/loongarch/loongarch-opts.h ++++ b/gcc/config/loongarch/loongarch-opts.h +@@ -93,4 +93,8 @@ loongarch_update_gcc_opt_status (struct loongarch_target *target, + while -m[no]-memcpy imposes a global constraint. */ + #define TARGET_DO_OPTIMIZE_BLOCK_MOVE_P loongarch_do_optimize_block_move_p() + ++#ifndef HAVE_AS_EXPLICIT_RELOCS ++#define HAVE_AS_EXPLICIT_RELOCS 0 ++#endif ++ + #endif /* LOONGARCH_OPTS_H */ +-- +2.33.0 + diff --git a/LoongArch-Fix-up-memcpy-vec-3.c-test-case.patch b/LoongArch-Fix-up-memcpy-vec-3.c-test-case.patch new file mode 100644 index 0000000000000000000000000000000000000000..492f37f47ba0113cc310a402d536494533974b19 --- /dev/null +++ b/LoongArch-Fix-up-memcpy-vec-3.c-test-case.patch @@ -0,0 +1,33 @@ +From 78896e68f50164af7827e8da01a7220764d1e296 Mon Sep 17 00:00:00 2001 +From: Xi Ruoyao +Date: Sat, 9 Sep 2023 16:18:06 +0800 +Subject: [PATCH 075/124] LoongArch: Fix up memcpy-vec-3.c test case + +The generic code will split 16-byte copy into two 8-byte copies, so the +vector code wouldn't be used even if -mno-strict-align. This +contradicted with the purpose of this test case. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/memcpy-vec-3.c: Increase the amount of + copied bytes to 32. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/testsuite/gcc.target/loongarch/memcpy-vec-3.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/gcc/testsuite/gcc.target/loongarch/memcpy-vec-3.c b/gcc/testsuite/gcc.target/loongarch/memcpy-vec-3.c +index 233ed2150..db2ea510b 100644 +--- a/gcc/testsuite/gcc.target/loongarch/memcpy-vec-3.c ++++ b/gcc/testsuite/gcc.target/loongarch/memcpy-vec-3.c +@@ -3,4 +3,4 @@ + /* { dg-final { scan-assembler-not "vst" } } */ + + extern char a[], b[]; +-void test() { __builtin_memcpy(a, b, 16); } ++void test() { __builtin_memcpy(a, b, 32); } +-- +2.33.0 + diff --git a/LoongArch-Fixed-a-bug-in-the-loongarch-architecture-.patch b/LoongArch-Fixed-a-bug-in-the-loongarch-architecture-.patch new file mode 100644 index 0000000000000000000000000000000000000000..3f1a351173706f6c4330fa300bb3fa1969005c28 --- /dev/null +++ b/LoongArch-Fixed-a-bug-in-the-loongarch-architecture-.patch @@ -0,0 +1,43 @@ +From 80ed9ab39d9b1b08ad9d054f16d65b2a249a89e5 Mon Sep 17 00:00:00 2001 +From: Lulu Cheng +Date: Wed, 12 Oct 2022 11:02:11 +0800 +Subject: [PATCH 022/124] LoongArch: Fixed a bug in the loongarch architecture + of libitm package. + +Add a soft floating point condition to the register recovery part of the code. + +libitm/ChangeLog: + + * config/loongarch/sjlj.S: Add a soft floating point condition to the + register recovery part of the code. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + libitm/config/loongarch/sjlj.S | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/libitm/config/loongarch/sjlj.S b/libitm/config/loongarch/sjlj.S +index a5f9fadde..f896e400e 100644 +--- a/libitm/config/loongarch/sjlj.S ++++ b/libitm/config/loongarch/sjlj.S +@@ -104,6 +104,8 @@ GTM_longjmp: + GPR_L $s7, $r5, 10*SZ_GPR + GPR_L $s8, $r5, 11*SZ_GPR + ++#if !defined(__loongarch_soft_float) ++ /* Callee-saved scratch FPRs (f24-f31) */ + FPR_L $f24, $r5, 12*SZ_GPR + 0*SZ_FPR + FPR_L $f25, $r5, 12*SZ_GPR + 1*SZ_FPR + FPR_L $f26, $r5, 12*SZ_GPR + 2*SZ_FPR +@@ -112,6 +114,7 @@ GTM_longjmp: + FPR_L $f29, $r5, 12*SZ_GPR + 5*SZ_FPR + FPR_L $f30, $r5, 12*SZ_GPR + 6*SZ_FPR + FPR_L $f31, $r5, 12*SZ_GPR + 7*SZ_FPR ++#endif + + GPR_L $r7, $r5, 2*SZ_GPR + GPR_L $fp, $r5, 0*SZ_GPR +-- +2.33.0 + diff --git a/LoongArch-Fixed-a-compilation-failure-with-c-in-inli.patch b/LoongArch-Fixed-a-compilation-failure-with-c-in-inli.patch new file mode 100644 index 0000000000000000000000000000000000000000..52ca4bc01473d243acc95abd3f6c358925d40111 --- /dev/null +++ b/LoongArch-Fixed-a-compilation-failure-with-c-in-inli.patch @@ -0,0 +1,182 @@ +From 49a63dbaf3b4296f0b1f8a0e11790cc3455aeec7 Mon Sep 17 00:00:00 2001 +From: Lulu Cheng +Date: Wed, 18 Jan 2023 11:06:56 +0800 +Subject: [PATCH 034/124] LoongArch: Fixed a compilation failure with '%c' in + inline assembly [PR107731]. + +Co-authored-by: Yang Yujie + + PR target/107731 + +gcc/ChangeLog: + + * config/loongarch/loongarch.cc (loongarch_classify_address): + Add precessint for CONST_INT. + (loongarch_print_operand_reloc): Operand modifier 'c' is supported. + (loongarch_print_operand): Increase the processing of '%c'. + * doc/extend.texi: Adds documents for LoongArch operand modifiers. + And port the public operand modifiers information to this document. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/tst-asm-const.c: Moved to... + * gcc.target/loongarch/pr107731.c: ...here. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/loongarch.cc | 14 +++++ + gcc/doc/extend.texi | 51 +++++++++++++++++-- + .../loongarch/{tst-asm-const.c => pr107731.c} | 6 +-- + 3 files changed, 64 insertions(+), 7 deletions(-) + rename gcc/testsuite/gcc.target/loongarch/{tst-asm-const.c => pr107731.c} (78%) + +diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc +index e59edc4cd..1a4686f03 100644 +--- a/gcc/config/loongarch/loongarch.cc ++++ b/gcc/config/loongarch/loongarch.cc +@@ -2074,6 +2074,11 @@ loongarch_classify_address (struct loongarch_address_info *info, rtx x, + return (loongarch_valid_base_register_p (info->reg, mode, strict_p) + && loongarch_valid_lo_sum_p (info->symbol_type, mode, + info->offset)); ++ case CONST_INT: ++ /* Small-integer addresses don't occur very often, but they ++ are legitimate if $r0 is a valid base register. */ ++ info->type = ADDRESS_CONST_INT; ++ return IMM12_OPERAND (INTVAL (x)); + + default: + return false; +@@ -4932,6 +4937,7 @@ loongarch_print_operand_reloc (FILE *file, rtx op, bool hi64_part, + + 'A' Print a _DB suffix if the memory model requires a release. + 'b' Print the address of a memory operand, without offset. ++ 'c' Print an integer. + 'C' Print the integer branch condition for comparison OP. + 'd' Print CONST_INT OP in decimal. + 'F' Print the FPU branch condition for comparison OP. 
+@@ -4978,6 +4984,14 @@ loongarch_print_operand (FILE *file, rtx op, int letter) + fputs ("_db", file); + break; + ++ case 'c': ++ if (CONST_INT_P (op)) ++ fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op)); ++ else ++ output_operand_lossage ("unsupported operand for code '%c'", letter); ++ ++ break; ++ + case 'C': + loongarch_print_int_branch_condition (file, code, letter); + break; +diff --git a/gcc/doc/extend.texi b/gcc/doc/extend.texi +index da2840c23..3c101ca89 100644 +--- a/gcc/doc/extend.texi ++++ b/gcc/doc/extend.texi +@@ -10414,8 +10414,10 @@ ensures that modifying @var{a} does not affect the address referenced by + is undefined if @var{a} is modified before using @var{b}. + + @code{asm} supports operand modifiers on operands (for example @samp{%k2} +-instead of simply @samp{%2}). Typically these qualifiers are hardware +-dependent. The list of supported modifiers for x86 is found at ++instead of simply @samp{%2}). @ref{GenericOperandmodifiers, ++Generic Operand modifiers} lists the modifiers that are available ++on all targets. Other modifiers are hardware dependent. ++For example, the list of supported modifiers for x86 is found at + @ref{x86Operandmodifiers,x86 Operand modifiers}. + + If the C code that follows the @code{asm} makes no use of any of the output +@@ -10683,8 +10685,10 @@ optimizers may discard the @code{asm} statement as unneeded + (see @ref{Volatile}). + + @code{asm} supports operand modifiers on operands (for example @samp{%k2} +-instead of simply @samp{%2}). Typically these qualifiers are hardware +-dependent. The list of supported modifiers for x86 is found at ++instead of simply @samp{%2}). @ref{GenericOperandmodifiers, ++Generic Operand modifiers} lists the modifiers that are available ++on all targets. Other modifiers are hardware dependent. ++For example, the list of supported modifiers for x86 is found at + @ref{x86Operandmodifiers,x86 Operand modifiers}. 
+ + In this example using the fictitious @code{combine} instruction, the +@@ -11036,6 +11040,30 @@ lab: + @} + @end example + ++@anchor{GenericOperandmodifiers} ++@subsubsection Generic Operand Modifiers ++@noindent ++The following table shows the modifiers supported by all targets and their effects: ++ ++@multitable {Modifier} {Description} {Example} ++@headitem Modifier @tab Description @tab Example ++@item @code{c} ++@tab Require a constant operand and print the constant expression with no punctuation. ++@tab @code{%c0} ++@item @code{n} ++@tab Like @samp{%c} except that the value of the constant is negated before printing. ++@tab @code{%n0} ++@item @code{a} ++@tab Substitute a memory reference, with the actual operand treated as the address. ++This may be useful when outputting a ``load address'' instruction, because ++often the assembler syntax for such an instruction requires you to write the ++operand as if it were a memory reference. ++@tab @code{%a0} ++@item @code{l} ++@tab Print the label name with no punctuation. ++@tab @code{%l0} ++@end multitable ++ + @anchor{x86Operandmodifiers} + @subsubsection x86 Operand Modifiers + +@@ -11386,6 +11414,21 @@ constant. Used to select the specified bit position. + @item @code{x} @tab Equivialent to @code{X}, but only for pointers. + @end multitable + ++@anchor{loongarchOperandmodifiers} ++@subsubsection LoongArch Operand Modifiers ++ ++The list below describes the supported modifiers and their effects for LoongArch. ++ ++@multitable @columnfractions .10 .90 ++@headitem Modifier @tab Description ++@item @code{d} @tab Same as @code{c}. ++@item @code{i} @tab Print the character ''@code{i}'' if the operand is not a register. ++@item @code{m} @tab Same as @code{c}, but the printed value is @code{operand - 1}. ++@item @code{X} @tab Print a constant integer operand in hexadecimal. ++@item @code{z} @tab Print the operand in its unmodified form, followed by a comma. 
++@end multitable ++ ++ + @lowersections + @include md.texi + @raisesections +diff --git a/gcc/testsuite/gcc.target/loongarch/tst-asm-const.c b/gcc/testsuite/gcc.target/loongarch/pr107731.c +similarity index 78% +rename from gcc/testsuite/gcc.target/loongarch/tst-asm-const.c +rename to gcc/testsuite/gcc.target/loongarch/pr107731.c +index 2e04b99e3..80d84c48c 100644 +--- a/gcc/testsuite/gcc.target/loongarch/tst-asm-const.c ++++ b/gcc/testsuite/gcc.target/loongarch/pr107731.c +@@ -1,13 +1,13 @@ +-/* Test asm const. */ + /* { dg-do compile } */ + /* { dg-final { scan-assembler-times "foo:.*\\.long 1061109567.*\\.long 52" 1 } } */ ++ + int foo () + { + __asm__ volatile ( + "foo:" + "\n\t" +- ".long %a0\n\t" +- ".long %a1\n\t" ++ ".long %c0\n\t" ++ ".long %c1\n\t" + : + :"i"(0x3f3f3f3f), "i"(52) + : +-- +2.33.0 + diff --git a/LoongArch-Fixed-a-typo-in-the-comment-information-of.patch b/LoongArch-Fixed-a-typo-in-the-comment-information-of.patch new file mode 100644 index 0000000000000000000000000000000000000000..7013c90fbd4752e8f9dc52c455a90d12f47b7613 --- /dev/null +++ b/LoongArch-Fixed-a-typo-in-the-comment-information-of.patch @@ -0,0 +1,33 @@ +From cbb5f181544e35b119fee4ed150bec24eee7179c Mon Sep 17 00:00:00 2001 +From: Lulu Cheng +Date: Wed, 28 Sep 2022 16:35:06 +0800 +Subject: [PATCH 020/124] LoongArch: Fixed a typo in the comment information of + the function loongarch_asan_shadow_offset. + +gcc/ChangeLog: + + * config/loongarch/loongarch.cc (loongarch_asan_shadow_offset): + Fixed typo in "asan_mapping.h". 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/loongarch.cc | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc +index 98c0e26cd..e9ba3374e 100644 +--- a/gcc/config/loongarch/loongarch.cc ++++ b/gcc/config/loongarch/loongarch.cc +@@ -6472,7 +6472,7 @@ static unsigned HOST_WIDE_INT + loongarch_asan_shadow_offset (void) + { + /* We only have libsanitizer support for LOONGARCH64 at present. +- This value is taken from the file libsanitizer/asan/asan_mappint.h. */ ++ This value is taken from the file libsanitizer/asan/asan_mapping.h. */ + return TARGET_64BIT ? (HOST_WIDE_INT_1 << 46) : 0; + } + +-- +2.33.0 + diff --git a/LoongArch-Generate-bytepick.-wd-for-suitable-bit-ope.patch b/LoongArch-Generate-bytepick.-wd-for-suitable-bit-ope.patch new file mode 100644 index 0000000000000000000000000000000000000000..1b870196851fa57f1248c7fa352d27776348e156 --- /dev/null +++ b/LoongArch-Generate-bytepick.-wd-for-suitable-bit-ope.patch @@ -0,0 +1,196 @@ +From 9311c0f56086e38fe5e9bf4bbfc2e37d0f18347c Mon Sep 17 00:00:00 2001 +From: Xi Ruoyao +Date: Fri, 3 Feb 2023 17:06:06 +0800 +Subject: [PATCH 036/124] LoongArch: Generate bytepick.[wd] for suitable bit + operation pattern + +We can use bytepick.[wd] for + + a << (8 * x) | b >> (8 * (sizeof(a) - x)) + +while a and b are uint32_t or uint64_t. This is useful for some cases, +for example: +https://sourceware.org/pipermail/libc-alpha/2023-February/145203.html + +gcc/ChangeLog: + + * config/loongarch/loongarch.md (bytepick_w_ashift_amount): + New define_int_iterator. + (bytepick_d_ashift_amount): Likewise. + (bytepick_imm): New define_int_attr. + (bytepick_w_lshiftrt_amount): Likewise. + (bytepick_d_lshiftrt_amount): Likewise. + (bytepick_w_): New define_insn template. + (bytepick_w__extend): Likewise. + (bytepick_d_): Likewise. + (bytepick_w): Remove unused define_insn. + (bytepick_d): Likewise. 
+ (UNSPEC_BYTEPICK_W): Remove unused unspec. + (UNSPEC_BYTEPICK_D): Likewise. + * config/loongarch/predicates.md (const_0_to_3_operand): + Remove unused define_predicate. + (const_0_to_7_operand): Likewise. + +gcc/testsuite/ChangeLog: + + * g++.target/loongarch/bytepick.C: New test. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/loongarch.md | 60 ++++++++++++++----- + gcc/config/loongarch/predicates.md | 8 --- + gcc/testsuite/g++.target/loongarch/bytepick.C | 32 ++++++++++ + 3 files changed, 77 insertions(+), 23 deletions(-) + create mode 100644 gcc/testsuite/g++.target/loongarch/bytepick.C + +diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md +index f61db66d5..833b94753 100644 +--- a/gcc/config/loongarch/loongarch.md ++++ b/gcc/config/loongarch/loongarch.md +@@ -48,8 +48,6 @@ + UNSPEC_EH_RETURN + + ;; Bit operation +- UNSPEC_BYTEPICK_W +- UNSPEC_BYTEPICK_D + UNSPEC_BITREV_4B + UNSPEC_BITREV_8B + +@@ -544,6 +542,27 @@ + (UNSPEC_FTINTRM "0") + (UNSPEC_FTINTRP "0")]) + ++;; Iterator and attributes for bytepick.d ++(define_int_iterator bytepick_w_ashift_amount [8 16 24]) ++(define_int_attr bytepick_w_lshiftrt_amount [(8 "24") ++ (16 "16") ++ (24 "8")]) ++(define_int_iterator bytepick_d_ashift_amount [8 16 24 32 40 48 56]) ++(define_int_attr bytepick_d_lshiftrt_amount [(8 "56") ++ (16 "48") ++ (24 "40") ++ (32 "32") ++ (40 "24") ++ (48 "16") ++ (56 "8")]) ++(define_int_attr bytepick_imm [(8 "1") ++ (16 "2") ++ (24 "3") ++ (32 "4") ++ (40 "5") ++ (48 "6") ++ (56 "7")]) ++ + ;; + ;; .................... 
+ ;; +@@ -3364,24 +3383,35 @@ + [(set_attr "type" "unknown") + (set_attr "mode" "")]) + +-(define_insn "bytepick_w" ++(define_insn "bytepick_w_" + [(set (match_operand:SI 0 "register_operand" "=r") +- (unspec:SI [(match_operand:SI 1 "register_operand" "r") +- (match_operand:SI 2 "register_operand" "r") +- (match_operand:SI 3 "const_0_to_3_operand" "n")] +- UNSPEC_BYTEPICK_W))] ++ (ior:SI (lshiftrt (match_operand:SI 1 "register_operand" "r") ++ (const_int )) ++ (ashift (match_operand:SI 2 "register_operand" "r") ++ (const_int bytepick_w_ashift_amount))))] + "" +- "bytepick.w\t%0,%1,%2,%z3" ++ "bytepick.w\t%0,%1,%2," + [(set_attr "mode" "SI")]) + +-(define_insn "bytepick_d" ++(define_insn "bytepick_w__extend" + [(set (match_operand:DI 0 "register_operand" "=r") +- (unspec:DI [(match_operand:DI 1 "register_operand" "r") +- (match_operand:DI 2 "register_operand" "r") +- (match_operand:DI 3 "const_0_to_7_operand" "n")] +- UNSPEC_BYTEPICK_D))] +- "" +- "bytepick.d\t%0,%1,%2,%z3" ++ (sign_extend:DI ++ (ior:SI (lshiftrt (match_operand:SI 1 "register_operand" "r") ++ (const_int )) ++ (ashift (match_operand:SI 2 "register_operand" "r") ++ (const_int bytepick_w_ashift_amount)))))] ++ "TARGET_64BIT" ++ "bytepick.w\t%0,%1,%2," ++ [(set_attr "mode" "SI")]) ++ ++(define_insn "bytepick_d_" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (ior:DI (lshiftrt (match_operand:DI 1 "register_operand" "r") ++ (const_int )) ++ (ashift (match_operand:DI 2 "register_operand" "r") ++ (const_int bytepick_d_ashift_amount))))] ++ "TARGET_64BIT" ++ "bytepick.d\t%0,%1,%2," + [(set_attr "mode" "DI")]) + + (define_insn "bitrev_4b" +diff --git a/gcc/config/loongarch/predicates.md b/gcc/config/loongarch/predicates.md +index 58c3dc226..3c32b2987 100644 +--- a/gcc/config/loongarch/predicates.md ++++ b/gcc/config/loongarch/predicates.md +@@ -91,14 +91,6 @@ + (ior (match_operand 0 "const_1_operand") + (match_operand 0 "register_operand"))) + +-(define_predicate "const_0_to_3_operand" +- (and 
(match_code "const_int") +- (match_test "IN_RANGE (INTVAL (op), 0, 3)"))) +- +-(define_predicate "const_0_to_7_operand" +- (and (match_code "const_int") +- (match_test "IN_RANGE (INTVAL (op), 0, 7)"))) +- + (define_predicate "lu52i_mask_operand" + (and (match_code "const_int") + (match_test "UINTVAL (op) == 0xfffffffffffff"))) +diff --git a/gcc/testsuite/g++.target/loongarch/bytepick.C b/gcc/testsuite/g++.target/loongarch/bytepick.C +new file mode 100644 +index 000000000..a39e2fa65 +--- /dev/null ++++ b/gcc/testsuite/g++.target/loongarch/bytepick.C +@@ -0,0 +1,32 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -mabi=lp64d" } */ ++/* { dg-final { scan-assembler-times "bytepick.w\t\\\$r4,\\\$r5,\\\$r4" 3 } } */ ++/* { dg-final { scan-assembler-times "bytepick.d\t\\\$r4,\\\$r5,\\\$r4" 7 } } */ ++/* { dg-final { scan-assembler-not "slli.w" } } */ ++ ++template ++T ++merge (T a, T b) ++{ ++ return a << offs | b >> (8 * sizeof (T) - offs); ++} ++ ++using u32 = __UINT32_TYPE__; ++using u64 = __UINT64_TYPE__; ++using i64 = __INT64_TYPE__; ++ ++template u32 merge (u32, u32); ++template u32 merge (u32, u32); ++template u32 merge (u32, u32); ++ ++template u64 merge (u64, u64); ++template u64 merge (u64, u64); ++template u64 merge (u64, u64); ++template u64 merge (u64, u64); ++template u64 merge (u64, u64); ++template u64 merge (u64, u64); ++template u64 merge (u64, u64); ++ ++/* we cannot use bytepick for the following cases */ ++template i64 merge (i64, i64); ++template u64 merge (u64, u64); +-- +2.33.0 + diff --git a/LoongArch-Get-__tls_get_addr-address-through-got-tab.patch b/LoongArch-Get-__tls_get_addr-address-through-got-tab.patch new file mode 100644 index 0000000000000000000000000000000000000000..e1879f7725bbee9b640c870dd2f147bbc1994674 --- /dev/null +++ b/LoongArch-Get-__tls_get_addr-address-through-got-tab.patch @@ -0,0 +1,71 @@ +From a96dee6ba3c916f9a4329b196a0c5a1652fe294f Mon Sep 17 00:00:00 2001 +From: Lulu Cheng +Date: Thu, 18 Aug 2022 09:57:14 +0800 
+Subject: [PATCH 010/124] LoongArch: Get __tls_get_addr address through got + table when disable plt. + +Fix bug, ICE with tls gd/ld var with -fno-plt. + +gcc/ChangeLog: + + * config/loongarch/loongarch.cc (loongarch_call_tls_get_addr): + Get __tls_get_addr address through got table when disable plt. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/tls-gd-noplt.c: New test. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/loongarch.cc | 14 ++++++++++++-- + gcc/testsuite/gcc.target/loongarch/tls-gd-noplt.c | 12 ++++++++++++ + 2 files changed, 24 insertions(+), 2 deletions(-) + create mode 100644 gcc/testsuite/gcc.target/loongarch/tls-gd-noplt.c + +diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc +index 1b5af2c7d..76bf55ea4 100644 +--- a/gcc/config/loongarch/loongarch.cc ++++ b/gcc/config/loongarch/loongarch.cc +@@ -2448,8 +2448,18 @@ loongarch_call_tls_get_addr (rtx sym, enum loongarch_symbol_type type, rtx v0) + gcc_unreachable (); + } + +- insn = emit_call_insn (gen_call_value_internal (v0, loongarch_tls_symbol, +- const0_rtx)); ++ if (flag_plt) ++ insn = emit_call_insn (gen_call_value_internal (v0, loongarch_tls_symbol, ++ const0_rtx)); ++ else ++ { ++ rtx dest = gen_reg_rtx (Pmode); ++ rtx high = gen_reg_rtx (Pmode); ++ loongarch_emit_move (high, gen_rtx_HIGH (Pmode, loongarch_tls_symbol)); ++ emit_insn (gen_ld_from_got (Pmode, dest, high, loongarch_tls_symbol)); ++ insn = emit_call_insn (gen_call_value_internal (v0, dest, const0_rtx)); ++ } ++ + RTL_CONST_CALL_P (insn) = 1; + use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0); + insn = get_insns (); +diff --git a/gcc/testsuite/gcc.target/loongarch/tls-gd-noplt.c b/gcc/testsuite/gcc.target/loongarch/tls-gd-noplt.c +new file mode 100644 +index 000000000..32a0acf9b +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/tls-gd-noplt.c +@@ -0,0 +1,12 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -fno-plt -mcmodel=normal" } */ ++/* { dg-final 
{ scan-assembler "pcalau12i\t.*%got_pc_hi20\\(__tls_get_addr\\)" } } */ ++ ++__attribute__ ((tls_model ("global-dynamic"))) __thread int a; ++ ++void ++test (void) ++{ ++ a = 10; ++} ++ +-- +2.33.0 + diff --git a/LoongArch-Implement-128-bit-floating-point-functions.patch b/LoongArch-Implement-128-bit-floating-point-functions.patch new file mode 100644 index 0000000000000000000000000000000000000000..5d23401b8bd4cdbe9daa41c91edececeedd49676 --- /dev/null +++ b/LoongArch-Implement-128-bit-floating-point-functions.patch @@ -0,0 +1,204 @@ +From 12ab9eae9e8a5b83c778182f15c6216bcbc3dc36 Mon Sep 17 00:00:00 2001 +From: chenxiaolong +Date: Fri, 1 Sep 2023 11:22:42 +0800 +Subject: [PATCH 054/124] LoongArch: Implement 128-bit floating point functions + in gcc. + +During implementation, float128_type_node is bound with the type "__float128" +so that the compiler can correctly identify the type of the function. The +"q" suffix is associated with the "f128" function, which makes GCC more +flexible to support different user input cases, implementing functions such +as __builtin_{huge_valq, infq, fabsq, copysignq, nanq, nansq}. + +gcc/ChangeLog: + + * config/loongarch/loongarch-builtins.cc (loongarch_init_builtins): + Associate the __float128 type to float128_type_node so that it can + be recognized by the compiler. + * config/loongarch/loongarch-c.cc (loongarch_cpu_cpp_builtins): + Add the flag "FLOAT128_TYPE" to gcc and associate a function + with the suffix "q" to "f128". + * doc/extend.texi:Added support for 128-bit floating-point functions on + the LoongArch architecture. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/math-float-128.c: New test. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/loongarch-builtins.cc | 5 ++ + gcc/config/loongarch/loongarch-c.cc | 11 +++ + gcc/doc/extend.texi | 20 ++++- + .../gcc.target/loongarch/math-float-128.c | 81 +++++++++++++++++++ + 4 files changed, 114 insertions(+), 3 deletions(-) + create mode 100644 gcc/testsuite/gcc.target/loongarch/math-float-128.c + +diff --git a/gcc/config/loongarch/loongarch-builtins.cc b/gcc/config/loongarch/loongarch-builtins.cc +index 64fe11168..cb0ea1664 100644 +--- a/gcc/config/loongarch/loongarch-builtins.cc ++++ b/gcc/config/loongarch/loongarch-builtins.cc +@@ -256,6 +256,11 @@ loongarch_init_builtins (void) + unsigned int i; + tree type; + ++ /* Register the type float128_type_node as a built-in type and ++ give it an alias "__float128". */ ++ (*lang_hooks.types.register_builtin_type) (float128_type_node, ++ "__float128"); ++ + /* Iterate through all of the bdesc arrays, initializing all of the + builtin functions. */ + for (i = 0; i < ARRAY_SIZE (loongarch_builtins); i++) +diff --git a/gcc/config/loongarch/loongarch-c.cc b/gcc/config/loongarch/loongarch-c.cc +index d6e3e19f0..f779a7355 100644 +--- a/gcc/config/loongarch/loongarch-c.cc ++++ b/gcc/config/loongarch/loongarch-c.cc +@@ -99,6 +99,17 @@ loongarch_cpu_cpp_builtins (cpp_reader *pfile) + else + builtin_define ("__loongarch_frlen=0"); + ++ /* Add support for FLOAT128_TYPE on the LoongArch architecture. */ ++ builtin_define ("__FLOAT128_TYPE__"); ++ ++ /* Map the old _Float128 'q' builtins into the new 'f128' builtins. */ ++ builtin_define ("__builtin_fabsq=__builtin_fabsf128"); ++ builtin_define ("__builtin_copysignq=__builtin_copysignf128"); ++ builtin_define ("__builtin_nanq=__builtin_nanf128"); ++ builtin_define ("__builtin_nansq=__builtin_nansf128"); ++ builtin_define ("__builtin_infq=__builtin_inff128"); ++ builtin_define ("__builtin_huge_valq=__builtin_huge_valf128"); ++ + /* Native Data Sizes. 
*/ + builtin_define_with_int_value ("_LOONGARCH_SZINT", INT_TYPE_SIZE); + builtin_define_with_int_value ("_LOONGARCH_SZLONG", LONG_TYPE_SIZE); +diff --git a/gcc/doc/extend.texi b/gcc/doc/extend.texi +index 1d1bac255..bb19d0f27 100644 +--- a/gcc/doc/extend.texi ++++ b/gcc/doc/extend.texi +@@ -1085,10 +1085,10 @@ types. + As an extension, GNU C and GNU C++ support additional floating + types, which are not supported by all targets. + @itemize @bullet +-@item @code{__float128} is available on i386, x86_64, IA-64, and +-hppa HP-UX, as well as on PowerPC GNU/Linux targets that enable ++@item @code{__float128} is available on i386, x86_64, IA-64, LoongArch ++and hppa HP-UX, as well as on PowerPC GNU/Linux targets that enable + the vector scalar (VSX) instruction set. @code{__float128} supports +-the 128-bit floating type. On i386, x86_64, PowerPC, and IA-64 ++the 128-bit floating type. On i386, x86_64, PowerPC, LoongArch and IA-64, + other than HP-UX, @code{__float128} is an alias for @code{_Float128}. + On hppa and IA-64 HP-UX, @code{__float128} is an alias for @code{long + double}. +@@ -16257,6 +16257,20 @@ function you need to include @code{larchintrin.h}. + void __break (imm0_32767) + @end smallexample + ++Additional built-in functions are available for LoongArch family ++processors to efficiently use 128-bit floating-point (__float128) ++values. ++ ++The following are the basic built-in functions supported. 
++@smallexample ++__float128 __builtin_fabsq (__float128); ++__float128 __builtin_copysignq (__float128, __float128); ++__float128 __builtin_infq (void); ++__float128 __builtin_huge_valq (void); ++__float128 __builtin_nanq (void); ++__float128 __builtin_nansq (void); ++@end smallexample ++ + @node MIPS DSP Built-in Functions + @subsection MIPS DSP Built-in Functions + +diff --git a/gcc/testsuite/gcc.target/loongarch/math-float-128.c b/gcc/testsuite/gcc.target/loongarch/math-float-128.c +new file mode 100644 +index 000000000..387566a57 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/math-float-128.c +@@ -0,0 +1,81 @@ ++/* { dg-do compile } */ ++/* { dg-options " -march=loongarch64 -O2 " } */ ++/* { dg-final { scan-assembler-not "my_fabsq2:.*\\bl\t%plt\\(__builtin_fabsq\\).*my_fabsq2" } } */ ++/* { dg-final { scan-assembler-not "my_copysignq2:.*\\bl\t%plt\\(__builtin_copysignq\\).*my_copysignq2" } } */ ++/* { dg-final { scan-assembler-not "my_infq2:.*\\bl\t%plt\\(__builtin_infq\\).*my_infq2" } } */ ++/* { dg-final { scan-assembler-not "my_huge_valq2:.*\\bl\t%plt\\(__builtin_huge_valq\\).*my_huge_valq2" } } */ ++/* { dg-final { scan-assembler-not "my_nanq2:.*\\bl\t%plt\\(__builtin_nanq\\).*my_nanq2" } } */ ++/* { dg-final { scan-assembler-not "my_nansq2:.*\\bl\t%plt\\(__builtin_nansq\\).*my_nansq2" } } */ ++ ++__float128 ++my_fabsq1 (__float128 a) ++{ ++ return __builtin_fabsq (a); ++} ++ ++_Float128 ++my_fabsq2 (_Float128 a) ++{ ++ return __builtin_fabsq (a); ++} ++ ++__float128 ++my_copysignq1 (__float128 a, __float128 b) ++{ ++ return __builtin_copysignq (a, b); ++} ++ ++_Float128 ++my_copysignq2 (_Float128 a, _Float128 b) ++{ ++ return __builtin_copysignq (a, b); ++} ++ ++__float128 ++my_infq1 (void) ++{ ++ return __builtin_infq (); ++} ++ ++_Float128 ++my_infq2 (void) ++{ ++ return __builtin_infq (); ++} ++ ++__float128 ++my_huge_valq1 (void) ++{ ++ return __builtin_huge_valq (); ++} ++ ++_Float128 ++my_huge_valq2 (void) ++{ ++ return __builtin_huge_valq 
(); ++} ++ ++__float128 ++my_nanq1 (void) ++{ ++ return __builtin_nanq (""); ++} ++ ++_Float128 ++my_nanq2 (void) ++{ ++ return __builtin_nanq (""); ++} ++ ++__float128 ++my_nansq1 (void) ++{ ++ return __builtin_nansq (""); ++} ++ ++_Float128 ++my_nansq2 (void) ++{ ++ return __builtin_nansq (""); ++} ++ +-- +2.33.0 + diff --git a/LoongArch-Improve-GAR-store-for-va_list.patch b/LoongArch-Improve-GAR-store-for-va_list.patch new file mode 100644 index 0000000000000000000000000000000000000000..af418a954e91da7a3cbd5532705b108ba509a7ac --- /dev/null +++ b/LoongArch-Improve-GAR-store-for-va_list.patch @@ -0,0 +1,83 @@ +From 4075f299ca6a5d15fdb46f877cbe11b7166a19ff Mon Sep 17 00:00:00 2001 +From: Xi Ruoyao +Date: Wed, 29 Mar 2023 01:36:09 +0800 +Subject: [PATCH 042/124] LoongArch: Improve GAR store for va_list + +LoongArch backend used to save all GARs for a function with variable +arguments. But sometimes a function only accepts variable arguments for +a purpose like C++ function overloading. For example, POSIX defines +open() as: + + int open(const char *path, int oflag, ...); + +But only two forms are actually used: + + int open(const char *pathname, int flags); + int open(const char *pathname, int flags, mode_t mode); + +So it's obviously a waste to save all 8 GARs in open(). We can use the +cfun->va_list_gpr_size field set by the stdarg pass to only save the +GARs necessary to be saved. + +If the va_list escapes (for example, in fprintf() we pass it to +vfprintf()), stdarg would set cfun->va_list_gpr_size to 255 so we +don't need a special case. + +With this patch, only one GAR ($a2/$r6) is saved in open(). Ideally +even this stack store should be omitted too, but doing so is not trivial +and AFAIK there are no compilers (for any target) performing the "ideal" +optimization here, see https://godbolt.org/z/n1YqWq9c9. + +Bootstrapped and regtested on loongarch64-linux-gnu. Ok for trunk +(GCC 14 or now)? 
+ +gcc/ChangeLog: + + * config/loongarch/loongarch.cc + (loongarch_setup_incoming_varargs): Don't save more GARs than + cfun->va_list_gpr_size / UNITS_PER_WORD. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/va_arg.c: New test. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/testsuite/gcc.target/loongarch/va_arg.c | 24 +++++++++++++++++++++ + 1 file changed, 24 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/va_arg.c + +diff --git a/gcc/testsuite/gcc.target/loongarch/va_arg.c b/gcc/testsuite/gcc.target/loongarch/va_arg.c +new file mode 100644 +index 000000000..980c96d0e +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/va_arg.c +@@ -0,0 +1,24 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2" } */ ++ ++/* Technically we shouldn't save any register for this function: it should be ++ compiled as if it accepts 3 named arguments. But AFAIK no compilers can ++ achieve this "perfect" optimization now, so just ensure we are using the ++ knowledge provided by stdarg pass and we won't save GARs impossible to be ++ accessed with __builtin_va_arg () when the va_list does not escape. */ ++ ++/* { dg-final { scan-assembler-not "st.*r7" } } */ ++ ++int ++test (int a0, ...) 
++{ ++ void *arg; ++ int a1, a2; ++ ++ __builtin_va_start (arg, a0); ++ a1 = __builtin_va_arg (arg, int); ++ a2 = __builtin_va_arg (arg, int); ++ __builtin_va_end (arg); ++ ++ return a0 + a1 + a2; ++} +-- +2.33.0 + diff --git a/LoongArch-Improve-cpymemsi-expansion-PR109465.patch b/LoongArch-Improve-cpymemsi-expansion-PR109465.patch new file mode 100644 index 0000000000000000000000000000000000000000..3c8d3aa884c6ad41ca67caa9c0dfa9004966ee47 --- /dev/null +++ b/LoongArch-Improve-cpymemsi-expansion-PR109465.patch @@ -0,0 +1,339 @@ +From 33fff578e7df7aa7e236efc6c9c85c595918d86a Mon Sep 17 00:00:00 2001 +From: Xi Ruoyao +Date: Wed, 12 Apr 2023 11:45:48 +0000 +Subject: [PATCH 043/124] LoongArch: Improve cpymemsi expansion [PR109465] + +We'd been generating really bad block move sequences which is recently +complained by kernel developers who tried __builtin_memcpy. To improve +it: + +1. Take the advantage of -mno-strict-align. When it is set, set mode + size to UNITS_PER_WORD regardless of the alignment. +2. Half the mode size when (block size) % (mode size) != 0, instead of + falling back to ld.bu/st.b at once. +3. Limit the length of block move sequence considering the number of + instructions, not the size of block. When -mstrict-align is set and + the block is not aligned, the old size limit for straight-line + implementation (64 bytes) was definitely too large (we don't have 64 + registers anyway). + +Change since v1: add a comment about the calculation of num_reg. + +gcc/ChangeLog: + + PR target/109465 + * config/loongarch/loongarch-protos.h + (loongarch_expand_block_move): Add a parameter as alignment RTX. + * config/loongarch/loongarch.h: + (LARCH_MAX_MOVE_BYTES_PER_LOOP_ITER): Remove. + (LARCH_MAX_MOVE_BYTES_STRAIGHT): Remove. + (LARCH_MAX_MOVE_OPS_PER_LOOP_ITER): Define. + (LARCH_MAX_MOVE_OPS_STRAIGHT): Define. + (MOVE_RATIO): Use LARCH_MAX_MOVE_OPS_PER_LOOP_ITER instead of + LARCH_MAX_MOVE_BYTES_PER_LOOP_ITER. 
+ * config/loongarch/loongarch.cc (loongarch_expand_block_move): + Take the alignment from the parameter, but set it to + UNITS_PER_WORD if !TARGET_STRICT_ALIGN. Limit the length of + straight-line implementation with LARCH_MAX_MOVE_OPS_STRAIGHT + instead of LARCH_MAX_MOVE_BYTES_STRAIGHT. + (loongarch_block_move_straight): When there are left-over bytes, + half the mode size instead of falling back to byte mode at once. + (loongarch_block_move_loop): Limit the length of loop body with + LARCH_MAX_MOVE_OPS_PER_LOOP_ITER instead of + LARCH_MAX_MOVE_BYTES_PER_LOOP_ITER. + * config/loongarch/loongarch.md (cpymemsi): Pass the alignment + to loongarch_expand_block_move. + +gcc/testsuite/ChangeLog: + + PR target/109465 + * gcc.target/loongarch/pr109465-1.c: New test. + * gcc.target/loongarch/pr109465-2.c: New test. + * gcc.target/loongarch/pr109465-3.c: New test. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/loongarch-protos.h | 2 +- + gcc/config/loongarch/loongarch.cc | 95 +++++++++++-------- + gcc/config/loongarch/loongarch.h | 10 +- + gcc/config/loongarch/loongarch.md | 3 +- + .../gcc.target/loongarch/pr109465-1.c | 9 ++ + .../gcc.target/loongarch/pr109465-2.c | 9 ++ + .../gcc.target/loongarch/pr109465-3.c | 12 +++ + 7 files changed, 91 insertions(+), 49 deletions(-) + create mode 100644 gcc/testsuite/gcc.target/loongarch/pr109465-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/pr109465-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/pr109465-3.c + +diff --git a/gcc/config/loongarch/loongarch-protos.h b/gcc/config/loongarch/loongarch-protos.h +index 0a9b47722..3ac3b5e19 100644 +--- a/gcc/config/loongarch/loongarch-protos.h ++++ b/gcc/config/loongarch/loongarch-protos.h +@@ -95,7 +95,7 @@ extern void loongarch_expand_conditional_trap (rtx); + #endif + extern void loongarch_set_return_address (rtx, rtx); + extern bool loongarch_move_by_pieces_p (unsigned HOST_WIDE_INT, unsigned int); +-extern bool 
loongarch_expand_block_move (rtx, rtx, rtx); ++extern bool loongarch_expand_block_move (rtx, rtx, rtx, rtx); + extern bool loongarch_do_optimize_block_move_p (void); + + extern bool loongarch_expand_ext_as_unaligned_load (rtx, rtx, HOST_WIDE_INT, +diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc +index 233dddbac..d3c6f22ad 100644 +--- a/gcc/config/loongarch/loongarch.cc ++++ b/gcc/config/loongarch/loongarch.cc +@@ -4456,41 +4456,46 @@ loongarch_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED, + Assume that the areas do not overlap. */ + + static void +-loongarch_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length) ++loongarch_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length, ++ HOST_WIDE_INT delta) + { +- HOST_WIDE_INT offset, delta; +- unsigned HOST_WIDE_INT bits; ++ HOST_WIDE_INT offs, delta_cur; + int i; + machine_mode mode; + rtx *regs; + +- bits = MIN (BITS_PER_WORD, MIN (MEM_ALIGN (src), MEM_ALIGN (dest))); +- +- mode = int_mode_for_size (bits, 0).require (); +- delta = bits / BITS_PER_UNIT; ++ /* Calculate how many registers we'll need for the block move. ++ We'll emit length / delta move operations with delta as the size ++ first. Then we may still have length % delta bytes not copied. ++ We handle these remaining bytes by move operations with smaller ++ (halfed) sizes. For example, if length = 21 and delta = 8, we'll ++ emit two ld.d/st.d pairs, one ld.w/st.w pair, and one ld.b/st.b ++ pair. For each load/store pair we use a dedicated register to keep ++ the pipeline as populated as possible. */ ++ HOST_WIDE_INT num_reg = length / delta; ++ for (delta_cur = delta / 2; delta_cur != 0; delta_cur /= 2) ++ num_reg += !!(length & delta_cur); + + /* Allocate a buffer for the temporary registers. */ +- regs = XALLOCAVEC (rtx, length / delta); ++ regs = XALLOCAVEC (rtx, num_reg); + +- /* Load as many BITS-sized chunks as possible. 
Use a normal load if +- the source has enough alignment, otherwise use left/right pairs. */ +- for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++) ++ for (delta_cur = delta, i = 0, offs = 0; offs < length; delta_cur /= 2) + { +- regs[i] = gen_reg_rtx (mode); +- loongarch_emit_move (regs[i], adjust_address (src, mode, offset)); +- } ++ mode = int_mode_for_size (delta_cur * BITS_PER_UNIT, 0).require (); + +- for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++) +- loongarch_emit_move (adjust_address (dest, mode, offset), regs[i]); ++ for (; offs + delta_cur <= length; offs += delta_cur, i++) ++ { ++ regs[i] = gen_reg_rtx (mode); ++ loongarch_emit_move (regs[i], adjust_address (src, mode, offs)); ++ } ++ } + +- /* Mop up any left-over bytes. */ +- if (offset < length) ++ for (delta_cur = delta, i = 0, offs = 0; offs < length; delta_cur /= 2) + { +- src = adjust_address (src, BLKmode, offset); +- dest = adjust_address (dest, BLKmode, offset); +- move_by_pieces (dest, src, length - offset, +- MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), +- (enum memop_ret) 0); ++ mode = int_mode_for_size (delta_cur * BITS_PER_UNIT, 0).require (); ++ ++ for (; offs + delta_cur <= length; offs += delta_cur, i++) ++ loongarch_emit_move (adjust_address (dest, mode, offs), regs[i]); + } + } + +@@ -4520,10 +4525,11 @@ loongarch_adjust_block_mem (rtx mem, HOST_WIDE_INT length, rtx *loop_reg, + + static void + loongarch_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length, +- HOST_WIDE_INT bytes_per_iter) ++ HOST_WIDE_INT align) + { + rtx_code_label *label; + rtx src_reg, dest_reg, final_src, test; ++ HOST_WIDE_INT bytes_per_iter = align * LARCH_MAX_MOVE_OPS_PER_LOOP_ITER; + HOST_WIDE_INT leftover; + + leftover = length % bytes_per_iter; +@@ -4543,7 +4549,7 @@ loongarch_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length, + emit_label (label); + + /* Emit the loop body. 
*/ +- loongarch_block_move_straight (dest, src, bytes_per_iter); ++ loongarch_block_move_straight (dest, src, bytes_per_iter, align); + + /* Move on to the next block. */ + loongarch_emit_move (src_reg, +@@ -4560,7 +4566,7 @@ loongarch_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length, + + /* Mop up any left-over bytes. */ + if (leftover) +- loongarch_block_move_straight (dest, src, leftover); ++ loongarch_block_move_straight (dest, src, leftover, align); + else + /* Temporary fix for PR79150. */ + emit_insn (gen_nop ()); +@@ -4570,25 +4576,32 @@ loongarch_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length, + memory reference SRC to memory reference DEST. */ + + bool +-loongarch_expand_block_move (rtx dest, rtx src, rtx length) ++loongarch_expand_block_move (rtx dest, rtx src, rtx r_length, rtx r_align) + { +- int max_move_bytes = LARCH_MAX_MOVE_BYTES_STRAIGHT; ++ if (!CONST_INT_P (r_length)) ++ return false; ++ ++ HOST_WIDE_INT length = INTVAL (r_length); ++ if (length > loongarch_max_inline_memcpy_size) ++ return false; ++ ++ HOST_WIDE_INT align = INTVAL (r_align); ++ ++ if (!TARGET_STRICT_ALIGN || align > UNITS_PER_WORD) ++ align = UNITS_PER_WORD; + +- if (CONST_INT_P (length) +- && INTVAL (length) <= loongarch_max_inline_memcpy_size) ++ if (length <= align * LARCH_MAX_MOVE_OPS_STRAIGHT) + { +- if (INTVAL (length) <= max_move_bytes) +- { +- loongarch_block_move_straight (dest, src, INTVAL (length)); +- return true; +- } +- else if (optimize) +- { +- loongarch_block_move_loop (dest, src, INTVAL (length), +- LARCH_MAX_MOVE_BYTES_PER_LOOP_ITER); +- return true; +- } ++ loongarch_block_move_straight (dest, src, length, align); ++ return true; ++ } ++ ++ if (optimize) ++ { ++ loongarch_block_move_loop (dest, src, length, align); ++ return true; + } ++ + return false; + } + +diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h +index 9d3cd9ca0..af24bfa01 100644 +--- a/gcc/config/loongarch/loongarch.h ++++ 
b/gcc/config/loongarch/loongarch.h +@@ -1062,13 +1062,13 @@ typedef struct { + + /* The maximum number of bytes that can be copied by one iteration of + a cpymemsi loop; see loongarch_block_move_loop. */ +-#define LARCH_MAX_MOVE_BYTES_PER_LOOP_ITER (UNITS_PER_WORD * 4) ++#define LARCH_MAX_MOVE_OPS_PER_LOOP_ITER 4 + + /* The maximum number of bytes that can be copied by a straight-line + implementation of cpymemsi; see loongarch_block_move_straight. We want + to make sure that any loop-based implementation will iterate at + least twice. */ +-#define LARCH_MAX_MOVE_BYTES_STRAIGHT (LARCH_MAX_MOVE_BYTES_PER_LOOP_ITER * 2) ++#define LARCH_MAX_MOVE_OPS_STRAIGHT (LARCH_MAX_MOVE_OPS_PER_LOOP_ITER * 2) + + /* The base cost of a memcpy call, for MOVE_RATIO and friends. These + values were determined experimentally by benchmarking with CSiBE. +@@ -1076,7 +1076,7 @@ typedef struct { + #define LARCH_CALL_RATIO 8 + + /* Any loop-based implementation of cpymemsi will have at least +- LARCH_MAX_MOVE_BYTES_STRAIGHT / UNITS_PER_WORD memory-to-memory ++ LARCH_MAX_MOVE_OPS_PER_LOOP_ITER memory-to-memory + moves, so allow individual copies of fewer elements. + + When cpymemsi is not available, use a value approximating +@@ -1087,9 +1087,7 @@ typedef struct { + value of LARCH_CALL_RATIO to take that into account. */ + + #define MOVE_RATIO(speed) \ +- (HAVE_cpymemsi \ +- ? LARCH_MAX_MOVE_BYTES_PER_LOOP_ITER / UNITS_PER_WORD \ +- : CLEAR_RATIO (speed) / 2) ++ (HAVE_cpymemsi ? LARCH_MAX_MOVE_OPS_PER_LOOP_ITER : CLEAR_RATIO (speed) / 2) + + /* For CLEAR_RATIO, when optimizing for size, give a better estimate + of the length of a memset call, but use the default otherwise. 
*/ +diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md +index b2f7c7f78..b23248c33 100644 +--- a/gcc/config/loongarch/loongarch.md ++++ b/gcc/config/loongarch/loongarch.md +@@ -2488,7 +2488,8 @@ + "" + { + if (TARGET_DO_OPTIMIZE_BLOCK_MOVE_P +- && loongarch_expand_block_move (operands[0], operands[1], operands[2])) ++ && loongarch_expand_block_move (operands[0], operands[1], ++ operands[2], operands[3])) + DONE; + else + FAIL; +diff --git a/gcc/testsuite/gcc.target/loongarch/pr109465-1.c b/gcc/testsuite/gcc.target/loongarch/pr109465-1.c +new file mode 100644 +index 000000000..4cd35d139 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/pr109465-1.c +@@ -0,0 +1,9 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -mabi=lp64d -mno-strict-align" } */ ++/* { dg-final { scan-assembler-times "st\\.d|stptr\\.d" 1 } } */ ++/* { dg-final { scan-assembler-times "st\\.w|stptr\\.w" 1 } } */ ++/* { dg-final { scan-assembler-times "st\\.h" 1 } } */ ++/* { dg-final { scan-assembler-times "st\\.b" 1 } } */ ++ ++extern char a[], b[]; ++void test() { __builtin_memcpy(a, b, 15); } +diff --git a/gcc/testsuite/gcc.target/loongarch/pr109465-2.c b/gcc/testsuite/gcc.target/loongarch/pr109465-2.c +new file mode 100644 +index 000000000..703eb951c +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/pr109465-2.c +@@ -0,0 +1,9 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -mabi=lp64d -mstrict-align" } */ ++/* { dg-final { scan-assembler-times "st\\.d|stptr\\.d" 1 } } */ ++/* { dg-final { scan-assembler-times "st\\.w|stptr\\.w" 1 } } */ ++/* { dg-final { scan-assembler-times "st\\.h" 1 } } */ ++/* { dg-final { scan-assembler-times "st\\.b" 1 } } */ ++ ++extern long a[], b[]; ++void test() { __builtin_memcpy(a, b, 15); } +diff --git a/gcc/testsuite/gcc.target/loongarch/pr109465-3.c b/gcc/testsuite/gcc.target/loongarch/pr109465-3.c +new file mode 100644 +index 000000000..d6a80659b +--- /dev/null ++++ 
b/gcc/testsuite/gcc.target/loongarch/pr109465-3.c +@@ -0,0 +1,12 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -mabi=lp64d -mstrict-align" } */ ++ ++/* Three loop iterations each contains 4 st.b, and 3 st.b after the loop */ ++/* { dg-final { scan-assembler-times "st\\.b" 7 } } */ ++ ++/* { dg-final { scan-assembler-not "st\\.h" } } */ ++/* { dg-final { scan-assembler-not "st\\.w|stptr\\.w" } } */ ++/* { dg-final { scan-assembler-not "st\\.d|stptr\\.d" } } */ ++ ++extern char a[], b[]; ++void test() { __builtin_memcpy(a, b, 15); } +-- +2.33.0 + diff --git a/LoongArch-Libitm-add-LoongArch-support.patch b/LoongArch-Libitm-add-LoongArch-support.patch new file mode 100644 index 0000000000000000000000000000000000000000..f202ea7ed4d662b822409d65c58bb6d7670d54fa --- /dev/null +++ b/LoongArch-Libitm-add-LoongArch-support.patch @@ -0,0 +1,291 @@ +From 7f9f1dd3c87cffeab58150997e22e8fff707646b Mon Sep 17 00:00:00 2001 +From: Lulu Cheng +Date: Mon, 26 Sep 2022 09:42:51 +0800 +Subject: [PATCH 019/124] LoongArch: Libitm add LoongArch support. + +Co-Authored-By: Yang Yujie + +libitm/ChangeLog: + + * configure.tgt: Add loongarch support. + * config/loongarch/asm.h: New file. + * config/loongarch/sjlj.S: New file. + * config/loongarch/target.h: New file. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + libitm/config/loongarch/asm.h | 54 +++++++++++++ + libitm/config/loongarch/sjlj.S | 127 +++++++++++++++++++++++++++++++ + libitm/config/loongarch/target.h | 50 ++++++++++++ + libitm/configure.tgt | 2 + + 4 files changed, 233 insertions(+) + create mode 100644 libitm/config/loongarch/asm.h + create mode 100644 libitm/config/loongarch/sjlj.S + create mode 100644 libitm/config/loongarch/target.h + +diff --git a/libitm/config/loongarch/asm.h b/libitm/config/loongarch/asm.h +new file mode 100644 +index 000000000..a8e3304bb +--- /dev/null ++++ b/libitm/config/loongarch/asm.h +@@ -0,0 +1,54 @@ ++/* Copyright (C) 2022 Free Software Foundation, Inc. 
++ Contributed by Loongson Co. Ltd. ++ ++ This file is part of the GNU Transactional Memory Library (libitm). ++ ++ Libitm is free software; you can redistribute it and/or modify it ++ under the terms of the GNU General Public License as published by ++ the Free Software Foundation; either version 3 of the License, or ++ (at your option) any later version. ++ ++ Libitm is distributed in the hope that it will be useful, but WITHOUT ANY ++ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS ++ FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ Under Section 7 of GPL version 3, you are granted additional ++ permissions described in the GCC Runtime Library Exception, version ++ 3.1, as published by the Free Software Foundation. ++ ++ You should have received a copy of the GNU General Public License and ++ a copy of the GCC Runtime Library Exception along with this program; ++ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see ++ . */ ++ ++#ifndef _LA_ASM_H ++#define _LA_ASM_H ++ ++#if defined(__loongarch_lp64) ++# define GPR_L ld.d ++# define GPR_S st.d ++# define SZ_GPR 8 ++# define ADDSP(si) addi.d $sp, $sp, si ++#elif defined(__loongarch64_ilp32) ++# define GPR_L ld.w ++# define GPR_S st.w ++# define SZ_GPR 4 ++# define ADDSP(si) addi.w $sp, $sp, si ++#else ++# error Unsupported GPR size (must be 64-bit or 32-bit). ++#endif ++ ++#if defined(__loongarch_double_float) ++# define FPR_L fld.d ++# define FPR_S fst.d ++# define SZ_FPR 8 ++#elif defined(__loongarch_single_float) ++# define FPR_L fld.s ++# define FPR_S fst.s ++# define SZ_FPR 4 ++#else ++# define SZ_FPR 0 ++#endif ++ ++#endif /* _LA_ASM_H */ +diff --git a/libitm/config/loongarch/sjlj.S b/libitm/config/loongarch/sjlj.S +new file mode 100644 +index 000000000..a5f9fadde +--- /dev/null ++++ b/libitm/config/loongarch/sjlj.S +@@ -0,0 +1,127 @@ ++/* Copyright (C) 2022 Free Software Foundation, Inc. ++ Contributed by Loongson Co. Ltd. 
++ ++ This file is part of the GNU Transactional Memory Library (libitm). ++ ++ Libitm is free software; you can redistribute it and/or modify it ++ under the terms of the GNU General Public License as published by ++ the Free Software Foundation; either version 3 of the License, or ++ (at your option) any later version. ++ ++ Libitm is distributed in the hope that it will be useful, but WITHOUT ANY ++ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS ++ FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. ++ ++ Under Section 7 of GPL version 3, you are granted additional ++ permissions described in the GCC Runtime Library Exception, version ++ 3.1, as published by the Free Software Foundation. ++ ++ You should have received a copy of the GNU General Public License and ++ a copy of the GCC Runtime Library Exception along with this program; ++ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see ++ . */ ++ ++#include "asmcfi.h" ++#include "asm.h" ++ ++ .text ++ .align 2 ++ .global _ITM_beginTransaction ++ .type _ITM_beginTransaction, @function ++ ++_ITM_beginTransaction: ++ cfi_startproc ++ move $r5, $sp ++ ADDSP(-(12*SZ_GPR+8*SZ_FPR)) ++ cfi_adjust_cfa_offset(12*SZ_GPR+8*SZ_FPR) ++ ++ /* Frame Pointer */ ++ GPR_S $fp, $sp, 0*SZ_GPR ++ cfi_rel_offset(22, 0) ++ ++ /* Return Address */ ++ GPR_S $r1, $sp, 1*SZ_GPR ++ cfi_rel_offset(1, SZ_GPR) ++ ++ /* Caller's $sp */ ++ GPR_S $r5, $sp, 2*SZ_GPR ++ ++ /* Callee-saved scratch GPRs (r23-r31) */ ++ GPR_S $s0, $sp, 3*SZ_GPR ++ GPR_S $s1, $sp, 4*SZ_GPR ++ GPR_S $s2, $sp, 5*SZ_GPR ++ GPR_S $s3, $sp, 6*SZ_GPR ++ GPR_S $s4, $sp, 7*SZ_GPR ++ GPR_S $s5, $sp, 8*SZ_GPR ++ GPR_S $s6, $sp, 9*SZ_GPR ++ GPR_S $s7, $sp, 10*SZ_GPR ++ GPR_S $s8, $sp, 11*SZ_GPR ++ ++#if !defined(__loongarch_soft_float) ++ /* Callee-saved scratch FPRs (f24-f31) */ ++ FPR_S $f24, $sp, 12*SZ_GPR + 0*SZ_FPR ++ FPR_S $f25, $sp, 12*SZ_GPR + 1*SZ_FPR ++ FPR_S $f26, $sp, 12*SZ_GPR + 2*SZ_FPR ++ 
FPR_S $f27, $sp, 12*SZ_GPR + 3*SZ_FPR ++ FPR_S $f28, $sp, 12*SZ_GPR + 4*SZ_FPR ++ FPR_S $f29, $sp, 12*SZ_GPR + 5*SZ_FPR ++ FPR_S $f30, $sp, 12*SZ_GPR + 6*SZ_FPR ++ FPR_S $f31, $sp, 12*SZ_GPR + 7*SZ_FPR ++#endif ++ move $fp, $sp ++ ++ /* Invoke GTM_begin_transaction with the struct we've just built. */ ++ move $r5, $sp ++ bl %plt(GTM_begin_transaction) ++ ++ /* Return. (no call-saved scratch reg needs to be restored here) */ ++ GPR_L $fp, $sp, 0*SZ_GPR ++ cfi_restore(22) ++ GPR_L $r1, $sp, 1*SZ_GPR ++ cfi_restore(1) ++ ++ ADDSP(12*SZ_GPR+8*SZ_FPR) ++ cfi_adjust_cfa_offset(-(12*SZ_GPR+8*SZ_FPR)) ++ ++ jr $r1 ++ cfi_endproc ++ .size _ITM_beginTransaction, . - _ITM_beginTransaction ++ ++ .align 2 ++ .global GTM_longjmp ++ .hidden GTM_longjmp ++ .type GTM_longjmp, @function ++ ++GTM_longjmp: ++ cfi_startproc ++ GPR_L $s0, $r5, 3*SZ_GPR ++ GPR_L $s1, $r5, 4*SZ_GPR ++ GPR_L $s2, $r5, 5*SZ_GPR ++ GPR_L $s3, $r5, 6*SZ_GPR ++ GPR_L $s4, $r5, 7*SZ_GPR ++ GPR_L $s5, $r5, 8*SZ_GPR ++ GPR_L $s6, $r5, 9*SZ_GPR ++ GPR_L $s7, $r5, 10*SZ_GPR ++ GPR_L $s8, $r5, 11*SZ_GPR ++ ++ FPR_L $f24, $r5, 12*SZ_GPR + 0*SZ_FPR ++ FPR_L $f25, $r5, 12*SZ_GPR + 1*SZ_FPR ++ FPR_L $f26, $r5, 12*SZ_GPR + 2*SZ_FPR ++ FPR_L $f27, $r5, 12*SZ_GPR + 3*SZ_FPR ++ FPR_L $f28, $r5, 12*SZ_GPR + 4*SZ_FPR ++ FPR_L $f29, $r5, 12*SZ_GPR + 5*SZ_FPR ++ FPR_L $f30, $r5, 12*SZ_GPR + 6*SZ_FPR ++ FPR_L $f31, $r5, 12*SZ_GPR + 7*SZ_FPR ++ ++ GPR_L $r7, $r5, 2*SZ_GPR ++ GPR_L $fp, $r5, 0*SZ_GPR ++ GPR_L $r1, $r5, 1*SZ_GPR ++ cfi_def_cfa(5, 0) ++ move $sp, $r7 ++ jr $r1 ++ cfi_endproc ++ .size GTM_longjmp, . - GTM_longjmp ++ ++#ifdef __linux__ ++.section .note.GNU-stack, "", @progbits ++#endif +diff --git a/libitm/config/loongarch/target.h b/libitm/config/loongarch/target.h +new file mode 100644 +index 000000000..0c5cf3ada +--- /dev/null ++++ b/libitm/config/loongarch/target.h +@@ -0,0 +1,50 @@ ++/* Copyright (C) 2022 Free Software Foundation, Inc. ++ Contributed by Loongson Co. Ltd. 
++ ++ This file is part of the GNU Transactional Memory Library (libitm). ++ ++ Libitm is free software; you can redistribute it and/or modify it ++ under the terms of the GNU General Public License as published by ++ the Free Software Foundation; either version 3 of the License, or ++ (at your option) any later version. ++ ++ Libitm is distributed in the hope that it will be useful, but WITHOUT ANY ++ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS ++ FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ Under Section 7 of GPL version 3, you are granted additional ++ permissions described in the GCC Runtime Library Exception, version ++ 3.1, as published by the Free Software Foundation. ++ ++ You should have received a copy of the GNU General Public License and ++ a copy of the GCC Runtime Library Exception along with this program; ++ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see ++ . */ ++ ++namespace GTM HIDDEN { ++ ++typedef struct gtm_jmpbuf ++ { ++ long int fp; /* Frame Pointer: r22 */ ++ long int pc; /* Return Address: r1 */ ++ void *cfa; /* CFA: r3 */ ++ long int gpr[9]; /* Callee-saved scratch GPRs: r23(s0)-r31(s8) */ ++ ++ /* Callee-saved scratch FPRs: f24-f31 */ ++#if defined(__loongarch_double_float) ++ double fpr[8]; ++#elif defined(__loongarch_single_float) ++ float fpr[8]; ++#endif ++ } gtm_jmpbuf; ++ ++#define HW_CACHELINE_SIZE 128 ++ ++static inline void ++cpu_relax (void) ++{ ++ __asm__ volatile ("" : : : "memory"); ++} ++ ++} // namespace GTM +diff --git a/libitm/configure.tgt b/libitm/configure.tgt +index 06e90973e..4c0e78cff 100644 +--- a/libitm/configure.tgt ++++ b/libitm/configure.tgt +@@ -80,6 +80,8 @@ EOF + ARCH=x86 + ;; + ++ loongarch*) ARCH=loongarch ;; ++ + sh*) ARCH=sh ;; + + sparc) +-- +2.33.0 + diff --git a/LoongArch-Modify-the-output-message-string-of-the-wa.patch b/LoongArch-Modify-the-output-message-string-of-the-wa.patch new file mode 100644 index 
0000000000000000000000000000000000000000..392f935a87dbb2463e1a3e779ea3e67ed4bf73ad --- /dev/null +++ b/LoongArch-Modify-the-output-message-string-of-the-wa.patch @@ -0,0 +1,37 @@ +From 83d6cfbbdc41766af9d7941d00204cc0f26ff40c Mon Sep 17 00:00:00 2001 +From: Lulu Cheng +Date: Tue, 26 Jul 2022 21:03:52 +0800 +Subject: [PATCH 005/124] LoongArch: Modify the output message string of the + warning. + +Fix bug for "error: spurious trailing punctuation sequence '.' in format [-Werror=format-diag]". + +gcc/ChangeLog: + + * config/loongarch/loongarch-opts.cc: Modify the output message string + of the warning. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/loongarch-opts.cc | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/gcc/config/loongarch/loongarch-opts.cc b/gcc/config/loongarch/loongarch-opts.cc +index fc477bfd4..3f70943de 100644 +--- a/gcc/config/loongarch/loongarch-opts.cc ++++ b/gcc/config/loongarch/loongarch-opts.cc +@@ -378,8 +378,8 @@ fallback: + t.cmodel = constrained.cmodel ? opt_cmodel : CMODEL_NORMAL; + if (t.cmodel != CMODEL_NORMAL) + { +- warning (0, "%qs is not supported, now cmodel is set to 'normal'.", +- loongarch_cmodel_strings[t.cmodel]); ++ warning (0, "%qs is not supported, now cmodel is set to %qs", ++ loongarch_cmodel_strings[t.cmodel], "normal"); + t.cmodel = CMODEL_NORMAL; + } + +-- +2.33.0 + diff --git a/LoongArch-Optimize-additions-with-immediates.patch b/LoongArch-Optimize-additions-with-immediates.patch new file mode 100644 index 0000000000000000000000000000000000000000..69acae4cf336d00d38f93a6d0024805ff0d20c1d --- /dev/null +++ b/LoongArch-Optimize-additions-with-immediates.patch @@ -0,0 +1,445 @@ +From a31baa1e437fa4acedfaf03db91c1d6e5ce78013 Mon Sep 17 00:00:00 2001 +From: Xi Ruoyao +Date: Sun, 2 Apr 2023 21:37:49 +0800 +Subject: [PATCH 041/124] LoongArch: Optimize additions with immediates + +1. Use addu16i.d for TARGET_64BIT and suitable immediates. +2. 
Split one addition with immediate into two addu16i.d or addi.{d/w} + instructions if possible. This can avoid using a temp register w/o + increase the count of instructions. + +Inspired by https://reviews.llvm.org/D143710 and +https://reviews.llvm.org/D147222. + +Bootstrapped and regtested on loongarch64-linux-gnu. Ok for GCC 14? + +gcc/ChangeLog: + + * config/loongarch/loongarch-protos.h + (loongarch_addu16i_imm12_operand_p): New function prototype. + (loongarch_split_plus_constant): Likewise. + * config/loongarch/loongarch.cc + (loongarch_addu16i_imm12_operand_p): New function. + (loongarch_split_plus_constant): Likewise. + * config/loongarch/loongarch.h (ADDU16I_OPERAND): New macro. + (DUAL_IMM12_OPERAND): Likewise. + (DUAL_ADDU16I_OPERAND): Likewise. + * config/loongarch/constraints.md (La, Lb, Lc, Ld, Le): New + constraint. + * config/loongarch/predicates.md (const_dual_imm12_operand): New + predicate. + (const_addu16i_operand): Likewise. + (const_addu16i_imm12_di_operand): Likewise. + (const_addu16i_imm12_si_operand): Likewise. + (plus_di_operand): Likewise. + (plus_si_operand): Likewise. + (plus_si_extend_operand): Likewise. + * config/loongarch/loongarch.md (add3): Convert to + define_insn_and_split. Use plus__operand predicate + instead of arith_operand. Add alternatives for La, Lb, Lc, Ld, + and Le constraints. + (*addsi3_extended): Convert to define_insn_and_split. Use + plus_si_extend_operand instead of arith_operand. Add + alternatives for La and Le alternatives. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/add-const.c: New test. + * gcc.target/loongarch/stack-check-cfa-1.c: Adjust for stack + frame size change. + * gcc.target/loongarch/stack-check-cfa-2.c: Likewise. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/constraints.md | 46 ++++++++++++- + gcc/config/loongarch/loongarch-protos.h | 2 + + gcc/config/loongarch/loongarch.cc | 44 +++++++++++++ + gcc/config/loongarch/loongarch.h | 19 ++++++ + gcc/config/loongarch/loongarch.md | 66 +++++++++++++++---- + gcc/config/loongarch/predicates.md | 36 ++++++++++ + .../gcc.target/loongarch/add-const.c | 45 +++++++++++++ + .../gcc.target/loongarch/stack-check-cfa-1.c | 2 +- + .../gcc.target/loongarch/stack-check-cfa-2.c | 2 +- + 9 files changed, 246 insertions(+), 16 deletions(-) + create mode 100644 gcc/testsuite/gcc.target/loongarch/add-const.c + +diff --git a/gcc/config/loongarch/constraints.md b/gcc/config/loongarch/constraints.md +index 46f7f63ae..25f3cda35 100644 +--- a/gcc/config/loongarch/constraints.md ++++ b/gcc/config/loongarch/constraints.md +@@ -60,7 +60,22 @@ + ;; "I" "A signed 12-bit constant (for arithmetic instructions)." + ;; "J" "Integer zero." + ;; "K" "An unsigned 12-bit constant (for logic instructions)." +-;; "L" <-----unused ++;; "L" - ++;; "La" ++;; "A signed constant in [-4096, 2048) or (2047, 4094]." ++;; "Lb" ++;; "A signed 32-bit constant and low 16-bit is zero, which can be ++;; added onto a register with addu16i.d. It matches nothing if ++;; the addu16i.d instruction is not available." ++;; "Lc" ++;; "A signed 64-bit constant can be expressed as Lb + I, but not a ++;; single Lb or I." ++;; "Ld" ++;; "A signed 64-bit constant can be expressed as Lb + Lb, but not a ++;; single Lb." ++;; "Le" ++;; "A signed 32-bit constant can be expressed as Lb + I, but not a ++;; single Lb or I." + ;; "M" <-----unused + ;; "N" <-----unused + ;; "O" <-----unused +@@ -170,6 +185,35 @@ + (and (match_code "const_int") + (match_test "IMM12_OPERAND_UNSIGNED (ival)"))) + ++(define_constraint "La" ++ "A signed constant in [-4096, 2048) or (2047, 4094]." 
++ (and (match_code "const_int") ++ (match_test "DUAL_IMM12_OPERAND (ival)"))) ++ ++(define_constraint "Lb" ++ "A signed 32-bit constant and low 16-bit is zero, which can be added ++ onto a register with addu16i.d." ++ (and (match_code "const_int") ++ (match_test "ADDU16I_OPERAND (ival)"))) ++ ++(define_constraint "Lc" ++ "A signed 64-bit constant can be expressed as Lb + I, but not a single Lb ++ or I." ++ (and (match_code "const_int") ++ (match_test "loongarch_addu16i_imm12_operand_p (ival, DImode)"))) ++ ++(define_constraint "Ld" ++ "A signed 64-bit constant can be expressed as Lb + Lb, but not a single ++ Lb." ++ (and (match_code "const_int") ++ (match_test "DUAL_ADDU16I_OPERAND (ival)"))) ++ ++(define_constraint "Le" ++ "A signed 32-bit constant can be expressed as Lb + I, but not a single Lb ++ or I." ++ (and (match_code "const_int") ++ (match_test "loongarch_addu16i_imm12_operand_p (ival, SImode)"))) ++ + (define_constraint "Yd" + "@internal + A constant @code{move_operand} that can be safely loaded using +diff --git a/gcc/config/loongarch/loongarch-protos.h b/gcc/config/loongarch/loongarch-protos.h +index 77b221724..0a9b47722 100644 +--- a/gcc/config/loongarch/loongarch-protos.h ++++ b/gcc/config/loongarch/loongarch-protos.h +@@ -83,6 +83,8 @@ extern rtx loongarch_legitimize_call_address (rtx); + extern rtx loongarch_subword (rtx, bool); + extern bool loongarch_split_move_p (rtx, rtx); + extern void loongarch_split_move (rtx, rtx, rtx); ++extern bool loongarch_addu16i_imm12_operand_p (HOST_WIDE_INT, machine_mode); ++extern void loongarch_split_plus_constant (rtx *, machine_mode); + extern const char *loongarch_output_move (rtx, rtx); + extern bool loongarch_cfun_has_cprestore_slot_p (void); + #ifdef RTX_CODE +diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc +index 1a4686f03..233dddbac 100644 +--- a/gcc/config/loongarch/loongarch.cc ++++ b/gcc/config/loongarch/loongarch.cc +@@ -3753,6 +3753,50 @@ loongarch_split_move (rtx 
dest, rtx src, rtx insn_) + } + } + ++/* Check if adding an integer constant value for a specific mode can be ++ performed with an addu16i.d instruction and an addi.{w/d} ++ instruction. */ ++ ++bool ++loongarch_addu16i_imm12_operand_p (HOST_WIDE_INT value, machine_mode mode) ++{ ++ /* Not necessary, but avoid unnecessary calculation if !TARGET_64BIT. */ ++ if (!TARGET_64BIT) ++ return false; ++ ++ if ((value & 0xffff) == 0) ++ return false; ++ ++ if (IMM12_OPERAND (value)) ++ return false; ++ ++ value = (value & ~HWIT_UC_0xFFF) + ((value & 0x800) << 1); ++ return ADDU16I_OPERAND (trunc_int_for_mode (value, mode)); ++} ++ ++/* Split one integer constant op[0] into two (op[1] and op[2]) for constant ++ plus operation in a specific mode. The splitted constants can be added ++ onto a register with a single instruction (addi.{d/w} or addu16i.d). */ ++ ++void ++loongarch_split_plus_constant (rtx *op, machine_mode mode) ++{ ++ HOST_WIDE_INT v = INTVAL (op[0]), a; ++ ++ if (DUAL_IMM12_OPERAND (v)) ++ a = (v > 0 ? 2047 : -2048); ++ else if (loongarch_addu16i_imm12_operand_p (v, mode)) ++ a = (v & ~HWIT_UC_0xFFF) + ((v & 0x800) << 1); ++ else if (mode == DImode && DUAL_ADDU16I_OPERAND (v)) ++ a = (v > 0 ? 0x7fff : -0x8000) << 16; ++ else ++ gcc_unreachable (); ++ ++ op[1] = gen_int_mode (a, mode); ++ v = v - (unsigned HOST_WIDE_INT) a; ++ op[2] = gen_int_mode (v, mode); ++} ++ + /* Return true if a move from SRC to DEST in INSN should be split. */ + + static bool +diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h +index c6e37b1b4..9d3cd9ca0 100644 +--- a/gcc/config/loongarch/loongarch.h ++++ b/gcc/config/loongarch/loongarch.h +@@ -612,6 +612,25 @@ enum reg_class + + #define CONST_LOW_PART(VALUE) ((VALUE) - CONST_HIGH_PART (VALUE)) + ++/* True if VALUE can be added onto a register with one addu16i.d ++ instruction. 
*/ ++ ++#define ADDU16I_OPERAND(VALUE) \ ++ (TARGET_64BIT && (((VALUE) & 0xffff) == 0 \ ++ && IMM16_OPERAND ((HOST_WIDE_INT) (VALUE) / 65536))) ++ ++/* True if VALUE can be added onto a register with two addi.{d/w} ++ instructions, but not one addi.{d/w} instruction. */ ++#define DUAL_IMM12_OPERAND(VALUE) \ ++ (IN_RANGE ((VALUE), -4096, 4094) && !IMM12_OPERAND (VALUE)) ++ ++/* True if VALUE can be added onto a register with two addu16i.d ++ instruction, but not one addu16i.d instruction. */ ++#define DUAL_ADDU16I_OPERAND(VALUE) \ ++ (TARGET_64BIT && (((VALUE) & 0xffff) == 0 \ ++ && !ADDU16I_OPERAND (VALUE) \ ++ && IN_RANGE ((VALUE) / 65536, -0x10000, 0xfffe))) ++ + #define IMM12_INT(X) IMM12_OPERAND (INTVAL (X)) + #define IMM12_INT_UNSIGNED(X) IMM12_OPERAND_UNSIGNED (INTVAL (X)) + #define LU12I_INT(X) LU12I_OPERAND (INTVAL (X)) +diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md +index 833b94753..b2f7c7f78 100644 +--- a/gcc/config/loongarch/loongarch.md ++++ b/gcc/config/loongarch/loongarch.md +@@ -598,24 +598,64 @@ + [(set_attr "type" "fadd") + (set_attr "mode" "")]) + +-(define_insn "add3" +- [(set (match_operand:GPR 0 "register_operand" "=r,r") +- (plus:GPR (match_operand:GPR 1 "register_operand" "r,r") +- (match_operand:GPR 2 "arith_operand" "r,I")))] ++(define_insn_and_split "add3" ++ [(set (match_operand:GPR 0 "register_operand" "=r,r,r,r,r,r,r") ++ (plus:GPR (match_operand:GPR 1 "register_operand" "r,r,r,r,r,r,r") ++ (match_operand:GPR 2 "plus__operand" ++ "r,I,La,Lb,Lc,Ld,Le")))] + "" +- "add%i2.\t%0,%1,%2"; ++ "@ ++ add.\t%0,%1,%2 ++ addi.\t%0,%1,%2 ++ # ++ * operands[2] = GEN_INT (INTVAL (operands[2]) / 65536); \ ++ return \"addu16i.d\t%0,%1,%2\"; ++ # ++ # ++ #" ++ "CONST_INT_P (operands[2]) && !IMM12_INT (operands[2]) \ ++ && !ADDU16I_OPERAND (INTVAL (operands[2]))" ++ [(set (match_dup 0) (plus:GPR (match_dup 1) (match_dup 3))) ++ (set (match_dup 0) (plus:GPR (match_dup 0) (match_dup 4)))] ++ { ++ 
loongarch_split_plus_constant (&operands[2], mode); ++ } + [(set_attr "alu_type" "add") +- (set_attr "mode" "")]) +- +-(define_insn "*addsi3_extended" +- [(set (match_operand:DI 0 "register_operand" "=r,r") ++ (set_attr "mode" "") ++ (set_attr "insn_count" "1,1,2,1,2,2,2") ++ (set (attr "enabled") ++ (cond ++ [(match_test "mode != DImode && which_alternative == 4") ++ (const_string "no") ++ (match_test "mode != DImode && which_alternative == 5") ++ (const_string "no") ++ (match_test "mode != SImode && which_alternative == 6") ++ (const_string "no")] ++ (const_string "yes")))]) ++ ++(define_insn_and_split "*addsi3_extended" ++ [(set (match_operand:DI 0 "register_operand" "=r,r,r,r") + (sign_extend:DI +- (plus:SI (match_operand:SI 1 "register_operand" "r,r") +- (match_operand:SI 2 "arith_operand" "r,I"))))] ++ (plus:SI (match_operand:SI 1 "register_operand" "r,r,r,r") ++ (match_operand:SI 2 "plus_si_extend_operand" ++ "r,I,La,Le"))))] + "TARGET_64BIT" +- "add%i2.w\t%0,%1,%2" ++ "@ ++ add.w\t%0,%1,%2 ++ addi.w\t%0,%1,%2 ++ # ++ #" ++ "CONST_INT_P (operands[2]) && !IMM12_INT (operands[2])" ++ [(set (subreg:SI (match_dup 0) 0) (plus:SI (match_dup 1) (match_dup 3))) ++ (set (match_dup 0) ++ (sign_extend:DI (plus:SI (subreg:SI (match_dup 0) 0) ++ (match_dup 4))))] ++ { ++ loongarch_split_plus_constant (&operands[2], SImode); ++ } + [(set_attr "alu_type" "add") +- (set_attr "mode" "SI")]) ++ (set_attr "mode" "SI") ++ (set_attr "insn_count" "1,1,2,2")]) + + + ;; +diff --git a/gcc/config/loongarch/predicates.md b/gcc/config/loongarch/predicates.md +index 3c32b2987..4966d5569 100644 +--- a/gcc/config/loongarch/predicates.md ++++ b/gcc/config/loongarch/predicates.md +@@ -39,14 +39,50 @@ + (and (match_code "const_int") + (match_test "IMM12_OPERAND (INTVAL (op))"))) + ++(define_predicate "const_dual_imm12_operand" ++ (and (match_code "const_int") ++ (match_test "DUAL_IMM12_OPERAND (INTVAL (op))"))) ++ + (define_predicate "const_imm16_operand" + (and (match_code "const_int") + 
(match_test "IMM16_OPERAND (INTVAL (op))"))) + ++(define_predicate "const_addu16i_operand" ++ (and (match_code "const_int") ++ (match_test "ADDU16I_OPERAND (INTVAL (op))"))) ++ ++(define_predicate "const_addu16i_imm12_di_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_addu16i_imm12_operand_p (INTVAL (op), DImode)"))) ++ ++(define_predicate "const_addu16i_imm12_si_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_addu16i_imm12_operand_p (INTVAL (op), SImode)"))) ++ ++(define_predicate "const_dual_addu16i_operand" ++ (and (match_code "const_int") ++ (match_test "DUAL_ADDU16I_OPERAND (INTVAL (op))"))) ++ + (define_predicate "arith_operand" + (ior (match_operand 0 "const_arith_operand") + (match_operand 0 "register_operand"))) + ++(define_predicate "plus_di_operand" ++ (ior (match_operand 0 "arith_operand") ++ (match_operand 0 "const_dual_imm12_operand") ++ (match_operand 0 "const_addu16i_operand") ++ (match_operand 0 "const_addu16i_imm12_di_operand") ++ (match_operand 0 "const_dual_addu16i_operand"))) ++ ++(define_predicate "plus_si_extend_operand" ++ (ior (match_operand 0 "arith_operand") ++ (match_operand 0 "const_dual_imm12_operand") ++ (match_operand 0 "const_addu16i_imm12_si_operand"))) ++ ++(define_predicate "plus_si_operand" ++ (ior (match_operand 0 "plus_si_extend_operand") ++ (match_operand 0 "const_addu16i_operand"))) ++ + (define_predicate "const_immalsl_operand" + (and (match_code "const_int") + (match_test "IN_RANGE (INTVAL (op), 1, 4)"))) +diff --git a/gcc/testsuite/gcc.target/loongarch/add-const.c b/gcc/testsuite/gcc.target/loongarch/add-const.c +new file mode 100644 +index 000000000..7b6a7cb92 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/add-const.c +@@ -0,0 +1,45 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O -mabi=lp64d" } */ ++ ++/* None of these functions should load the const operand into a temp ++ register. 
*/ ++ ++/* { dg-final { scan-assembler-not "add\\.[dw]" } } */ ++ ++unsigned long f01 (unsigned long x) { return x + 1; } ++unsigned long f02 (unsigned long x) { return x - 1; } ++unsigned long f03 (unsigned long x) { return x + 2047; } ++unsigned long f04 (unsigned long x) { return x + 4094; } ++unsigned long f05 (unsigned long x) { return x - 2048; } ++unsigned long f06 (unsigned long x) { return x - 4096; } ++unsigned long f07 (unsigned long x) { return x + 0x7fff0000; } ++unsigned long f08 (unsigned long x) { return x - 0x80000000l; } ++unsigned long f09 (unsigned long x) { return x + 0x7fff0000l * 2; } ++unsigned long f10 (unsigned long x) { return x - 0x80000000l * 2; } ++unsigned long f11 (unsigned long x) { return x + 0x7fff0000 + 0x1; } ++unsigned long f12 (unsigned long x) { return x + 0x7fff0000 - 0x1; } ++unsigned long f13 (unsigned long x) { return x + 0x7fff0000 + 0x7ff; } ++unsigned long f14 (unsigned long x) { return x + 0x7fff0000 - 0x800; } ++unsigned long f15 (unsigned long x) { return x - 0x80000000l - 1; } ++unsigned long f16 (unsigned long x) { return x - 0x80000000l + 1; } ++unsigned long f17 (unsigned long x) { return x - 0x80000000l - 0x800; } ++unsigned long f18 (unsigned long x) { return x - 0x80000000l + 0x7ff; } ++ ++unsigned int g01 (unsigned int x) { return x + 1; } ++unsigned int g02 (unsigned int x) { return x - 1; } ++unsigned int g03 (unsigned int x) { return x + 2047; } ++unsigned int g04 (unsigned int x) { return x + 4094; } ++unsigned int g05 (unsigned int x) { return x - 2048; } ++unsigned int g06 (unsigned int x) { return x - 4096; } ++unsigned int g07 (unsigned int x) { return x + 0x7fff0000; } ++unsigned int g08 (unsigned int x) { return x - 0x80000000l; } ++unsigned int g09 (unsigned int x) { return x + 0x7fff0000l * 2; } ++unsigned int g10 (unsigned int x) { return x - 0x80000000l * 2; } ++unsigned int g11 (unsigned int x) { return x + 0x7fff0000 + 0x1; } ++unsigned int g12 (unsigned int x) { return x + 0x7fff0000 - 0x1; 
} ++unsigned int g13 (unsigned int x) { return x + 0x7fff0000 + 0x7ff; } ++unsigned int g14 (unsigned int x) { return x + 0x7fff0000 - 0x800; } ++unsigned int g15 (unsigned int x) { return x - 0x80000000l - 1; } ++unsigned int g16 (unsigned int x) { return x - 0x80000000l + 1; } ++unsigned int g17 (unsigned int x) { return x - 0x80000000l - 0x800; } ++unsigned int g18 (unsigned int x) { return x - 0x80000000l + 0x7ff; } +diff --git a/gcc/testsuite/gcc.target/loongarch/stack-check-cfa-1.c b/gcc/testsuite/gcc.target/loongarch/stack-check-cfa-1.c +index 3533fe7b6..cd72154f4 100644 +--- a/gcc/testsuite/gcc.target/loongarch/stack-check-cfa-1.c ++++ b/gcc/testsuite/gcc.target/loongarch/stack-check-cfa-1.c +@@ -6,7 +6,7 @@ + #define SIZE 128*1024 + #include "stack-check-prologue.h" + +-/* { dg-final { scan-assembler-times {\.cfi_def_cfa_offset 131088} 1 } } */ ++/* { dg-final { scan-assembler-times {\.cfi_def_cfa_offset 131072} 1 } } */ + /* { dg-final { scan-assembler-times {\.cfi_def_cfa_offset 0} 1 } } */ + + /* Checks that the CFA notes are correct for every sp adjustment. */ +diff --git a/gcc/testsuite/gcc.target/loongarch/stack-check-cfa-2.c b/gcc/testsuite/gcc.target/loongarch/stack-check-cfa-2.c +index e5e711105..3e5ca05b2 100644 +--- a/gcc/testsuite/gcc.target/loongarch/stack-check-cfa-2.c ++++ b/gcc/testsuite/gcc.target/loongarch/stack-check-cfa-2.c +@@ -6,7 +6,7 @@ + #define SIZE 1280*1024 + 512 + #include "stack-check-prologue.h" + +-/* { dg-final { scan-assembler-times {\.cfi_def_cfa_offset 1311248} 1 } } */ ++/* { dg-final { scan-assembler-times {\.cfi_def_cfa_offset 1311232} 1 } } */ + /* { dg-final { scan-assembler-times {\.cfi_def_cfa_offset 0} 1 } } */ + + /* Checks that the CFA notes are correct for every sp adjustment. 
*/ +-- +2.33.0 + diff --git a/LoongArch-Optimize-immediate-load.patch b/LoongArch-Optimize-immediate-load.patch new file mode 100644 index 0000000000000000000000000000000000000000..b4d9538cb5589a84c1f4afac3773796ebd3db17d --- /dev/null +++ b/LoongArch-Optimize-immediate-load.patch @@ -0,0 +1,338 @@ +From b533b615ae47b97d51eeb83e1a63f7c72407430f Mon Sep 17 00:00:00 2001 +From: Lulu Cheng +Date: Thu, 17 Nov 2022 17:08:36 +0800 +Subject: [PATCH 032/124] LoongArch: Optimize immediate load. + +The immediate number is split in the Split pass, not in the expand pass. + +Because loop2_invariant pass will extract the instructions that do not change +in the loop out of the loop, some instructions will not meet the extraction +conditions if the machine performs immediate decomposition while expand pass, +so the immediate decomposition will be transferred to the split process. + +gcc/ChangeLog: + + * config/loongarch/loongarch.cc (enum loongarch_load_imm_method): + Remove the member METHOD_INSV that is not currently used. + (struct loongarch_integer_op): Define a new member curr_value, + that records the value of the number stored in the destination + register immediately after the current instruction has run. + (loongarch_build_integer): Assign a value to the curr_value member variable. + (loongarch_move_integer): Adds information for the immediate load instruction. + * config/loongarch/loongarch.md (*movdi_32bit): Redefine as define_insn_and_split. + (*movdi_64bit): Likewise. + (*movsi_internal): Likewise. + (*movhi_internal): Likewise. + * config/loongarch/predicates.md: Return true as long as it is CONST_INT, ensure + that the immediate number is not optimized by decomposition during expand + optimization loop. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/imm-load.c: New test. + * gcc.target/loongarch/imm-load1.c: New test. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/loongarch.cc | 62 ++++++++++--------- + gcc/config/loongarch/loongarch.md | 44 +++++++++++-- + gcc/config/loongarch/predicates.md | 2 +- + gcc/testsuite/gcc.target/loongarch/imm-load.c | 10 +++ + .../gcc.target/loongarch/imm-load1.c | 26 ++++++++ + 5 files changed, 110 insertions(+), 34 deletions(-) + create mode 100644 gcc/testsuite/gcc.target/loongarch/imm-load.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/imm-load1.c + +diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc +index 622c9435b..f45a49f90 100644 +--- a/gcc/config/loongarch/loongarch.cc ++++ b/gcc/config/loongarch/loongarch.cc +@@ -139,22 +139,21 @@ struct loongarch_address_info + + METHOD_LU52I: + Load 52-63 bit of the immediate number. +- +- METHOD_INSV: +- immediate like 0xfff00000fffffxxx +- */ ++*/ + enum loongarch_load_imm_method + { + METHOD_NORMAL, + METHOD_LU32I, +- METHOD_LU52I, +- METHOD_INSV ++ METHOD_LU52I + }; + + struct loongarch_integer_op + { + enum rtx_code code; + HOST_WIDE_INT value; ++ /* Represent the result of the immediate count of the load instruction at ++ each step. */ ++ HOST_WIDE_INT curr_value; + enum loongarch_load_imm_method method; + }; + +@@ -1474,24 +1473,27 @@ loongarch_build_integer (struct loongarch_integer_op *codes, + { + /* The value of the lower 32 bit be loaded with one instruction. + lu12i.w. */ +- codes[0].code = UNKNOWN; +- codes[0].method = METHOD_NORMAL; +- codes[0].value = low_part; ++ codes[cost].code = UNKNOWN; ++ codes[cost].method = METHOD_NORMAL; ++ codes[cost].value = low_part; ++ codes[cost].curr_value = low_part; + cost++; + } + else + { + /* lu12i.w + ior. 
*/ +- codes[0].code = UNKNOWN; +- codes[0].method = METHOD_NORMAL; +- codes[0].value = low_part & ~(IMM_REACH - 1); ++ codes[cost].code = UNKNOWN; ++ codes[cost].method = METHOD_NORMAL; ++ codes[cost].value = low_part & ~(IMM_REACH - 1); ++ codes[cost].curr_value = codes[cost].value; + cost++; + HOST_WIDE_INT iorv = low_part & (IMM_REACH - 1); + if (iorv != 0) + { +- codes[1].code = IOR; +- codes[1].method = METHOD_NORMAL; +- codes[1].value = iorv; ++ codes[cost].code = IOR; ++ codes[cost].method = METHOD_NORMAL; ++ codes[cost].value = iorv; ++ codes[cost].curr_value = low_part; + cost++; + } + } +@@ -1514,11 +1516,14 @@ loongarch_build_integer (struct loongarch_integer_op *codes, + { + codes[cost].method = METHOD_LU52I; + codes[cost].value = value & LU52I_B; ++ codes[cost].curr_value = value; + return cost + 1; + } + + codes[cost].method = METHOD_LU32I; + codes[cost].value = (value & LU32I_B) | (sign51 ? LU52I_B : 0); ++ codes[cost].curr_value = (value & 0xfffffffffffff) ++ | (sign51 ? 
LU52I_B : 0); + cost++; + + /* Determine whether the 52-61 bits are sign-extended from the low order, +@@ -1527,6 +1532,7 @@ loongarch_build_integer (struct loongarch_integer_op *codes, + { + codes[cost].method = METHOD_LU52I; + codes[cost].value = value & LU52I_B; ++ codes[cost].curr_value = value; + cost++; + } + } +@@ -2910,6 +2916,9 @@ loongarch_move_integer (rtx temp, rtx dest, unsigned HOST_WIDE_INT value) + else + x = force_reg (mode, x); + ++ set_unique_reg_note (get_last_insn (), REG_EQUAL, ++ GEN_INT (codes[i-1].curr_value)); ++ + switch (codes[i].method) + { + case METHOD_NORMAL: +@@ -2917,22 +2926,17 @@ loongarch_move_integer (rtx temp, rtx dest, unsigned HOST_WIDE_INT value) + GEN_INT (codes[i].value)); + break; + case METHOD_LU32I: +- emit_insn ( +- gen_rtx_SET (x, +- gen_rtx_IOR (DImode, +- gen_rtx_ZERO_EXTEND ( +- DImode, gen_rtx_SUBREG (SImode, x, 0)), +- GEN_INT (codes[i].value)))); ++ gcc_assert (mode == DImode); ++ x = gen_rtx_IOR (DImode, ++ gen_rtx_ZERO_EXTEND (DImode, ++ gen_rtx_SUBREG (SImode, x, 0)), ++ GEN_INT (codes[i].value)); + break; + case METHOD_LU52I: +- emit_insn (gen_lu52i_d (x, x, GEN_INT (0xfffffffffffff), +- GEN_INT (codes[i].value))); +- break; +- case METHOD_INSV: +- emit_insn ( +- gen_rtx_SET (gen_rtx_ZERO_EXTRACT (DImode, x, GEN_INT (20), +- GEN_INT (32)), +- gen_rtx_REG (DImode, 0))); ++ gcc_assert (mode == DImode); ++ x = gen_rtx_IOR (DImode, ++ gen_rtx_AND (DImode, x, GEN_INT (0xfffffffffffff)), ++ GEN_INT (codes[i].value)); + break; + default: + gcc_unreachable (); +diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md +index 2fda53819..f61db66d5 100644 +--- a/gcc/config/loongarch/loongarch.md ++++ b/gcc/config/loongarch/loongarch.md +@@ -1718,23 +1718,41 @@ + DONE; + }) + +-(define_insn "*movdi_32bit" ++(define_insn_and_split "*movdi_32bit" + [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,w,*f,*f,*r,*m") + (match_operand:DI 1 "move_operand" "r,i,w,r,*J*r,*m,*f,*f"))] + 
"!TARGET_64BIT + && (register_operand (operands[0], DImode) + || reg_or_0_operand (operands[1], DImode))" + { return loongarch_output_move (operands[0], operands[1]); } ++ "CONST_INT_P (operands[1]) && REG_P (operands[0]) && GP_REG_P (REGNO ++ (operands[0]))" ++ [(const_int 0)] ++ " ++{ ++ loongarch_move_integer (operands[0], operands[0], INTVAL (operands[1])); ++ DONE; ++} ++ " + [(set_attr "move_type" "move,const,load,store,mgtf,fpload,mftg,fpstore") + (set_attr "mode" "DI")]) + +-(define_insn "*movdi_64bit" ++(define_insn_and_split "*movdi_64bit" + [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,w,*f,*f,*r,*m") + (match_operand:DI 1 "move_operand" "r,Yd,w,rJ,*r*J,*m,*f,*f"))] + "TARGET_64BIT + && (register_operand (operands[0], DImode) + || reg_or_0_operand (operands[1], DImode))" + { return loongarch_output_move (operands[0], operands[1]); } ++ "CONST_INT_P (operands[1]) && REG_P (operands[0]) && GP_REG_P (REGNO ++ (operands[0]))" ++ [(const_int 0)] ++ " ++{ ++ loongarch_move_integer (operands[0], operands[0], INTVAL (operands[1])); ++ DONE; ++} ++ " + [(set_attr "move_type" "move,const,load,store,mgtf,fpload,mftg,fpstore") + (set_attr "mode" "DI")]) + +@@ -1749,12 +1767,21 @@ + DONE; + }) + +-(define_insn "*movsi_internal" ++(define_insn_and_split "*movsi_internal" + [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,w,*f,*f,*r,*m,*r,*z") + (match_operand:SI 1 "move_operand" "r,Yd,w,rJ,*r*J,*m,*f,*f,*z,*r"))] + "(register_operand (operands[0], SImode) + || reg_or_0_operand (operands[1], SImode))" + { return loongarch_output_move (operands[0], operands[1]); } ++ "CONST_INT_P (operands[1]) && REG_P (operands[0]) && GP_REG_P (REGNO ++ (operands[0]))" ++ [(const_int 0)] ++ " ++{ ++ loongarch_move_integer (operands[0], operands[0], INTVAL (operands[1])); ++ DONE; ++} ++ " + [(set_attr "move_type" "move,const,load,store,mgtf,fpload,mftg,fpstore,mftg,mgtf") + (set_attr "mode" "SI")]) + +@@ -1774,12 +1801,21 @@ + DONE; + }) + +-(define_insn 
"*movhi_internal" ++(define_insn_and_split "*movhi_internal" + [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,r,r,m,r,k") + (match_operand:HI 1 "move_operand" "r,Yd,I,m,rJ,k,rJ"))] + "(register_operand (operands[0], HImode) + || reg_or_0_operand (operands[1], HImode))" + { return loongarch_output_move (operands[0], operands[1]); } ++ "CONST_INT_P (operands[1]) && REG_P (operands[0]) && GP_REG_P (REGNO ++ (operands[0]))" ++ [(const_int 0)] ++ " ++{ ++ loongarch_move_integer (operands[0], operands[0], INTVAL (operands[1])); ++ DONE; ++} ++ " + [(set_attr "move_type" "move,const,const,load,store,load,store") + (set_attr "mode" "HI")]) + +diff --git a/gcc/config/loongarch/predicates.md b/gcc/config/loongarch/predicates.md +index 8bd0c1376..58c3dc226 100644 +--- a/gcc/config/loongarch/predicates.md ++++ b/gcc/config/loongarch/predicates.md +@@ -226,7 +226,7 @@ + switch (GET_CODE (op)) + { + case CONST_INT: +- return !splittable_const_int_operand (op, mode); ++ return true; + + case CONST: + case SYMBOL_REF: +diff --git a/gcc/testsuite/gcc.target/loongarch/imm-load.c b/gcc/testsuite/gcc.target/loongarch/imm-load.c +new file mode 100644 +index 000000000..c04ca3399 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/imm-load.c +@@ -0,0 +1,10 @@ ++/* { dg-do compile } */ ++/* { dg-options "-mabi=lp64d -O2 -fdump-rtl-split1" } */ ++ ++long int ++test (void) ++{ ++ return 0x1234567890abcdef; ++} ++/* { dg-final { scan-rtl-dump-times "scanning new insn with uid" 6 "split1" } } */ ++ +diff --git a/gcc/testsuite/gcc.target/loongarch/imm-load1.c b/gcc/testsuite/gcc.target/loongarch/imm-load1.c +new file mode 100644 +index 000000000..2ff029712 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/imm-load1.c +@@ -0,0 +1,26 @@ ++/* { dg-do compile } */ ++/* { dg-options "-mabi=lp64d -O2" } */ ++/* { dg-final { scan-assembler "test:.*lu52i\.d.*\n\taddi\.w.*\n\.L2:" } } */ ++ ++ ++extern long long b[10]; ++static inline long long ++repeat_bytes (void) ++{ ++ long long 
r = 0x0101010101010101; ++ ++ return r; ++} ++ ++static inline long long ++highbit_mask (long long m) ++{ ++ return m & repeat_bytes (); ++} ++ ++void test(long long *a) ++{ ++ for (int i = 0; i < 10; i++) ++ b[i] = highbit_mask (a[i]); ++ ++} +-- +2.33.0 + diff --git a/LoongArch-Optimize-switch-with-sign-extended-index.patch b/LoongArch-Optimize-switch-with-sign-extended-index.patch new file mode 100644 index 0000000000000000000000000000000000000000..c80a21764cbb229fffaa2c0bc7d49c48ac0db382 --- /dev/null +++ b/LoongArch-Optimize-switch-with-sign-extended-index.patch @@ -0,0 +1,114 @@ +From c6d4efda0fa064dfe1d3cc1b9abf8051a82cd74f Mon Sep 17 00:00:00 2001 +From: Lulu Cheng +Date: Sat, 2 Sep 2023 10:59:55 +0800 +Subject: [PATCH 056/124] LoongArch: Optimize switch with sign-extended index. + +The patch refers to the submission of RISCV +7bbce9b50302959286381d9177818642bceaf301. + +gcc/ChangeLog: + + * config/loongarch/loongarch.cc (loongarch_extend_comparands): + In unsigned QImode test, check for sign extended subreg and/or + constant operands, and do a sign extension in that case. + * config/loongarch/loongarch.md (TARGET_64BIT): Define + template cbranchqi4. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/switch-qi.c: New test. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/loongarch.cc | 16 +++++++++++++--- + gcc/config/loongarch/loongarch.md | 10 +++++++--- + gcc/testsuite/gcc.target/loongarch/switch-qi.c | 16 ++++++++++++++++ + 3 files changed, 36 insertions(+), 6 deletions(-) + create mode 100644 gcc/testsuite/gcc.target/loongarch/switch-qi.c + +diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc +index 7b48e3216..41d344b82 100644 +--- a/gcc/config/loongarch/loongarch.cc ++++ b/gcc/config/loongarch/loongarch.cc +@@ -4221,11 +4221,21 @@ loongarch_allocate_fcc (machine_mode mode) + static void + loongarch_extend_comparands (rtx_code code, rtx *op0, rtx *op1) + { +- /* Comparisons consider all XLEN bits, so extend sub-XLEN values. */ ++ /* Comparisons consider all GRLEN bits, so extend sub-GRLEN values. */ + if (GET_MODE_SIZE (word_mode) > GET_MODE_SIZE (GET_MODE (*op0))) + { +- /* TODO: checkout It is more profitable to zero-extend QImode values. */ +- if (unsigned_condition (code) == code && GET_MODE (*op0) == QImode) ++ /* It is more profitable to zero-extend QImode values. But not if the ++ first operand has already been sign-extended, and the second one is ++ is a constant or has already been sign-extended also. */ ++ if (unsigned_condition (code) == code ++ && (GET_MODE (*op0) == QImode ++ && ! (GET_CODE (*op0) == SUBREG ++ && SUBREG_PROMOTED_VAR_P (*op0) ++ && SUBREG_PROMOTED_SIGNED_P (*op0) ++ && (CONST_INT_P (*op1) ++ || (GET_CODE (*op1) == SUBREG ++ && SUBREG_PROMOTED_VAR_P (*op1) ++ && SUBREG_PROMOTED_SIGNED_P (*op1)))))) + { + *op0 = gen_rtx_ZERO_EXTEND (word_mode, *op0); + if (CONST_INT_P (*op1)) +diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md +index cf7441e0b..a5e9352ca 100644 +--- a/gcc/config/loongarch/loongarch.md ++++ b/gcc/config/loongarch/loongarch.md +@@ -357,7 +357,7 @@ + ;; pointer-sized quantities. Exactly one of the two alternatives will match. 
+ (define_mode_iterator P [(SI "Pmode == SImode") (DI "Pmode == DImode")]) + +-;; Likewise, but for XLEN-sized quantities. ++;; Likewise, but for GRLEN-sized quantities. + (define_mode_iterator X [(SI "!TARGET_64BIT") (DI "TARGET_64BIT")]) + + ;; 64-bit modes for which we provide move patterns. +@@ -2733,11 +2733,15 @@ + [(set_attr "type" "branch")]) + + ++;; Branches operate on GRLEN-sized quantities, but for LoongArch64 we accept ++;; QImode values so we can force zero-extension. ++(define_mode_iterator BR [(QI "TARGET_64BIT") SI (DI "TARGET_64BIT")]) ++ + (define_expand "cbranch4" + [(set (pc) + (if_then_else (match_operator 0 "comparison_operator" +- [(match_operand:GPR 1 "register_operand") +- (match_operand:GPR 2 "nonmemory_operand")]) ++ [(match_operand:BR 1 "register_operand") ++ (match_operand:BR 2 "nonmemory_operand")]) + (label_ref (match_operand 3 "")) + (pc)))] + "" +diff --git a/gcc/testsuite/gcc.target/loongarch/switch-qi.c b/gcc/testsuite/gcc.target/loongarch/switch-qi.c +new file mode 100644 +index 000000000..dd192fd49 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/switch-qi.c +@@ -0,0 +1,16 @@ ++/* { dg-do compile } */ ++/* { dg-options "-march=loongarch64 -mabi=lp64d" } */ ++/* { dg-final { scan-assembler-not "bstrpick" } } */ ++ ++/* Test for loongarch_extend_comparands patch. 
*/ ++extern void asdf (int); ++void ++foo (signed char x) { ++ switch (x) { ++ case 0: asdf (10); break; ++ case 1: asdf (11); break; ++ case 2: asdf (12); break; ++ case 3: asdf (13); break; ++ case 4: asdf (14); break; ++ } ++} +-- +2.33.0 + diff --git a/LoongArch-Optimize-the-implementation-of-stack-check.patch b/LoongArch-Optimize-the-implementation-of-stack-check.patch new file mode 100644 index 0000000000000000000000000000000000000000..f365ff257b50caf498fbb8aaabc9d2fc43e2e983 --- /dev/null +++ b/LoongArch-Optimize-the-implementation-of-stack-check.patch @@ -0,0 +1,810 @@ +From d3615b555d6885dba298f7b339740be11cb65a8f Mon Sep 17 00:00:00 2001 +From: Lulu Cheng +Date: Tue, 29 Nov 2022 16:06:12 +0800 +Subject: [PATCH 033/124] LoongArch: Optimize the implementation of stack + check. + +The old stack check was performed before the stack was dropped, +which would cause the detection tool to report a memory leak. + +The current stack check scheme is as follows: + +'-fstack-clash-protection': +1. When the frame->total_size is smaller than the guard page size, + the stack is dropped according to the original scheme, and there + is no need to perform stack detection in the prologue. +2. When frame->total_size is greater than or equal to guard page size, + the first step to drop the stack is to drop the space required by + the caller-save registers. This space needs to save the caller-save + registers, so an implicit stack check is performed. + So just need to check the rest of the stack space. + +'-fstack-check': +There is no one-time stack drop and then page-by-page detection as +described in the document. It is also the same as +'-fstack-clash-protection', which is detected immediately after page drop. + +It is judged that when frame->total_size is not 0, only the size required +to save the s register is dropped for the first stack down. + +The test cases are referenced from aarch64. 
+ +gcc/ChangeLog: + + * config/loongarch/linux.h (STACK_CHECK_MOVING_SP): + Define this macro to 1. + * config/loongarch/loongarch.cc (STACK_CLASH_PROTECTION_GUARD_SIZE): + Size of guard page. + (loongarch_first_stack_step): Return the size of the first drop stack + according to whether stack checking is performed. + (loongarch_emit_probe_stack_range): Adjust the method of stack checking in prologue. + (loongarch_output_probe_stack_range): Delete useless code. + (loongarch_expand_prologue): Adjust the method of stack checking in prologue. + (loongarch_option_override_internal): Enforce that interval is the same + size as size so the mid-end does the right thing. + * config/loongarch/loongarch.h (STACK_CLASH_MAX_UNROLL_PAGES): + New macro decide whether to loop stack detection. + +gcc/testsuite/ChangeLog: + + * lib/target-supports.exp: + * gcc.target/loongarch/stack-check-alloca-1.c: New test. + * gcc.target/loongarch/stack-check-alloca-2.c: New test. + * gcc.target/loongarch/stack-check-alloca-3.c: New test. + * gcc.target/loongarch/stack-check-alloca-4.c: New test. + * gcc.target/loongarch/stack-check-alloca-5.c: New test. + * gcc.target/loongarch/stack-check-alloca-6.c: New test. + * gcc.target/loongarch/stack-check-alloca.h: New test. + * gcc.target/loongarch/stack-check-cfa-1.c: New test. + * gcc.target/loongarch/stack-check-cfa-2.c: New test. + * gcc.target/loongarch/stack-check-prologue-1.c: New test. + * gcc.target/loongarch/stack-check-prologue-2.c: New test. + * gcc.target/loongarch/stack-check-prologue-3.c: New test. + * gcc.target/loongarch/stack-check-prologue-4.c: New test. + * gcc.target/loongarch/stack-check-prologue-5.c: New test. + * gcc.target/loongarch/stack-check-prologue-6.c: New test. + * gcc.target/loongarch/stack-check-prologue-7.c: New test. + * gcc.target/loongarch/stack-check-prologue.h: New test. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/linux.h | 3 + + gcc/config/loongarch/loongarch.cc | 248 +++++++++++------- + gcc/config/loongarch/loongarch.h | 4 + + .../loongarch/stack-check-alloca-1.c | 15 ++ + .../loongarch/stack-check-alloca-2.c | 12 + + .../loongarch/stack-check-alloca-3.c | 12 + + .../loongarch/stack-check-alloca-4.c | 12 + + .../loongarch/stack-check-alloca-5.c | 13 + + .../loongarch/stack-check-alloca-6.c | 13 + + .../gcc.target/loongarch/stack-check-alloca.h | 15 ++ + .../gcc.target/loongarch/stack-check-cfa-1.c | 12 + + .../gcc.target/loongarch/stack-check-cfa-2.c | 12 + + .../loongarch/stack-check-prologue-1.c | 11 + + .../loongarch/stack-check-prologue-2.c | 11 + + .../loongarch/stack-check-prologue-3.c | 11 + + .../loongarch/stack-check-prologue-4.c | 11 + + .../loongarch/stack-check-prologue-5.c | 12 + + .../loongarch/stack-check-prologue-6.c | 11 + + .../loongarch/stack-check-prologue-7.c | 12 + + .../loongarch/stack-check-prologue.h | 5 + + gcc/testsuite/lib/target-supports.exp | 7 +- + 21 files changed, 361 insertions(+), 101 deletions(-) + create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-alloca-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-alloca-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-alloca-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-alloca-4.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-alloca-5.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-alloca-6.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-alloca.h + create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-cfa-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-cfa-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-prologue-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-prologue-2.c + create mode 
100644 gcc/testsuite/gcc.target/loongarch/stack-check-prologue-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-prologue-4.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-prologue-5.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-prologue-6.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-prologue-7.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-prologue.h + +diff --git a/gcc/config/loongarch/linux.h b/gcc/config/loongarch/linux.h +index 110d0fab9..00039ac18 100644 +--- a/gcc/config/loongarch/linux.h ++++ b/gcc/config/loongarch/linux.h +@@ -48,3 +48,6 @@ along with GCC; see the file COPYING3. If not see + #define STACK_CHECK_PROTECT (TARGET_64BIT ? 16 * 1024 : 12 * 1024) + + #define TARGET_ASM_FILE_END file_end_indicate_exec_stack ++ ++/* The stack pointer needs to be moved while checking the stack. */ ++#define STACK_CHECK_MOVING_SP 1 +diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc +index f45a49f90..e59edc4cd 100644 +--- a/gcc/config/loongarch/loongarch.cc ++++ b/gcc/config/loongarch/loongarch.cc +@@ -257,6 +257,10 @@ const char *const + loongarch_fp_conditions[16]= {LARCH_FP_CONDITIONS (STRINGIFY)}; + #undef STRINGIFY + ++/* Size of guard page. */ ++#define STACK_CLASH_PROTECTION_GUARD_SIZE \ ++ (1 << param_stack_clash_protection_guard_size) ++ + /* Implement TARGET_FUNCTION_ARG_BOUNDARY. Every parameter gets at + least PARM_BOUNDARY bits of alignment, but will be given anything up + to PREFERRED_STACK_BOUNDARY bits if the type requires it. 
*/ +@@ -1069,11 +1073,20 @@ loongarch_restore_reg (rtx reg, rtx mem) + static HOST_WIDE_INT + loongarch_first_stack_step (struct loongarch_frame_info *frame) + { ++ HOST_WIDE_INT min_first_step ++ = LARCH_STACK_ALIGN (frame->total_size - frame->fp_sp_offset); ++ ++ /* When stack checking is required, if the sum of frame->total_size ++ and stack_check_protect is greater than stack clash protection guard ++ size, then return min_first_step. */ ++ if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK ++ || (flag_stack_clash_protection ++ && frame->total_size > STACK_CLASH_PROTECTION_GUARD_SIZE)) ++ return min_first_step; ++ + if (IMM12_OPERAND (frame->total_size)) + return frame->total_size; + +- HOST_WIDE_INT min_first_step +- = LARCH_STACK_ALIGN (frame->total_size - frame->fp_sp_offset); + HOST_WIDE_INT max_first_step = IMM_REACH / 2 - PREFERRED_STACK_BOUNDARY / 8; + HOST_WIDE_INT min_second_step = frame->total_size - max_first_step; + gcc_assert (min_first_step <= max_first_step); +@@ -1106,103 +1119,109 @@ loongarch_emit_stack_tie (void) + static void + loongarch_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size) + { +- /* See if we have a constant small number of probes to generate. If so, +- that's the easy case. */ +- if ((TARGET_64BIT && (first + size <= 32768)) +- || (!TARGET_64BIT && (first + size <= 2048))) +- { +- HOST_WIDE_INT i; ++ HOST_WIDE_INT rounded_size; ++ HOST_WIDE_INT interval; + +- /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until +- it exceeds SIZE. If only one probe is needed, this will not +- generate any code. Then probe at FIRST + SIZE. 
*/ +- for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL) +- emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx, +- -(first + i))); ++ if (flag_stack_clash_protection) ++ interval = STACK_CLASH_PROTECTION_GUARD_SIZE; ++ else ++ interval = PROBE_INTERVAL; + +- emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx, +- -(first + size))); +- } ++ rtx r12 = LARCH_PROLOGUE_TEMP2 (Pmode); ++ rtx r14 = LARCH_PROLOGUE_TEMP3 (Pmode); + +- /* Otherwise, do the same as above, but in a loop. Note that we must be +- extra careful with variables wrapping around because we might be at +- the very top (or the very bottom) of the address space and we have +- to be able to handle this case properly; in particular, we use an +- equality test for the loop condition. */ +- else +- { +- HOST_WIDE_INT rounded_size; +- rtx r13 = LARCH_PROLOGUE_TEMP (Pmode); +- rtx r12 = LARCH_PROLOGUE_TEMP2 (Pmode); +- rtx r14 = LARCH_PROLOGUE_TEMP3 (Pmode); ++ size = size + first; + +- /* Sanity check for the addressing mode we're going to use. */ +- gcc_assert (first <= 16384); ++ /* Sanity check for the addressing mode we're going to use. */ ++ gcc_assert (first <= 16384); + ++ /* Step 1: round SIZE to the previous multiple of the interval. */ + +- /* Step 1: round SIZE to the previous multiple of the interval. */ ++ rounded_size = ROUND_DOWN (size, interval); + +- rounded_size = ROUND_DOWN (size, PROBE_INTERVAL); ++ /* Step 2: compute initial and final value of the loop counter. */ + +- /* TEST_ADDR = SP + FIRST */ +- if (first != 0) +- { +- emit_move_insn (r14, GEN_INT (first)); +- emit_insn (gen_rtx_SET (r13, gen_rtx_MINUS (Pmode, +- stack_pointer_rtx, +- r14))); +- } +- else +- emit_move_insn (r13, stack_pointer_rtx); ++ emit_move_insn (r14, GEN_INT (interval)); ++ ++ /* If rounded_size is zero, it means that the space requested by ++ the local variable is less than the interval, and there is no ++ need to display and detect the allocated space. 
*/ ++ if (rounded_size != 0) ++ { ++ /* Step 3: the loop ++ ++ do ++ { ++ TEST_ADDR = TEST_ADDR + PROBE_INTERVAL ++ probe at TEST_ADDR ++ } ++ while (TEST_ADDR != LAST_ADDR) + +- /* Step 2: compute initial and final value of the loop counter. */ ++ probes at FIRST + N * PROBE_INTERVAL for values of N from 1 ++ until it is equal to ROUNDED_SIZE. */ + +- emit_move_insn (r14, GEN_INT (PROBE_INTERVAL)); +- /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */ +- if (rounded_size == 0) +- emit_move_insn (r12, r13); ++ if (rounded_size <= STACK_CLASH_MAX_UNROLL_PAGES * interval) ++ { ++ for (HOST_WIDE_INT i = 0; i < rounded_size; i += interval) ++ { ++ emit_insn (gen_rtx_SET (stack_pointer_rtx, ++ gen_rtx_MINUS (Pmode, ++ stack_pointer_rtx, ++ r14))); ++ emit_move_insn (gen_rtx_MEM (Pmode, ++ gen_rtx_PLUS (Pmode, ++ stack_pointer_rtx, ++ const0_rtx)), ++ const0_rtx); ++ emit_insn (gen_blockage ()); ++ } ++ dump_stack_clash_frame_info (PROBE_INLINE, size != rounded_size); ++ } + else + { + emit_move_insn (r12, GEN_INT (rounded_size)); +- emit_insn (gen_rtx_SET (r12, gen_rtx_MINUS (Pmode, r13, r12))); +- /* Step 3: the loop +- +- do +- { +- TEST_ADDR = TEST_ADDR + PROBE_INTERVAL +- probe at TEST_ADDR +- } +- while (TEST_ADDR != LAST_ADDR) +- +- probes at FIRST + N * PROBE_INTERVAL for values of N from 1 +- until it is equal to ROUNDED_SIZE. */ +- +- emit_insn (gen_probe_stack_range (Pmode, r13, r13, r12, r14)); ++ emit_insn (gen_rtx_SET (r12, ++ gen_rtx_MINUS (Pmode, ++ stack_pointer_rtx, ++ r12))); ++ ++ emit_insn (gen_probe_stack_range (Pmode, stack_pointer_rtx, ++ stack_pointer_rtx, r12, r14)); ++ emit_insn (gen_blockage ()); ++ dump_stack_clash_frame_info (PROBE_LOOP, size != rounded_size); + } ++ } ++ else ++ dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME, true); ++ + +- /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time +- that SIZE is equal to ROUNDED_SIZE. 
*/ ++ /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time ++ that SIZE is equal to ROUNDED_SIZE. */ + +- if (size != rounded_size) ++ if (size != rounded_size) ++ { ++ if (size - rounded_size >= 2048) + { +- if (TARGET_64BIT) +- emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size)); +- else +- { +- HOST_WIDE_INT i; +- for (i = 2048; i < (size - rounded_size); i += 2048) +- { +- emit_stack_probe (plus_constant (Pmode, r12, -i)); +- emit_insn (gen_rtx_SET (r12, +- plus_constant (Pmode, r12, -2048))); +- } +- rtx r1 = plus_constant (Pmode, r12, +- -(size - rounded_size - i + 2048)); +- emit_stack_probe (r1); +- } ++ emit_move_insn (r14, GEN_INT (size - rounded_size)); ++ emit_insn (gen_rtx_SET (stack_pointer_rtx, ++ gen_rtx_MINUS (Pmode, ++ stack_pointer_rtx, ++ r14))); + } ++ else ++ emit_insn (gen_rtx_SET (stack_pointer_rtx, ++ gen_rtx_PLUS (Pmode, ++ stack_pointer_rtx, ++ GEN_INT (rounded_size - size)))); + } + ++ if (first) ++ { ++ emit_move_insn (r12, GEN_INT (first)); ++ emit_insn (gen_rtx_SET (stack_pointer_rtx, ++ gen_rtx_PLUS (Pmode, ++ stack_pointer_rtx, r12))); ++ } + /* Make sure nothing is scheduled before we are done. */ + emit_insn (gen_blockage ()); + } +@@ -1223,7 +1242,6 @@ loongarch_output_probe_stack_range (rtx reg1, rtx reg2, rtx reg3) + + /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. 
*/ + xops[0] = reg1; +- xops[1] = GEN_INT (-PROBE_INTERVAL); + xops[2] = reg3; + if (TARGET_64BIT) + output_asm_insn ("sub.d\t%0,%0,%2", xops); +@@ -1249,28 +1267,11 @@ loongarch_expand_prologue (void) + { + struct loongarch_frame_info *frame = &cfun->machine->frame; + HOST_WIDE_INT size = frame->total_size; +- HOST_WIDE_INT tmp; + rtx insn; + + if (flag_stack_usage_info) + current_function_static_stack_size = size; + +- if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK +- || flag_stack_clash_protection) +- { +- if (crtl->is_leaf && !cfun->calls_alloca) +- { +- if (size > PROBE_INTERVAL && size > get_stack_check_protect ()) +- { +- tmp = size - get_stack_check_protect (); +- loongarch_emit_probe_stack_range (get_stack_check_protect (), +- tmp); +- } +- } +- else if (size > 0) +- loongarch_emit_probe_stack_range (get_stack_check_protect (), size); +- } +- + /* Save the registers. */ + if ((frame->mask | frame->fmask) != 0) + { +@@ -1283,7 +1284,6 @@ loongarch_expand_prologue (void) + loongarch_for_each_saved_reg (size, loongarch_save_reg); + } + +- + /* Set up the frame pointer, if we're using one. */ + if (frame_pointer_needed) + { +@@ -1294,7 +1294,45 @@ loongarch_expand_prologue (void) + loongarch_emit_stack_tie (); + } + +- /* Allocate the rest of the frame. */ ++ if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK ++ || flag_stack_clash_protection) ++ { ++ HOST_WIDE_INT first = get_stack_check_protect (); ++ ++ if (frame->total_size == 0) ++ { ++ /* do nothing. 
*/ ++ dump_stack_clash_frame_info (NO_PROBE_NO_FRAME, false); ++ return; ++ } ++ ++ if (crtl->is_leaf && !cfun->calls_alloca) ++ { ++ HOST_WIDE_INT interval; ++ ++ if (flag_stack_clash_protection) ++ interval = STACK_CLASH_PROTECTION_GUARD_SIZE; ++ else ++ interval = PROBE_INTERVAL; ++ ++ if (size > interval && size > first) ++ loongarch_emit_probe_stack_range (first, size - first); ++ else ++ loongarch_emit_probe_stack_range (first, size); ++ } ++ else ++ loongarch_emit_probe_stack_range (first, size); ++ ++ if (size > 0) ++ { ++ /* Describe the effect of the previous instructions. */ ++ insn = plus_constant (Pmode, stack_pointer_rtx, -size); ++ insn = gen_rtx_SET (stack_pointer_rtx, insn); ++ loongarch_set_frame_expr (insn); ++ } ++ return; ++ } ++ + if (size > 0) + { + if (IMM12_OPERAND (-size)) +@@ -1305,7 +1343,8 @@ loongarch_expand_prologue (void) + } + else + { +- loongarch_emit_move (LARCH_PROLOGUE_TEMP (Pmode), GEN_INT (-size)); ++ loongarch_emit_move (LARCH_PROLOGUE_TEMP (Pmode), ++ GEN_INT (-size)); + emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, + LARCH_PROLOGUE_TEMP (Pmode))); + +@@ -6162,6 +6201,15 @@ loongarch_option_override_internal (struct gcc_options *opts) + gcc_unreachable (); + } + ++ /* Validate the guard size. */ ++ int guard_size = param_stack_clash_protection_guard_size; ++ ++ /* Enforce that interval is the same size as size so the mid-end does the ++ right thing. */ ++ SET_OPTION_IF_UNSET (opts, &global_options_set, ++ param_stack_clash_protection_probe_interval, ++ guard_size); ++ + loongarch_init_print_operand_punct (); + + /* Set up array to map GCC register number to debug register number. +diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h +index a52a81adf..392597943 100644 +--- a/gcc/config/loongarch/loongarch.h ++++ b/gcc/config/loongarch/loongarch.h +@@ -668,6 +668,10 @@ enum reg_class + + #define STACK_BOUNDARY (TARGET_ABI_LP64 ? 
128 : 64) + ++/* This value controls how many pages we manually unroll the loop for when ++ generating stack clash probes. */ ++#define STACK_CLASH_MAX_UNROLL_PAGES 4 ++ + /* Symbolic macros for the registers used to return integer and floating + point values. */ + +diff --git a/gcc/testsuite/gcc.target/loongarch/stack-check-alloca-1.c b/gcc/testsuite/gcc.target/loongarch/stack-check-alloca-1.c +new file mode 100644 +index 000000000..6ee589c4b +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/stack-check-alloca-1.c +@@ -0,0 +1,15 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-guard-size=16" } */ ++/* { dg-require-effective-target supports_stack_clash_protection } */ ++/* { dg-require-effective-target alloca } */ ++/* { dg-skip-if "" { *-*-* } { "-fstack-check" } { "" } } */ ++ ++#define SIZE y ++#include "stack-check-alloca.h" ++ ++/* { dg-final { scan-assembler-times {stp*t*r*\.d\t\$r0,\$r\d{1,2},-8} 1 } } */ ++/* { dg-final { scan-assembler-times {stx\.d\t\$r0,\$r3,\$r12} 1 } } */ ++ ++/* Dynamic alloca, expect loop, and 1 probes with top at sp. ++ 1st probe is inside the loop for the full guard-size allocations, second ++ probe is for the case where residual is zero. 
*/ +diff --git a/gcc/testsuite/gcc.target/loongarch/stack-check-alloca-2.c b/gcc/testsuite/gcc.target/loongarch/stack-check-alloca-2.c +new file mode 100644 +index 000000000..8deaa5873 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/stack-check-alloca-2.c +@@ -0,0 +1,12 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-guard-size=16" } */ ++/* { dg-require-effective-target supports_stack_clash_protection } */ ++/* { dg-require-effective-target alloca } */ ++/* { dg-skip-if "" { *-*-* } { "-fstack-check" } { "" } } */ ++ ++#define SIZE 0 ++#include "stack-check-alloca.h" ++ ++/* { dg-final { scan-assembler-not {stp*t*r*\.d\t\$r0,\$r3,4088} } } */ ++ ++/* Alloca of 0 should emit no probes, boundary condition. */ +diff --git a/gcc/testsuite/gcc.target/loongarch/stack-check-alloca-3.c b/gcc/testsuite/gcc.target/loongarch/stack-check-alloca-3.c +new file mode 100644 +index 000000000..e326ba9a0 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/stack-check-alloca-3.c +@@ -0,0 +1,12 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-guard-size=16" } */ ++/* { dg-require-effective-target supports_stack_clash_protection } */ ++/* { dg-require-effective-target alloca } */ ++/* { dg-skip-if "" { *-*-* } { "-fstack-check" } { "" } } */ ++ ++#define SIZE 100 ++#include "stack-check-alloca.h" ++ ++/* { dg-final { scan-assembler-times {st\.d\t\$r0,\$r3,104} 1 } } */ ++ ++/* Alloca is less than guard-size, 1 probe at the top of the new allocation. 
*/ +diff --git a/gcc/testsuite/gcc.target/loongarch/stack-check-alloca-4.c b/gcc/testsuite/gcc.target/loongarch/stack-check-alloca-4.c +new file mode 100644 +index 000000000..b9f7572de +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/stack-check-alloca-4.c +@@ -0,0 +1,12 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-guard-size=16" } */ ++/* { dg-require-effective-target supports_stack_clash_protection } */ ++/* { dg-require-effective-target alloca } */ ++/* { dg-skip-if "" { *-*-* } { "-fstack-check" } { "" } } */ ++ ++#define SIZE 64 * 1024 ++#include "stack-check-alloca.h" ++ ++/* { dg-final { scan-assembler-times {stp*t*r*\.d\t\$r0,\$r\d{1,2},-8} 1 } } */ ++ ++/* Alloca is exactly one guard-size, 1 probe expected at top. */ +diff --git a/gcc/testsuite/gcc.target/loongarch/stack-check-alloca-5.c b/gcc/testsuite/gcc.target/loongarch/stack-check-alloca-5.c +new file mode 100644 +index 000000000..0ff6e493f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/stack-check-alloca-5.c +@@ -0,0 +1,13 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-guard-size=16" } */ ++/* { dg-require-effective-target supports_stack_clash_protection } */ ++/* { dg-require-effective-target alloca } */ ++/* { dg-skip-if "" { *-*-* } { "-fstack-check" } { "" } } */ ++ ++#define SIZE 65 * 1024 ++#include "stack-check-alloca.h" ++ ++/* { dg-final { scan-assembler-times {stp*t*r*\.d\t\$r0,\$r\d{1,2},-8} 1 } } */ ++/* { dg-final { scan-assembler-times {stp*t*r*\.d\t\$r0,\$r3,1016} 1 } } */ ++ ++/* Alloca is more than one guard-page. 2 probes expected. 
*/ +diff --git a/gcc/testsuite/gcc.target/loongarch/stack-check-alloca-6.c b/gcc/testsuite/gcc.target/loongarch/stack-check-alloca-6.c +new file mode 100644 +index 000000000..c5cf74fcb +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/stack-check-alloca-6.c +@@ -0,0 +1,13 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-guard-size=16" } */ ++/* { dg-require-effective-target supports_stack_clash_protection } */ ++/* { dg-require-effective-target alloca } */ ++/* { dg-skip-if "" { *-*-* } { "-fstack-check" } { "" } } */ ++ ++#define SIZE 127 * 64 * 1024 ++#include "stack-check-alloca.h" ++ ++/* { dg-final { scan-assembler-times {stp*t*r*\.d\t\$r0,\$r\d{1,2},-8} 1 } } */ ++ ++/* Large alloca of a constant amount which is a multiple of a guard-size. ++ Loop expected with top probe. */ +diff --git a/gcc/testsuite/gcc.target/loongarch/stack-check-alloca.h b/gcc/testsuite/gcc.target/loongarch/stack-check-alloca.h +new file mode 100644 +index 000000000..8c75f6c0f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/stack-check-alloca.h +@@ -0,0 +1,15 @@ ++ ++/* Avoid inclusion of alloca.h, unavailable on some systems. 
*/ ++#define alloca __builtin_alloca ++ ++__attribute__((noinline, noipa)) ++void g (char* ptr, int y) ++{ ++ ptr[y] = '\0'; ++} ++ ++void f_caller (int y) ++{ ++ char* pStr = alloca(SIZE); ++ g (pStr, y); ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/stack-check-cfa-1.c b/gcc/testsuite/gcc.target/loongarch/stack-check-cfa-1.c +new file mode 100644 +index 000000000..f0c6877fc +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/stack-check-cfa-1.c +@@ -0,0 +1,12 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-guard-size=16 -funwind-tables" } */ ++/* { dg-require-effective-target supports_stack_clash_protection } */ ++/* { dg-skip-if "" { *-*-* } { "-fstack-check" } { "" } } */ ++ ++#define SIZE 128*1024 ++#include "stack-check-prologue.h" ++ ++/* { dg-final { scan-assembler-times {\.cfi_def_cfa_offset 131088} 1 } } */ ++/* { dg-final { scan-assembler-times {\.cfi_def_cfa_offset 0} 1 } } */ ++ ++/* Checks that the CFA notes are correct for every sp adjustment. */ +diff --git a/gcc/testsuite/gcc.target/loongarch/stack-check-cfa-2.c b/gcc/testsuite/gcc.target/loongarch/stack-check-cfa-2.c +new file mode 100644 +index 000000000..c6e07bc56 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/stack-check-cfa-2.c +@@ -0,0 +1,12 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-guard-size=16 -funwind-tables" } */ ++/* { dg-require-effective-target supports_stack_clash_protection } */ ++/* { dg-skip-if "" { *-*-* } { "-fstack-check" } { "" } } */ ++ ++#define SIZE 1280*1024 + 512 ++#include "stack-check-prologue.h" ++ ++/* { dg-final { scan-assembler-times {\.cfi_def_cfa_offset 1311248} 1 } } */ ++/* { dg-final { scan-assembler-times {\.cfi_def_cfa_offset 0} 1 } } */ ++ ++/* Checks that the CFA notes are correct for every sp adjustment. 
*/ +diff --git a/gcc/testsuite/gcc.target/loongarch/stack-check-prologue-1.c b/gcc/testsuite/gcc.target/loongarch/stack-check-prologue-1.c +new file mode 100644 +index 000000000..351bc1f61 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/stack-check-prologue-1.c +@@ -0,0 +1,11 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-guard-size=16" } */ ++/* { dg-require-effective-target supports_stack_clash_protection } */ ++/* { dg-skip-if "" { *-*-* } { "-fstack-check" } { "" } } */ ++ ++#define SIZE 128 ++#include "stack-check-prologue.h" ++ ++/* { dg-final { scan-assembler-times {stp*t*r*\.d\t\$r0,\$r3,0} 0 } } */ ++ ++/* SIZE is smaller than guard-size so no probe expected. */ +diff --git a/gcc/testsuite/gcc.target/loongarch/stack-check-prologue-2.c b/gcc/testsuite/gcc.target/loongarch/stack-check-prologue-2.c +new file mode 100644 +index 000000000..6bba659a3 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/stack-check-prologue-2.c +@@ -0,0 +1,11 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-guard-size=16" } */ ++/* { dg-require-effective-target supports_stack_clash_protection } */ ++/* { dg-skip-if "" { *-*-* } { "-fstack-check" } { "" } } */ ++ ++#define SIZE 63 * 1024 ++#include "stack-check-prologue.h" ++ ++/* { dg-final { scan-assembler-times {stp*t*r*.d\t\$r0,\$r3,0} 0 } } */ ++ ++/* SIZE is smaller than guard-size so no probe expected. 
*/ +diff --git a/gcc/testsuite/gcc.target/loongarch/stack-check-prologue-3.c b/gcc/testsuite/gcc.target/loongarch/stack-check-prologue-3.c +new file mode 100644 +index 000000000..164956c37 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/stack-check-prologue-3.c +@@ -0,0 +1,11 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-guard-size=16" } */ ++/* { dg-require-effective-target supports_stack_clash_protection } */ ++/* { dg-skip-if "" { *-*-* } { "-fstack-check" } { "" } } */ ++ ++#define SIZE 64 * 1024 ++#include "stack-check-prologue.h" ++ ++/* { dg-final { scan-assembler-times {stp*t*r*\.d\t\$r0,\$r3,0} 1 } } */ ++ ++/* SIZE is equal to guard-size, 1 probe expected, boundary condition. */ +diff --git a/gcc/testsuite/gcc.target/loongarch/stack-check-prologue-4.c b/gcc/testsuite/gcc.target/loongarch/stack-check-prologue-4.c +new file mode 100644 +index 000000000..f53da6b0d +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/stack-check-prologue-4.c +@@ -0,0 +1,11 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-guard-size=16" } */ ++/* { dg-require-effective-target supports_stack_clash_protection } */ ++/* { dg-skip-if "" { *-*-* } { "-fstack-check" } { "" } } */ ++ ++#define SIZE 65 * 1024 ++#include "stack-check-prologue.h" ++ ++/* { dg-final { scan-assembler-times {stp*t*r*\.d\t\$r0,\$r3,0} 1 } } */ ++ ++/* SIZE is more than guard-size 1 probe expected. 
*/ +diff --git a/gcc/testsuite/gcc.target/loongarch/stack-check-prologue-5.c b/gcc/testsuite/gcc.target/loongarch/stack-check-prologue-5.c +new file mode 100644 +index 000000000..c092317ea +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/stack-check-prologue-5.c +@@ -0,0 +1,12 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-guard-size=16" } */ ++/* { dg-require-effective-target supports_stack_clash_protection } */ ++/* { dg-skip-if "" { *-*-* } { "-fstack-check" } { "" } } */ ++ ++#define SIZE 127 * 1024 ++#include "stack-check-prologue.h" ++ ++/* { dg-final { scan-assembler-times {stp*t*r*\.d\t\$r0,\$r3,0} 1 } } */ ++ ++/* SIZE is more than 1x guard-size and remainder small than guard-size, ++ 1 probe expected, unrolled, no loop. */ +diff --git a/gcc/testsuite/gcc.target/loongarch/stack-check-prologue-6.c b/gcc/testsuite/gcc.target/loongarch/stack-check-prologue-6.c +new file mode 100644 +index 000000000..70a2f53f6 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/stack-check-prologue-6.c +@@ -0,0 +1,11 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-guard-size=16" } */ ++/* { dg-require-effective-target supports_stack_clash_protection } */ ++/* { dg-skip-if "" { *-*-* } { "-fstack-check" } { "" } } */ ++ ++#define SIZE 128 * 1024 ++#include "stack-check-prologue.h" ++ ++/* { dg-final { scan-assembler-times {stp*t*r*\.d\t\$r0,\$r3,0} 2 } } */ ++ ++/* SIZE is more than 2x guard-size and no remainder, unrolled, no loop. 
*/ +diff --git a/gcc/testsuite/gcc.target/loongarch/stack-check-prologue-7.c b/gcc/testsuite/gcc.target/loongarch/stack-check-prologue-7.c +new file mode 100644 +index 000000000..e2df89acc +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/stack-check-prologue-7.c +@@ -0,0 +1,12 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-guard-size=16" } */ ++/* { dg-require-effective-target supports_stack_clash_protection } */ ++/* { dg-skip-if "" { *-*-* } { "-fstack-check" } { "" } } */ ++ ++#define SIZE 6 * 64 * 1024 ++#include "stack-check-prologue.h" ++ ++/* { dg-final { scan-assembler-times {stp*t*r*.d\t\$r0,\$r3,0} 1 } } */ ++ ++/* SIZE is more than 4x guard-size and no remainder, 1 probe expected in a loop ++ and no residual probe. */ +diff --git a/gcc/testsuite/gcc.target/loongarch/stack-check-prologue.h b/gcc/testsuite/gcc.target/loongarch/stack-check-prologue.h +new file mode 100644 +index 000000000..b7e06aedb +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/stack-check-prologue.h +@@ -0,0 +1,5 @@ ++int f_test (int x) ++{ ++ char arr[SIZE]; ++ return arr[x]; ++} +diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp +index c858bd93b..3a326ea1c 100644 +--- a/gcc/testsuite/lib/target-supports.exp ++++ b/gcc/testsuite/lib/target-supports.exp +@@ -11292,7 +11292,8 @@ proc check_effective_target_supports_stack_clash_protection { } { + + if { [istarget x86_64-*-*] || [istarget i?86-*-*] + || [istarget powerpc*-*-*] || [istarget rs6000*-*-*] +- || [istarget aarch64*-**] || [istarget s390*-*-*] } { ++ || [istarget aarch64*-**] || [istarget s390*-*-*] ++ || [istarget loongarch64*-**] } { + return 1 + } + return 0 +@@ -11343,6 +11344,10 @@ proc check_effective_target_caller_implicit_probes { } { + return 1; + } + ++ if { [istarget loongarch64*-*-*] } { ++ return 1; ++ } ++ + return 0 + } + +-- +2.33.0 + diff --git 
a/LoongArch-Optimized-multiply-instruction-generation.patch b/LoongArch-Optimized-multiply-instruction-generation.patch new file mode 100644 index 0000000000000000000000000000000000000000..1bd00c15ec2a636961b3aefb7a7fa36f596334aa --- /dev/null +++ b/LoongArch-Optimized-multiply-instruction-generation.patch @@ -0,0 +1,232 @@ +From aa1dc79c9a5ff3df241a94cbfb1c857cfa89c686 Mon Sep 17 00:00:00 2001 +From: Lulu Cheng +Date: Tue, 5 Sep 2023 11:09:03 +0800 +Subject: [PATCH 074/124] LoongArch: Optimized multiply instruction generation. + + 1. Can generate mulh.w[u] instruction. + 2. Can generate mulw.d.wu instruction. + +gcc/ChangeLog: + + * config/loongarch/loongarch.md (mulsidi3_64bit): + Field unsigned extension support. + (muldi3_highpart): Modify template name. + (mulsi3_highpart): Likewise. + (mulsidi3_64bit): Field unsigned extension support. + (muldi3_highpart): Modify muldi3_highpart to + smuldi3_highpart. + (mulsi3_highpart): Modify mulsi3_highpart to + smulsi3_highpart. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/mulw_d_wu.c: New test. + * gcc.target/loongarch/smuldi3_highpart.c: New test. + * gcc.target/loongarch/smulsi3_highpart.c: New test. + * gcc.target/loongarch/umulsi3_highpart.c: New test. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/loongarch.md | 66 ++++++++++++------- + .../gcc.target/loongarch/mulw_d_wu.c | 9 +++ + .../gcc.target/loongarch/smuldi3_highpart.c | 13 ++++ + .../gcc.target/loongarch/smulsi3_highpart.c | 15 +++++ + .../gcc.target/loongarch/umulsi3_highpart.c | 14 ++++ + 5 files changed, 94 insertions(+), 23 deletions(-) + create mode 100644 gcc/testsuite/gcc.target/loongarch/mulw_d_wu.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/smuldi3_highpart.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/smulsi3_highpart.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/umulsi3_highpart.c + +diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md +index 11c18bf15..264cd325c 100644 +--- a/gcc/config/loongarch/loongarch.md ++++ b/gcc/config/loongarch/loongarch.md +@@ -750,15 +750,6 @@ + [(set_attr "type" "imul") + (set_attr "mode" "")]) + +-(define_insn "mulsidi3_64bit" +- [(set (match_operand:DI 0 "register_operand" "=r") +- (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r")) +- (sign_extend:DI (match_operand:SI 2 "register_operand" "r"))))] +- "TARGET_64BIT" +- "mulw.d.w\t%0,%1,%2" +- [(set_attr "type" "imul") +- (set_attr "mode" "DI")]) +- + (define_insn "*mulsi3_extended" + [(set (match_operand:DI 0 "register_operand" "=r") + (sign_extend:DI +@@ -787,14 +778,14 @@ + emit_insn (gen_muldi3 (low, operands[1], operands[2])); + + rtx high = gen_reg_rtx (DImode); +- emit_insn (gen_muldi3_highpart (high, operands[1], operands[2])); ++ emit_insn (gen_muldi3_highpart (high, operands[1], operands[2])); + + emit_move_insn (gen_lowpart (DImode, operands[0]), low); + emit_move_insn (gen_highpart (DImode, operands[0]), high); + DONE; + }) + +-(define_insn "muldi3_highpart" ++(define_insn "muldi3_highpart" + [(set (match_operand:DI 0 "register_operand" "=r") + (truncate:DI + (lshiftrt:TI +@@ -809,22 +800,34 @@ + (set_attr "mode" "DI")]) + + 
(define_expand "mulsidi3" +- [(set (match_operand:DI 0 "register_operand" "=r") ++ [(set (match_operand:DI 0 "register_operand") + (mult:DI (any_extend:DI +- (match_operand:SI 1 "register_operand" " r")) ++ (match_operand:SI 1 "register_operand")) + (any_extend:DI +- (match_operand:SI 2 "register_operand" " r"))))] +- "!TARGET_64BIT" ++ (match_operand:SI 2 "register_operand"))))] ++ "" + { +- rtx temp = gen_reg_rtx (SImode); +- emit_insn (gen_mulsi3 (temp, operands[1], operands[2])); +- emit_insn (gen_mulsi3_highpart (loongarch_subword (operands[0], true), +- operands[1], operands[2])); +- emit_insn (gen_movsi (loongarch_subword (operands[0], false), temp)); +- DONE; ++ if (!TARGET_64BIT) ++ { ++ rtx temp = gen_reg_rtx (SImode); ++ emit_insn (gen_mulsi3 (temp, operands[1], operands[2])); ++ emit_insn (gen_mulsi3_highpart (loongarch_subword (operands[0], true), ++ operands[1], operands[2])); ++ emit_insn (gen_movsi (loongarch_subword (operands[0], false), temp)); ++ DONE; ++ } + }) + +-(define_insn "mulsi3_highpart" ++(define_insn "mulsidi3_64bit" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (mult:DI (any_extend:DI (match_operand:SI 1 "register_operand" "r")) ++ (any_extend:DI (match_operand:SI 2 "register_operand" "r"))))] ++ "TARGET_64BIT" ++ "mulw.d.w\t%0,%1,%2" ++ [(set_attr "type" "imul") ++ (set_attr "mode" "DI")]) ++ ++(define_insn "mulsi3_highpart" + [(set (match_operand:SI 0 "register_operand" "=r") + (truncate:SI + (lshiftrt:DI +@@ -833,11 +836,28 @@ + (any_extend:DI + (match_operand:SI 2 "register_operand" " r"))) + (const_int 32))))] +- "!TARGET_64BIT" ++ "" + "mulh.w\t%0,%1,%2" + [(set_attr "type" "imul") + (set_attr "mode" "SI")]) + ++;; Under the LoongArch architecture, the mulh.w[u] instruction performs ++;; sign extension by default, so the sign extension instruction can be ++;; eliminated. 
++(define_peephole ++ [(set (match_operand:SI 0 "register_operand") ++ (truncate:SI ++ (lshiftrt:DI ++ (mult:DI (any_extend:DI ++ (match_operand:SI 1 "register_operand")) ++ (any_extend:DI ++ (match_operand:SI 2 "register_operand"))) ++ (const_int 32)))) ++ (set (match_operand:DI 3 "register_operand") ++ (sign_extend:DI (match_dup 0)))] ++ "TARGET_64BIT && REGNO (operands[0]) == REGNO (operands[3])" ++ "mulh.w\t%0,%1,%2") ++ + ;; + ;; .................... + ;; +diff --git a/gcc/testsuite/gcc.target/loongarch/mulw_d_wu.c b/gcc/testsuite/gcc.target/loongarch/mulw_d_wu.c +new file mode 100644 +index 000000000..16163d667 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/mulw_d_wu.c +@@ -0,0 +1,9 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -mabi=lp64d" } */ ++/* { dg-final { scan-assembler "mulw.d.wu" } } */ ++ ++__attribute__((noipa, noinline)) unsigned long ++f(unsigned long a, unsigned long b) ++{ ++ return (unsigned long)(unsigned int)a * (unsigned long)(unsigned int)b; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/smuldi3_highpart.c b/gcc/testsuite/gcc.target/loongarch/smuldi3_highpart.c +new file mode 100644 +index 000000000..6f5c686ca +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/smuldi3_highpart.c +@@ -0,0 +1,13 @@ ++/* { dg-do compile } */ ++/* { dg-options "-mabi=lp64d -O2 -fdump-rtl-expand-all" } */ ++ ++typedef int TI __attribute ((mode(TI))); ++typedef int DI __attribute__((mode(DI))); ++ ++DI ++test (DI x, DI y) ++{ ++ return ((TI)x * y) >> 64; ++} ++ ++/* { dg-final { scan-rtl-dump "highparttmp" "expand" } } */ +diff --git a/gcc/testsuite/gcc.target/loongarch/smulsi3_highpart.c b/gcc/testsuite/gcc.target/loongarch/smulsi3_highpart.c +new file mode 100644 +index 000000000..c4dbf8afc +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/smulsi3_highpart.c +@@ -0,0 +1,15 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -fdump-rtl-expand-all" } */ ++ ++typedef unsigned int DI __attribute__((mode(DI))); ++typedef unsigned 
int SI __attribute__((mode(SI))); ++ ++SI ++f (SI x, SI y) ++{ ++ return ((DI) x * y) >> 32; ++} ++ ++/* { dg-final { scan-rtl-dump "highparttmp" "expand" } } */ ++/* { dg-final { scan-assembler "mulh\\.w" } } */ ++/* { dg-final { scan-assembler-not "slli\\.w" } } */ +diff --git a/gcc/testsuite/gcc.target/loongarch/umulsi3_highpart.c b/gcc/testsuite/gcc.target/loongarch/umulsi3_highpart.c +new file mode 100644 +index 000000000..e208803e2 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/umulsi3_highpart.c +@@ -0,0 +1,14 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2" } */ ++ ++typedef unsigned int DI __attribute__((mode(DI))); ++typedef unsigned int SI __attribute__((mode(SI))); ++ ++SI ++f (SI x, SI y) ++{ ++ return ((DI) x * y) >> 32; ++} ++ ++/* { dg-final { scan-assembler "mulh\\.wu" } } */ ++/* { dg-final { scan-assembler-not "slli\\.w" } } */ +-- +2.33.0 + diff --git a/LoongArch-Prepare-static-PIE-support.patch b/LoongArch-Prepare-static-PIE-support.patch new file mode 100644 index 0000000000000000000000000000000000000000..325525ab1d6b1158494754f8137480e7a36418e0 --- /dev/null +++ b/LoongArch-Prepare-static-PIE-support.patch @@ -0,0 +1,44 @@ +From aa2d9e0e1dc4bf0b612618cf0e3fcea514f92f95 Mon Sep 17 00:00:00 2001 +From: Xi Ruoyao +Date: Tue, 13 Sep 2022 23:21:39 +0800 +Subject: [PATCH 018/124] LoongArch: Prepare static PIE support + +Static PIE allows us to extend the ASLR to cover static executables and +it's not too difficult to support it. On GCC side, we just pass a group +of options to the linker, like other ports with static PIE support. + +The real implementation of static PIE (rcrt1.o) will be added into Glibc +later. + +gcc/ChangeLog: + + * config/loongarch/gnu-user.h (GNU_USER_TARGET_LINK_SPEC): For + -static-pie, pass -static -pie --no-dynamic-linker -z text to + the linker, and do not pass --dynamic-linker. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/gnu-user.h | 6 ++++-- + 1 file changed, 4 insertions(+), 2 deletions(-) + +diff --git a/gcc/config/loongarch/gnu-user.h b/gcc/config/loongarch/gnu-user.h +index 664dc9206..c5b1afe53 100644 +--- a/gcc/config/loongarch/gnu-user.h ++++ b/gcc/config/loongarch/gnu-user.h +@@ -40,8 +40,10 @@ along with GCC; see the file COPYING3. If not see + #undef GNU_USER_TARGET_LINK_SPEC + #define GNU_USER_TARGET_LINK_SPEC \ + "%{G*} %{shared} -m " GNU_USER_LINK_EMULATION \ +- "%{!shared: %{static} %{!static: %{rdynamic:-export-dynamic} " \ +- "-dynamic-linker " GNU_USER_DYNAMIC_LINKER "}}" ++ "%{!shared: %{static} " \ ++ "%{!static: %{!static-pie: %{rdynamic:-export-dynamic} " \ ++ "-dynamic-linker " GNU_USER_DYNAMIC_LINKER "}} " \ ++ "%{static-pie: -static -pie --no-dynamic-linker -z text}}" + + + /* Similar to standard Linux, but adding -ffast-math support. */ +-- +2.33.0 + diff --git a/LoongArch-Provide-fmin-fmax-RTL-pattern.patch b/LoongArch-Provide-fmin-fmax-RTL-pattern.patch new file mode 100644 index 0000000000000000000000000000000000000000..2200e0b2dca2d837d9d9f5f461418385d2996899 --- /dev/null +++ b/LoongArch-Provide-fmin-fmax-RTL-pattern.patch @@ -0,0 +1,100 @@ +From b065c84206cdf463a377ca28f719dae7acbed0f7 Mon Sep 17 00:00:00 2001 +From: Xi Ruoyao +Date: Tue, 16 Aug 2022 15:34:36 +0800 +Subject: [PATCH 009/124] LoongArch: Provide fmin/fmax RTL pattern + +We already had smin/smax RTL pattern using fmin/fmax instruction. But +for smin/smax, it's unspecified what will happen if either operand is +NaN. So we would generate calls to libc fmin/fmax functions with +-fno-finite-math-only (the default for all optimization levels expect +-Ofast). + +But, LoongArch fmin/fmax instruction is IEEE-754-2008 conformant so we +can also use the instruction for fmin/fmax pattern and avoid the library +function call. + +gcc/ChangeLog: + + * config/loongarch/loongarch.md (fmax3): New RTL pattern. 
+ (fmin3): Likewise. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/fmax-fmin.c: New test. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/loongarch.md | 18 +++++++++++ + .../gcc.target/loongarch/fmax-fmin.c | 30 +++++++++++++++++++ + 2 files changed, 48 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/fmax-fmin.c + +diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md +index 6b6df22a5..8e8868de9 100644 +--- a/gcc/config/loongarch/loongarch.md ++++ b/gcc/config/loongarch/loongarch.md +@@ -1023,6 +1023,24 @@ + [(set_attr "type" "fmove") + (set_attr "mode" "")]) + ++(define_insn "fmax3" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (smax:ANYF (match_operand:ANYF 1 "register_operand" "f") ++ (match_operand:ANYF 2 "register_operand" "f")))] ++ "" ++ "fmax.\t%0,%1,%2" ++ [(set_attr "type" "fmove") ++ (set_attr "mode" "")]) ++ ++(define_insn "fmin3" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (smin:ANYF (match_operand:ANYF 1 "register_operand" "f") ++ (match_operand:ANYF 2 "register_operand" "f")))] ++ "" ++ "fmin.\t%0,%1,%2" ++ [(set_attr "type" "fmove") ++ (set_attr "mode" "")]) ++ + (define_insn "smaxa3" + [(set (match_operand:ANYF 0 "register_operand" "=f") + (if_then_else:ANYF +diff --git a/gcc/testsuite/gcc.target/loongarch/fmax-fmin.c b/gcc/testsuite/gcc.target/loongarch/fmax-fmin.c +new file mode 100644 +index 000000000..92cf8a150 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/fmax-fmin.c +@@ -0,0 +1,30 @@ ++/* { dg-do compile } */ ++/* { dg-options "-mdouble-float -fno-finite-math-only" } */ ++/* { dg-final { scan-assembler "fmin\\.s" } } */ ++/* { dg-final { scan-assembler "fmin\\.d" } } */ ++/* { dg-final { scan-assembler "fmax\\.s" } } */ ++/* { dg-final { scan-assembler "fmax\\.d" } } */ ++ ++double ++_fmax(double a, double b) ++{ ++ return __builtin_fmax(a, b); ++} ++ ++float ++_fmaxf(float a, float b) ++{ ++ return 
__builtin_fmaxf(a, b); ++} ++ ++double ++_fmin(double a, double b) ++{ ++ return __builtin_fmin(a, b); ++} ++ ++float ++_fminf(float a, float b) ++{ ++ return __builtin_fminf(a, b); ++} +-- +2.33.0 + diff --git a/LoongArch-Remove-redundant-sign-extension-instructio.patch b/LoongArch-Remove-redundant-sign-extension-instructio.patch new file mode 100644 index 0000000000000000000000000000000000000000..7f9d62e34c6dd047c9916d318c554a65c3913212 --- /dev/null +++ b/LoongArch-Remove-redundant-sign-extension-instructio.patch @@ -0,0 +1,180 @@ +From fbe6421c5600a151dbae96d18db2fd31aca2fe7c Mon Sep 17 00:00:00 2001 +From: Lulu Cheng +Date: Thu, 24 Aug 2023 16:44:56 +0800 +Subject: [PATCH 051/124] LoongArch: Remove redundant sign extension + instructions caused by SLT instructions. + +Since the SLT instruction does not distinguish between 64-bit operations and 32-bit +operations under the 64-bit LoongArch architecture, if the operand of slt is SImode, +the sign extension of the operand needs to be displayed. + +But similar to the test case below, the sign extension is redundant: + + extern int src1, src2, src3; + + int + test (void) + { + int data1 = src1 + src2; + int data2 = src1 + src3; + return data1 > data2 ? data1 : data2; + } +Assembly code before optimization: + ... + add.w $r4,$r4,$r14 + add.w $r13,$r13,$r14 + slli.w $r12,$r4,0 + slli.w $r14,$r13,0 + slt $r12,$r12,$r14 + masknez $r4,$r4,$r12 + maskeqz $r12,$r13,$r12 + or $r4,$r4,$r12 + slli.w $r4,$r4,0 + ... + +After optimization: + ... + add.w $r12,$r12,$r14 + add.w $r13,$r13,$r14 + slt $r4,$r12,$r13 + masknez $r12,$r12,$r4 + maskeqz $r4,$r13,$r4 + or $r4,$r12,$r4 + ... + +Similar to this test example, the two operands of SLT are obtained by the +addition operation, and add.w implicitly sign-extends, so the two operands +of SLT do not require sign-extend. + +gcc/ChangeLog: + + * config/loongarch/loongarch.cc (loongarch_expand_conditional_move): + Optimize the function implementation. 
+ +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/slt-sign-extend.c: New test. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/loongarch.cc | 53 +++++++++++++++++-- + .../gcc.target/loongarch/slt-sign-extend.c | 14 +++++ + 2 files changed, 63 insertions(+), 4 deletions(-) + create mode 100644 gcc/testsuite/gcc.target/loongarch/slt-sign-extend.c + +diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc +index f14de5cce..caacfa8a3 100644 +--- a/gcc/config/loongarch/loongarch.cc ++++ b/gcc/config/loongarch/loongarch.cc +@@ -4380,14 +4380,30 @@ loongarch_expand_conditional_move (rtx *operands) + enum rtx_code code = GET_CODE (operands[1]); + rtx op0 = XEXP (operands[1], 0); + rtx op1 = XEXP (operands[1], 1); ++ rtx op0_extend = op0; ++ rtx op1_extend = op1; ++ ++ /* Record whether operands[2] and operands[3] modes are promoted to word_mode. */ ++ bool promote_p = false; ++ machine_mode mode = GET_MODE (operands[0]); + + if (FLOAT_MODE_P (GET_MODE (op1))) + loongarch_emit_float_compare (&code, &op0, &op1); + else + { ++ if ((REGNO (op0) == REGNO (operands[2]) ++ || (REGNO (op1) == REGNO (operands[3]) && (op1 != const0_rtx))) ++ && (GET_MODE_SIZE (GET_MODE (op0)) < word_mode)) ++ { ++ mode = word_mode; ++ promote_p = true; ++ } ++ + loongarch_extend_comparands (code, &op0, &op1); + + op0 = force_reg (word_mode, op0); ++ op0_extend = op0; ++ op1_extend = force_reg (word_mode, op1); + + if (code == EQ || code == NE) + { +@@ -4414,23 +4430,52 @@ loongarch_expand_conditional_move (rtx *operands) + && register_operand (operands[2], VOIDmode) + && register_operand (operands[3], VOIDmode)) + { +- machine_mode mode = GET_MODE (operands[0]); ++ rtx op2 = operands[2]; ++ rtx op3 = operands[3]; ++ ++ if (promote_p) ++ { ++ if (REGNO (XEXP (operands[1], 0)) == REGNO (operands[2])) ++ op2 = op0_extend; ++ else ++ { ++ loongarch_extend_comparands (code, &op2, &const0_rtx); ++ op2 = force_reg (mode, op2); ++ } ++ ++ if 
(REGNO (XEXP (operands[1], 1)) == REGNO (operands[3])) ++ op3 = op1_extend; ++ else ++ { ++ loongarch_extend_comparands (code, &op3, &const0_rtx); ++ op3 = force_reg (mode, op3); ++ } ++ } ++ + rtx temp = gen_reg_rtx (mode); + rtx temp2 = gen_reg_rtx (mode); + + emit_insn (gen_rtx_SET (temp, + gen_rtx_IF_THEN_ELSE (mode, cond, +- operands[2], const0_rtx))); ++ op2, const0_rtx))); + + /* Flip the test for the second operand. */ + cond = gen_rtx_fmt_ee ((code == EQ) ? NE : EQ, GET_MODE (op0), op0, op1); + + emit_insn (gen_rtx_SET (temp2, + gen_rtx_IF_THEN_ELSE (mode, cond, +- operands[3], const0_rtx))); ++ op3, const0_rtx))); + + /* Merge the two results, at least one is guaranteed to be zero. */ +- emit_insn (gen_rtx_SET (operands[0], gen_rtx_IOR (mode, temp, temp2))); ++ if (promote_p) ++ { ++ rtx temp3 = gen_reg_rtx (mode); ++ emit_insn (gen_rtx_SET (temp3, gen_rtx_IOR (mode, temp, temp2))); ++ temp3 = gen_lowpart (GET_MODE (operands[0]), temp3); ++ loongarch_emit_move (operands[0], temp3); ++ } ++ else ++ emit_insn (gen_rtx_SET (operands[0], gen_rtx_IOR (mode, temp, temp2))); + } + else + emit_insn (gen_rtx_SET (operands[0], +diff --git a/gcc/testsuite/gcc.target/loongarch/slt-sign-extend.c b/gcc/testsuite/gcc.target/loongarch/slt-sign-extend.c +new file mode 100644 +index 000000000..ea6b28b7c +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/slt-sign-extend.c +@@ -0,0 +1,14 @@ ++/* { dg-do compile } */ ++/* { dg-options "-mabi=lp64d -O2" } */ ++/* { dg-final { scan-assembler-not "slli.w" } } */ ++ ++extern int src1, src2, src3; ++ ++int ++test (void) ++{ ++ int data1 = src1 + src2; ++ int data2 = src1 + src3; ++ ++ return data1 > data2 ? 
data1 : data2; ++} +-- +2.33.0 + diff --git a/LoongArch-Remove-the-definition-of-the-macro-LOGICAL.patch b/LoongArch-Remove-the-definition-of-the-macro-LOGICAL.patch new file mode 100644 index 0000000000000000000000000000000000000000..6ca420c106185d3a98a451ff11d54185aee631fe --- /dev/null +++ b/LoongArch-Remove-the-definition-of-the-macro-LOGICAL.patch @@ -0,0 +1,36 @@ +From 297b8c5770ad85bf468526602e28aff8a66dc01a Mon Sep 17 00:00:00 2001 +From: Lulu Cheng +Date: Thu, 13 Apr 2023 19:24:38 +0800 +Subject: [PATCH 040/124] LoongArch: Remove the definition of the macro + LOGICAL_OP_NON_SHORT_CIRCUIT under the architecture and use the default + definition instead. + +In some cases, setting this macro as the default can reduce the number of conditional +branch instructions. + +gcc/ChangeLog: + + * config/loongarch/loongarch.h (LOGICAL_OP_NON_SHORT_CIRCUIT): Remove the macro + definition. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/loongarch.h | 1 - + 1 file changed, 1 deletion(-) + +diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h +index 392597943..c6e37b1b4 100644 +--- a/gcc/config/loongarch/loongarch.h ++++ b/gcc/config/loongarch/loongarch.h +@@ -836,7 +836,6 @@ typedef struct { + 1 is the default; other values are interpreted relative to that. */ + + #define BRANCH_COST(speed_p, predictable_p) loongarch_branch_cost +-#define LOGICAL_OP_NON_SHORT_CIRCUIT 0 + + /* Return the asm template for a conditional branch instruction. 
+ OPCODE is the opcode's mnemonic and OPERANDS is the asm template for +-- +2.33.0 + diff --git a/LoongArch-Rename-frint_-fmt-to-rint-mode-2.patch b/LoongArch-Rename-frint_-fmt-to-rint-mode-2.patch new file mode 100644 index 0000000000000000000000000000000000000000..21c7f9a0b20fa47182353acf2ff966b17b107e97 --- /dev/null +++ b/LoongArch-Rename-frint_-fmt-to-rint-mode-2.patch @@ -0,0 +1,65 @@ +From 7584716b03b13c06b8bb9956b9f49e0cfc29c6b3 Mon Sep 17 00:00:00 2001 +From: Xi Ruoyao +Date: Sun, 6 Nov 2022 20:41:38 +0800 +Subject: [PATCH 027/124] LoongArch: Rename frint_ to rint2 + +Use standard name so __builtin_rint{,f} can be expanded to one +instruction. + +gcc/ChangeLog: + + * config/loongarch/loongarch.md (frint_): Rename to .. + (rint2): .. this. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/frint.c: New test. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/loongarch.md | 4 ++-- + gcc/testsuite/gcc.target/loongarch/frint.c | 16 ++++++++++++++++ + 2 files changed, 18 insertions(+), 2 deletions(-) + create mode 100644 gcc/testsuite/gcc.target/loongarch/frint.c + +diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md +index bda34d0f3..a14ab14ac 100644 +--- a/gcc/config/loongarch/loongarch.md ++++ b/gcc/config/loongarch/loongarch.md +@@ -2012,8 +2012,8 @@ + [(set_attr "type" "move")] + ) + +-;; Convert floating-point numbers to integers +-(define_insn "frint_" ++;; Round floating-point numbers to integers ++(define_insn "rint2" + [(set (match_operand:ANYF 0 "register_operand" "=f") + (unspec:ANYF [(match_operand:ANYF 1 "register_operand" "f")] + UNSPEC_FRINT))] +diff --git a/gcc/testsuite/gcc.target/loongarch/frint.c b/gcc/testsuite/gcc.target/loongarch/frint.c +new file mode 100644 +index 000000000..3ee6a8f97 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/frint.c +@@ -0,0 +1,16 @@ ++/* { dg-do compile } */ ++/* { dg-options "-mdouble-float" } */ ++/* { dg-final { scan-assembler 
"frint\\.s" } } */ ++/* { dg-final { scan-assembler "frint\\.d" } } */ ++ ++double ++my_rint (double a) ++{ ++ return __builtin_rint (a); ++} ++ ++float ++my_rintf (float a) ++{ ++ return __builtin_rintf (a); ++} +-- +2.33.0 + diff --git a/LoongArch-Set-default-alignment-for-functions-and-la.patch b/LoongArch-Set-default-alignment-for-functions-and-la.patch new file mode 100644 index 0000000000000000000000000000000000000000..40a01cfaa4329af0ec9de89ec97000774367c08d --- /dev/null +++ b/LoongArch-Set-default-alignment-for-functions-and-la.patch @@ -0,0 +1,113 @@ +From 129d96b9ab5a2445d0fc2c3f7b72baa0453bd93f Mon Sep 17 00:00:00 2001 +From: Xi Ruoyao +Date: Wed, 14 Jun 2023 08:24:05 +0800 +Subject: [PATCH 047/124] LoongArch: Set default alignment for functions and + labels with -mtune + +The LA464 micro-architecture is sensitive to alignment of code. The +Loongson team has benchmarked various combinations of function, the +results [1] show that 16-byte label alignment together with 32-byte +function alignment gives best results in terms of SPEC score. + +Add a mtune-based table-driven mechanism to set the default of +-falign-{functions,labels}. As LA464 is the first (and the only for +now) uarch supported by GCC, the same setting is also used for +the "generic" -mtune=loongarch64. In the future we may set different +settings for LA{2,3,6}64 once we add the support for them. + +Bootstrapped and regtested on loongarch64-linux-gnu. Ok for trunk? + +gcc/ChangeLog: + + * config/loongarch/loongarch-tune.h (loongarch_align): New + struct. + * config/loongarch/loongarch-def.h (loongarch_cpu_align): New + array. + * config/loongarch/loongarch-def.c (loongarch_cpu_align): Define + the array. + * config/loongarch/loongarch.cc + (loongarch_option_override_internal): Set the value of + -falign-functions= if -falign-functions is enabled but no value + is given. Likewise for -falign-labels=. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/loongarch-def.c | 12 ++++++++++++ + gcc/config/loongarch/loongarch-def.h | 1 + + gcc/config/loongarch/loongarch-tune.h | 8 ++++++++ + gcc/config/loongarch/loongarch.cc | 6 ++++++ + 4 files changed, 27 insertions(+) + +diff --git a/gcc/config/loongarch/loongarch-def.c b/gcc/config/loongarch/loongarch-def.c +index 80ab10a52..74d422ce0 100644 +--- a/gcc/config/loongarch/loongarch-def.c ++++ b/gcc/config/loongarch/loongarch-def.c +@@ -72,6 +72,18 @@ loongarch_cpu_cache[N_TUNE_TYPES] = { + }, + }; + ++struct loongarch_align ++loongarch_cpu_align[N_TUNE_TYPES] = { ++ [CPU_LOONGARCH64] = { ++ .function = "32", ++ .label = "16", ++ }, ++ [CPU_LA464] = { ++ .function = "32", ++ .label = "16", ++ }, ++}; ++ + /* The following properties cannot be looked up directly using "cpucfg". + So it is necessary to provide a default value for "unknown native" + tune targets (i.e. -mtune=native while PRID does not correspond to +diff --git a/gcc/config/loongarch/loongarch-def.h b/gcc/config/loongarch/loongarch-def.h +index b5985f070..eb87a79a5 100644 +--- a/gcc/config/loongarch/loongarch-def.h ++++ b/gcc/config/loongarch/loongarch-def.h +@@ -144,6 +144,7 @@ extern int loongarch_cpu_issue_rate[]; + extern int loongarch_cpu_multipass_dfa_lookahead[]; + + extern struct loongarch_cache loongarch_cpu_cache[]; ++extern struct loongarch_align loongarch_cpu_align[]; + extern struct loongarch_rtx_cost_data loongarch_cpu_rtx_cost_data[]; + + #ifdef __cplusplus +diff --git a/gcc/config/loongarch/loongarch-tune.h b/gcc/config/loongarch/loongarch-tune.h +index 8e3eb2947..d961963f0 100644 +--- a/gcc/config/loongarch/loongarch-tune.h ++++ b/gcc/config/loongarch/loongarch-tune.h +@@ -48,4 +48,12 @@ struct loongarch_cache { + int simultaneous_prefetches; /* number of parallel prefetch */ + }; + ++/* Alignment for functions and labels for best performance. For new uarchs ++ the value should be measured via benchmarking. 
See the documentation for ++ -falign-functions and -falign-labels in invoke.texi for the format. */ ++struct loongarch_align { ++ const char *function; /* default value for -falign-functions */ ++ const char *label; /* default value for -falign-labels */ ++}; ++ + #endif /* LOONGARCH_TUNE_H */ +diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc +index 4c0f393b6..f14de5cce 100644 +--- a/gcc/config/loongarch/loongarch.cc ++++ b/gcc/config/loongarch/loongarch.cc +@@ -6246,6 +6246,12 @@ loongarch_option_override_internal (struct gcc_options *opts) + && !opts->x_optimize_size) + opts->x_flag_prefetch_loop_arrays = 1; + ++ if (opts->x_flag_align_functions && !opts->x_str_align_functions) ++ opts->x_str_align_functions = loongarch_cpu_align[LARCH_ACTUAL_TUNE].function; ++ ++ if (opts->x_flag_align_labels && !opts->x_str_align_labels) ++ opts->x_str_align_labels = loongarch_cpu_align[LARCH_ACTUAL_TUNE].label; ++ + if (TARGET_DIRECT_EXTERN_ACCESS && flag_shlib) + error ("%qs cannot be used for compiling a shared library", + "-mdirect-extern-access"); +-- +2.33.0 + diff --git a/LoongArch-Slightly-simplify-loongarch_block_move_str.patch b/LoongArch-Slightly-simplify-loongarch_block_move_str.patch new file mode 100644 index 0000000000000000000000000000000000000000..5ff00e0ec79295ec9da1318396d488d812ee5927 --- /dev/null +++ b/LoongArch-Slightly-simplify-loongarch_block_move_str.patch @@ -0,0 +1,37 @@ +From f4a0248c80fedff3a6841407ff95b732dfbb93a1 Mon Sep 17 00:00:00 2001 +From: Xi Ruoyao +Date: Fri, 8 Sep 2023 00:29:57 +0800 +Subject: [PATCH 073/124] LoongArch: Slightly simplify + loongarch_block_move_straight + +gcc/ChangeLog: + + * config/loongarch/loongarch.cc (loongarch_block_move_straight): + Check precondition (delta must be a power of 2) and use + popcount_hwi instead of a homebrew loop. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/loongarch.cc | 5 ++--- + 1 file changed, 2 insertions(+), 3 deletions(-) + +diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc +index baa5c2354..baa9831aa 100644 +--- a/gcc/config/loongarch/loongarch.cc ++++ b/gcc/config/loongarch/loongarch.cc +@@ -5221,9 +5221,8 @@ loongarch_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length, + emit two ld.d/st.d pairs, one ld.w/st.w pair, and one ld.b/st.b + pair. For each load/store pair we use a dedicated register to keep + the pipeline as populated as possible. */ +- HOST_WIDE_INT num_reg = length / delta; +- for (delta_cur = delta / 2; delta_cur != 0; delta_cur /= 2) +- num_reg += !!(length & delta_cur); ++ gcc_assert (pow2p_hwi (delta)); ++ HOST_WIDE_INT num_reg = length / delta + popcount_hwi (length % delta); + + /* Allocate a buffer for the temporary registers. */ + regs = XALLOCAVEC (rtx, num_reg); +-- +2.33.0 + diff --git a/LoongArch-Subdivision-symbol-type-add-SYMBOL_PCREL-s.patch b/LoongArch-Subdivision-symbol-type-add-SYMBOL_PCREL-s.patch new file mode 100644 index 0000000000000000000000000000000000000000..4c811fbffd62b3cbcae608ef143b062a5fdadb40 --- /dev/null +++ b/LoongArch-Subdivision-symbol-type-add-SYMBOL_PCREL-s.patch @@ -0,0 +1,1234 @@ +From 68bb2a2d0b94b9bde3c22ff1dfe08abb6f036e7f Mon Sep 17 00:00:00 2001 +From: Lulu Cheng +Date: Thu, 21 Jul 2022 10:32:51 +0800 +Subject: [PATCH 003/124] LoongArch: Subdivision symbol type, add SYMBOL_PCREL + support. + +1. Remove cModel type support other than normal. +2. The method for calling global functions changed from 'la.global + jirl' to 'bl' + when complied add '-fplt'. + +gcc/ChangeLog: + + * config/loongarch/constraints.md (a): Delete the constraint. + (b): A constant call not local address. + (h): Delete the constraint. + (t): Delete the constraint. 
+ * config/loongarch/loongarch-opts.cc (loongarch_config_target): + Remove cModel type support other than normal. + * config/loongarch/loongarch-protos.h (enum loongarch_symbol_type): + Add new symbol type 'SYMBOL_PCREL', 'SYMBOL_TLS_IE' and 'SYMBOL_TLS_LE'. + (loongarch_split_symbol): Delete useless function declarations. + (loongarch_split_symbol_type): Delete useless function declarations. + * config/loongarch/loongarch.cc (enum loongarch_address_type): + Delete unnecessary comment information. + (loongarch_symbol_binds_local_p): Modified the judgment order of label + and symbol. + (loongarch_classify_symbol): Return symbol type. If symbol is a label, + or symbol is a local symbol return SYMBOL_PCREL. If is a tls symbol, + return SYMBOL_TLS. If is a not local symbol return SYMBOL_GOT_DISP. + (loongarch_symbolic_constant_p): Add handling of 'SYMBOL_TLS_IE' + 'SYMBOL_TLS_LE' and 'SYMBOL_PCREL'. + (loongarch_symbol_insns): Add handling of 'SYMBOL_TLS_IE' 'SYMBOL_TLS_LE' + and 'SYMBOL_PCREL'. + (loongarch_address_insns): Sort code. + (loongarch_12bit_offset_address_p): Sort code. + (loongarch_14bit_shifted_offset_address_p): Sort code. + (loongarch_call_tls_get_addr): Sort code. + (loongarch_legitimize_tls_address): Sort code. + (loongarch_output_move): Remove schema support for cmodel other than normal. + (loongarch_memmodel_needs_release_fence): Sort code. + (loongarch_print_operand): Sort code. + * config/loongarch/loongarch.h (LARCH_U12BIT_OFFSET_P): + Rename to LARCH_12BIT_OFFSET_P. + (LARCH_12BIT_OFFSET_P): New macro. + * config/loongarch/loongarch.md: Reimplement the function call. Remove schema + support for cmodel other than normal. + * config/loongarch/predicates.md (is_const_call_weak_symbol): Delete this predicate. + (is_const_call_plt_symbol): Delete this predicate. + (is_const_call_global_noplt_symbol): Delete this predicate. + (is_const_call_no_local_symbol): New predicate, determines whether it is a local + symbol or label. 
+ +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/func-call-1.c: New test. + * gcc.target/loongarch/func-call-2.c: New test. + * gcc.target/loongarch/func-call-3.c: New test. + * gcc.target/loongarch/func-call-4.c: New test. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/constraints.md | 24 +- + gcc/config/loongarch/loongarch-opts.cc | 7 + + gcc/config/loongarch/loongarch-protos.h | 9 +- + gcc/config/loongarch/loongarch.cc | 256 +++++++--------- + gcc/config/loongarch/loongarch.h | 2 +- + gcc/config/loongarch/loongarch.md | 279 +++--------------- + gcc/config/loongarch/predicates.md | 40 ++- + .../gcc.target/loongarch/func-call-1.c | 32 ++ + .../gcc.target/loongarch/func-call-2.c | 32 ++ + .../gcc.target/loongarch/func-call-3.c | 32 ++ + .../gcc.target/loongarch/func-call-4.c | 32 ++ + 11 files changed, 312 insertions(+), 433 deletions(-) + create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-4.c + +diff --git a/gcc/config/loongarch/constraints.md b/gcc/config/loongarch/constraints.md +index d0bfddbd5..43cb7b5f0 100644 +--- a/gcc/config/loongarch/constraints.md ++++ b/gcc/config/loongarch/constraints.md +@@ -20,14 +20,14 @@ + + ;; Register constraints + +-;; "a" "A constant call global and noplt address." +-;; "b" <-----unused ++;; "a" <-----unused ++;; "b" "A constant call not local address." + ;; "c" "A constant call local address." + ;; "d" <-----unused + ;; "e" JIRL_REGS + ;; "f" FP_REGS + ;; "g" <-----unused +-;; "h" "A constant call plt address." ++;; "h" <-----unused + ;; "i" "Matches a general integer constant." 
(Global non-architectural) + ;; "j" SIBCALL_REGS + ;; "k" "A memory operand whose address is formed by a base register and +@@ -42,7 +42,7 @@ + ;; "q" CSR_REGS + ;; "r" GENERAL_REGS (Global non-architectural) + ;; "s" "Matches a symbolic integer constant." (Global non-architectural) +-;; "t" "A constant call weak address" ++;; "t" <-----unused + ;; "u" "A signed 52bit constant and low 32-bit is zero (for logic instructions)" + ;; "v" "A signed 64-bit constant and low 44-bit is zero (for logic instructions)." + ;; "w" "Matches any valid memory." +@@ -89,10 +89,10 @@ + ;; "<" "Matches a pre-dec or post-dec operand." (Global non-architectural) + ;; ">" "Matches a pre-inc or post-inc operand." (Global non-architectural) + +-(define_constraint "a" ++(define_constraint "b" + "@internal +- A constant call global and noplt address." +- (match_operand 0 "is_const_call_global_noplt_symbol")) ++ A constant call no local address." ++ (match_operand 0 "is_const_call_no_local_symbol")) + + (define_constraint "c" + "@internal +@@ -105,11 +105,6 @@ + (define_register_constraint "f" "TARGET_HARD_FLOAT ? FP_REGS : NO_REGS" + "A floating-point register (if available).") + +-(define_constraint "h" +- "@internal +- A constant call plt address." +- (match_operand 0 "is_const_call_plt_symbol")) +- + (define_register_constraint "j" "SIBCALL_REGS" + "@internal") + +@@ -134,11 +129,6 @@ + (define_register_constraint "q" "CSR_REGS" + "A general-purpose register except for $r0 and $r1 for lcsr.") + +-(define_constraint "t" +- "@internal +- A constant call weak address." +- (match_operand 0 "is_const_call_weak_symbol")) +- + (define_constraint "u" + "A signed 52bit constant and low 32-bit is zero (for logic instructions)." 
+ (and (match_code "const_int") +diff --git a/gcc/config/loongarch/loongarch-opts.cc b/gcc/config/loongarch/loongarch-opts.cc +index eb9c2a52f..fc477bfd4 100644 +--- a/gcc/config/loongarch/loongarch-opts.cc ++++ b/gcc/config/loongarch/loongarch-opts.cc +@@ -376,6 +376,13 @@ fallback: + + /* 5. Target code model */ + t.cmodel = constrained.cmodel ? opt_cmodel : CMODEL_NORMAL; ++ if (t.cmodel != CMODEL_NORMAL) ++ { ++ warning (0, "%qs is not supported, now cmodel is set to 'normal'.", ++ loongarch_cmodel_strings[t.cmodel]); ++ t.cmodel = CMODEL_NORMAL; ++ } ++ + + /* Cleanup and return. */ + obstack_free (&msg_obstack, NULL); +diff --git a/gcc/config/loongarch/loongarch-protos.h b/gcc/config/loongarch/loongarch-protos.h +index 2287fd376..080766250 100644 +--- a/gcc/config/loongarch/loongarch-protos.h ++++ b/gcc/config/loongarch/loongarch-protos.h +@@ -27,9 +27,13 @@ along with GCC; see the file COPYING3. If not see + SYMBOL_GOT_DISP + The symbol's value will be loaded directly from the GOT. + ++ SYMBOL_PCREL ++ The symbol's value will be loaded directly from data section. ++ + SYMBOL_TLS + A thread-local symbol. + ++ SYMBOL_TLS_IE + SYMBOL_TLSGD + SYMBOL_TLSLDM + UNSPEC wrappers around SYMBOL_TLS, corresponding to the +@@ -37,7 +41,10 @@ along with GCC; see the file COPYING3. 
If not see + */ + enum loongarch_symbol_type { + SYMBOL_GOT_DISP, ++ SYMBOL_PCREL, + SYMBOL_TLS, ++ SYMBOL_TLS_IE, ++ SYMBOL_TLS_LE, + SYMBOL_TLSGD, + SYMBOL_TLSLDM, + }; +@@ -61,7 +68,6 @@ extern int loongarch_idiv_insns (machine_mode); + #ifdef RTX_CODE + extern void loongarch_emit_binary (enum rtx_code, rtx, rtx, rtx); + #endif +-extern bool loongarch_split_symbol (rtx, rtx, machine_mode, rtx *); + extern rtx loongarch_unspec_address (rtx, enum loongarch_symbol_type); + extern rtx loongarch_strip_unspec_address (rtx); + extern void loongarch_move_integer (rtx, rtx, unsigned HOST_WIDE_INT); +@@ -154,7 +160,6 @@ extern rtx loongarch_expand_thread_pointer (rtx); + extern bool loongarch_eh_uses (unsigned int); + extern bool loongarch_epilogue_uses (unsigned int); + extern bool loongarch_load_store_bonding_p (rtx *, machine_mode, bool); +-extern bool loongarch_split_symbol_type (enum loongarch_symbol_type); + + typedef rtx (*mulsidi3_gen_fn) (rtx, rtx, rtx); + +diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc +index 750d53bbe..2e2f16e72 100644 +--- a/gcc/config/loongarch/loongarch.cc ++++ b/gcc/config/loongarch/loongarch.cc +@@ -114,19 +114,7 @@ enum loongarch_address_type + }; + + +-/* Information about an address described by loongarch_address_type. +- +- ADDRESS_CONST_INT +- No fields are used. +- +- ADDRESS_REG +- REG is the base register and OFFSET is the constant offset. +- +- ADDRESS_REG_REG +- A base register indexed by (optionally scaled) register. +- +- ADDRESS_SYMBOLIC +- SYMBOL_TYPE is the type of symbol that the address references. */ ++/* Information about an address described by loongarch_address_type. */ + struct loongarch_address_info + { + enum loongarch_address_type type; +@@ -1617,11 +1605,12 @@ loongarch_weak_symbol_p (const_rtx x) + bool + loongarch_symbol_binds_local_p (const_rtx x) + { +- if (LABEL_REF_P (x)) ++ if (SYMBOL_REF_P (x)) ++ return (SYMBOL_REF_DECL (x) ++ ? 
targetm.binds_local_p (SYMBOL_REF_DECL (x)) ++ : SYMBOL_REF_LOCAL_P (x)); ++ else + return false; +- +- return (SYMBOL_REF_DECL (x) ? targetm.binds_local_p (SYMBOL_REF_DECL (x)) +- : SYMBOL_REF_LOCAL_P (x)); + } + + /* Return true if rtx constants of mode MODE should be put into a small +@@ -1640,17 +1629,16 @@ static enum loongarch_symbol_type + loongarch_classify_symbol (const_rtx x) + { + if (LABEL_REF_P (x)) +- return SYMBOL_GOT_DISP; +- +- gcc_assert (SYMBOL_REF_P (x)); ++ return SYMBOL_PCREL; + + if (SYMBOL_REF_TLS_MODEL (x)) + return SYMBOL_TLS; + +- if (SYMBOL_REF_P (x)) ++ if (SYMBOL_REF_P (x) ++ && !loongarch_symbol_binds_local_p (x)) + return SYMBOL_GOT_DISP; + +- return SYMBOL_GOT_DISP; ++ return SYMBOL_PCREL; + } + + /* Return true if X is a symbolic constant. If it is, +@@ -1683,9 +1671,15 @@ loongarch_symbolic_constant_p (rtx x, enum loongarch_symbol_type *symbol_type) + relocations. */ + switch (*symbol_type) + { +- case SYMBOL_GOT_DISP: ++ case SYMBOL_TLS_IE: ++ case SYMBOL_TLS_LE: + case SYMBOL_TLSGD: + case SYMBOL_TLSLDM: ++ case SYMBOL_PCREL: ++ /* GAS rejects offsets outside the range [-2^31, 2^31-1]. */ ++ return sext_hwi (INTVAL (offset), 32) == INTVAL (offset); ++ ++ case SYMBOL_GOT_DISP: + case SYMBOL_TLS: + return false; + } +@@ -1707,9 +1701,14 @@ loongarch_symbol_insns (enum loongarch_symbol_type type, machine_mode mode) + + return 3; + ++ case SYMBOL_PCREL: ++ case SYMBOL_TLS_IE: ++ case SYMBOL_TLS_LE: ++ return 2; ++ + case SYMBOL_TLSGD: + case SYMBOL_TLSLDM: +- return 1; ++ return 3; + + case SYMBOL_TLS: + /* We don't treat a bare TLS symbol as a constant. 
*/ +@@ -1937,11 +1936,7 @@ loongarch_address_insns (rtx x, machine_mode mode, bool might_split_p) + switch (addr.type) + { + case ADDRESS_REG: +- return factor; +- + case ADDRESS_REG_REG: +- return factor; +- + case ADDRESS_CONST_INT: + return factor; + +@@ -1983,7 +1978,7 @@ loongarch_12bit_offset_address_p (rtx x, machine_mode mode) + return (loongarch_classify_address (&addr, x, mode, false) + && addr.type == ADDRESS_REG + && CONST_INT_P (addr.offset) +- && LARCH_U12BIT_OFFSET_P (INTVAL (addr.offset))); ++ && LARCH_12BIT_OFFSET_P (INTVAL (addr.offset))); + } + + /* Return true if X is a legitimate address with a 14-bit offset shifted 2. +@@ -2001,6 +1996,9 @@ loongarch_14bit_shifted_offset_address_p (rtx x, machine_mode mode) + && LARCH_SHIFT_2_OFFSET_P (INTVAL (addr.offset))); + } + ++/* Return true if X is a legitimate address with base and index. ++ MODE is the mode of the value being accessed. */ ++ + bool + loongarch_base_index_address_p (rtx x, machine_mode mode) + { +@@ -2310,7 +2308,7 @@ loongarch_call_tls_get_addr (rtx sym, enum loongarch_symbol_type type, rtx v0) + + /* Generate the code to access LOC, a thread-local SYMBOL_REF, and return + its address. The return value will be both a valid address and a valid +- SET_SRC (either a REG or a LO_SUM). */ ++ SET_SRC. */ + + static rtx + loongarch_legitimize_tls_address (rtx loc) +@@ -2336,7 +2334,7 @@ loongarch_legitimize_tls_address (rtx loc) + break; + + case TLS_MODEL_INITIAL_EXEC: +- /* la.tls.ie; tp-relative add */ ++ /* la.tls.ie; tp-relative add. */ + tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM); + tmp = gen_reg_rtx (Pmode); + emit_insn (loongarch_got_load_tls_ie (tmp, loc)); +@@ -2345,7 +2343,7 @@ loongarch_legitimize_tls_address (rtx loc) + break; + + case TLS_MODEL_LOCAL_EXEC: +- /* la.tls.le; tp-relative add */ ++ /* la.tls.le; tp-relative add. 
*/ + tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM); + tmp = gen_reg_rtx (Pmode); + emit_insn (loongarch_got_load_tls_le (tmp, loc)); +@@ -3371,6 +3369,7 @@ loongarch_output_move (rtx dest, rtx src) + case 2: + return "st.h\t%z1,%0"; + case 4: ++ /* Matching address type with a 12bit offset. */ + if (const_arith_operand (offset, Pmode)) + return "st.w\t%z1,%0"; + else +@@ -3409,6 +3408,7 @@ loongarch_output_move (rtx dest, rtx src) + case 2: + return "ld.hu\t%0,%1"; + case 4: ++ /* Matching address type with a 12bit offset. */ + if (const_arith_operand (offset, Pmode)) + return "ld.w\t%0,%1"; + else +@@ -3436,56 +3436,16 @@ loongarch_output_move (rtx dest, rtx src) + else + gcc_unreachable (); + } ++ } + +- if (symbolic_operand (src, VOIDmode)) +- { +- if ((TARGET_CMODEL_TINY && (!loongarch_global_symbol_p (src) +- || loongarch_symbol_binds_local_p (src))) +- || (TARGET_CMODEL_TINY_STATIC && !loongarch_weak_symbol_p (src))) +- { +- /* The symbol must be aligned to 4 byte. */ +- unsigned int align; +- +- if (LABEL_REF_P (src)) +- align = 32 /* Whatever. 
*/; +- else if (CONSTANT_POOL_ADDRESS_P (src)) +- align = GET_MODE_ALIGNMENT (get_pool_mode (src)); +- else if (TREE_CONSTANT_POOL_ADDRESS_P (src)) +- { +- tree exp = SYMBOL_REF_DECL (src); +- align = TYPE_ALIGN (TREE_TYPE (exp)); +- align = loongarch_constant_alignment (exp, align); +- } +- else if (SYMBOL_REF_DECL (src)) +- align = DECL_ALIGN (SYMBOL_REF_DECL (src)); +- else if (SYMBOL_REF_HAS_BLOCK_INFO_P (src) +- && SYMBOL_REF_BLOCK (src) != NULL) +- align = SYMBOL_REF_BLOCK (src)->alignment; +- else +- align = BITS_PER_UNIT; +- +- if (align % (4 * 8) == 0) +- return "pcaddi\t%0,%%pcrel(%1)>>2"; +- } +- if (TARGET_CMODEL_TINY +- || TARGET_CMODEL_TINY_STATIC +- || TARGET_CMODEL_NORMAL +- || TARGET_CMODEL_LARGE) +- { +- if (!loongarch_global_symbol_p (src) +- || loongarch_symbol_binds_local_p (src)) +- return "la.local\t%0,%1"; +- else +- return "la.global\t%0,%1"; +- } +- if (TARGET_CMODEL_EXTREME) +- { +- sorry ("Normal symbol loading not implemented in extreme mode."); +- gcc_unreachable (); +- } +- +- } ++ if (dest_code == REG && symbolic_operand (src, VOIDmode)) ++ { ++ if (loongarch_classify_symbol (src) == SYMBOL_PCREL) ++ return "la.local\t%0,%1"; ++ else ++ return "la.global\t%0,%1"; + } ++ + if (src_code == REG && FP_REG_P (REGNO (src))) + { + if (dest_code == REG && FP_REG_P (REGNO (dest))) +@@ -3503,6 +3463,7 @@ loongarch_output_move (rtx dest, rtx src) + return dbl_p ? "fst.d\t%1,%0" : "fst.s\t%1,%0"; + } + } ++ + if (dest_code == REG && FP_REG_P (REGNO (dest))) + { + if (src_code == MEM) +@@ -3517,6 +3478,7 @@ loongarch_output_move (rtx dest, rtx src) + return dbl_p ? "fld.d\t%0,%1" : "fld.s\t%0,%1"; + } + } ++ + gcc_unreachable (); + } + +@@ -4347,27 +4309,27 @@ loongarch_memmodel_needs_release_fence (enum memmodel model) + + /* Implement TARGET_PRINT_OPERAND. The LoongArch-specific operand codes are: + +- 'X' Print CONST_INT OP in hexadecimal format. +- 'x' Print the low 16 bits of CONST_INT OP in hexadecimal format. 
++ 'A' Print a _DB suffix if the memory model requires a release. ++ 'b' Print the address of a memory operand, without offset. ++ 'C' Print the integer branch condition for comparison OP. + 'd' Print CONST_INT OP in decimal. ++ 'F' Print the FPU branch condition for comparison OP. ++ 'G' Print a DBAR insn if the memory model requires a release. ++ 'i' Print i if the operand is not a register. + 'm' Print one less than CONST_INT OP in decimal. +- 'y' Print exact log2 of CONST_INT OP in decimal. +- 'C' Print the integer branch condition for comparison OP. + 'N' Print the inverse of the integer branch condition for comparison OP. +- 'F' Print the FPU branch condition for comparison OP. +- 'W' Print the inverse of the FPU branch condition for comparison OP. + 'T' Print 'f' for (eq:CC ...), 't' for (ne:CC ...), + 'z' for (eq:?I ...), 'n' for (ne:?I ...). + 't' Like 'T', but with the EQ/NE cases reversed +- 'Y' Print loongarch_fp_conditions[INTVAL (OP)] +- 'Z' Print OP and a comma for 8CC, otherwise print nothing. +- 'z' Print $0 if OP is zero, otherwise print OP normally. +- 'b' Print the address of a memory operand, without offset. + 'V' Print exact log2 of CONST_INT OP element 0 of a replicated + CONST_VECTOR in decimal. +- 'A' Print a _DB suffix if the memory model requires a release. +- 'G' Print a DBAR insn if the memory model requires a release. +- 'i' Print i if the operand is not a register. */ ++ 'W' Print the inverse of the FPU branch condition for comparison OP. ++ 'X' Print CONST_INT OP in hexadecimal format. ++ 'x' Print the low 16 bits of CONST_INT OP in hexadecimal format. ++ 'Y' Print loongarch_fp_conditions[INTVAL (OP)] ++ 'y' Print exact log2 of CONST_INT OP in decimal. ++ 'Z' Print OP and a comma for 8CC, otherwise print nothing. ++ 'z' Print $0 if OP is zero, otherwise print OP normally. 
*/ + + static void + loongarch_print_operand (FILE *file, rtx op, int letter) +@@ -4385,18 +4347,13 @@ loongarch_print_operand (FILE *file, rtx op, int letter) + + switch (letter) + { +- case 'X': +- if (CONST_INT_P (op)) +- fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op)); +- else +- output_operand_lossage ("invalid use of '%%%c'", letter); ++ case 'A': ++ if (loongarch_memmodel_needs_rel_acq_fence ((enum memmodel) INTVAL (op))) ++ fputs ("_db", file); + break; + +- case 'x': +- if (CONST_INT_P (op)) +- fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op) & 0xffff); +- else +- output_operand_lossage ("invalid use of '%%%c'", letter); ++ case 'C': ++ loongarch_print_int_branch_condition (file, code, letter); + break; + + case 'd': +@@ -4406,6 +4363,20 @@ loongarch_print_operand (FILE *file, rtx op, int letter) + output_operand_lossage ("invalid use of '%%%c'", letter); + break; + ++ case 'F': ++ loongarch_print_float_branch_condition (file, code, letter); ++ break; ++ ++ case 'G': ++ if (loongarch_memmodel_needs_release_fence ((enum memmodel) INTVAL (op))) ++ fputs ("dbar\t0", file); ++ break; ++ ++ case 'i': ++ if (code != REG) ++ fputs ("i", file); ++ break; ++ + case 'm': + if (CONST_INT_P (op)) + fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op) - 1); +@@ -4413,17 +4384,17 @@ loongarch_print_operand (FILE *file, rtx op, int letter) + output_operand_lossage ("invalid use of '%%%c'", letter); + break; + +- case 'y': +- if (CONST_INT_P (op)) +- { +- int val = exact_log2 (INTVAL (op)); +- if (val != -1) +- fprintf (file, "%d", val); +- else +- output_operand_lossage ("invalid use of '%%%c'", letter); +- } +- else +- output_operand_lossage ("invalid use of '%%%c'", letter); ++ case 'N': ++ loongarch_print_int_branch_condition (file, reverse_condition (code), ++ letter); ++ break; ++ ++ case 't': ++ case 'T': ++ { ++ int truth = (code == NE) == (letter == 'T'); ++ fputc ("zfnt"[truth * 2 + FCC_REG_P (REGNO (XEXP (op, 0)))], file); ++ } + break; + + case 'V': +@@ 
-4441,30 +4412,36 @@ loongarch_print_operand (FILE *file, rtx op, int letter) + output_operand_lossage ("invalid use of '%%%c'", letter); + break; + +- case 'C': +- loongarch_print_int_branch_condition (file, code, letter); +- break; +- +- case 'N': +- loongarch_print_int_branch_condition (file, reverse_condition (code), +- letter); ++ case 'W': ++ loongarch_print_float_branch_condition (file, reverse_condition (code), ++ letter); + break; + +- case 'F': +- loongarch_print_float_branch_condition (file, code, letter); ++ case 'x': ++ if (CONST_INT_P (op)) ++ fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op) & 0xffff); ++ else ++ output_operand_lossage ("invalid use of '%%%c'", letter); + break; + +- case 'W': +- loongarch_print_float_branch_condition (file, reverse_condition (code), +- letter); ++ case 'X': ++ if (CONST_INT_P (op)) ++ fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op)); ++ else ++ output_operand_lossage ("invalid use of '%%%c'", letter); + break; + +- case 'T': +- case 't': +- { +- int truth = (code == NE) == (letter == 'T'); +- fputc ("zfnt"[truth * 2 + FCC_REG_P (REGNO (XEXP (op, 0)))], file); +- } ++ case 'y': ++ if (CONST_INT_P (op)) ++ { ++ int val = exact_log2 (INTVAL (op)); ++ if (val != -1) ++ fprintf (file, "%d", val); ++ else ++ output_operand_lossage ("invalid use of '%%%c'", letter); ++ } ++ else ++ output_operand_lossage ("invalid use of '%%%c'", letter); + break; + + case 'Y': +@@ -4481,21 +4458,6 @@ loongarch_print_operand (FILE *file, rtx op, int letter) + fputc (',', file); + break; + +- case 'A': +- if (loongarch_memmodel_needs_rel_acq_fence ((enum memmodel) INTVAL (op))) +- fputs ("_db", file); +- break; +- +- case 'G': +- if (loongarch_memmodel_needs_release_fence ((enum memmodel) INTVAL (op))) +- fputs ("dbar\t0", file); +- break; +- +- case 'i': +- if (code != REG) +- fputs ("i", file); +- break; +- + default: + switch (code) + { +diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h +index 
714401f2d..12f209047 100644 +--- a/gcc/config/loongarch/loongarch.h ++++ b/gcc/config/loongarch/loongarch.h +@@ -617,7 +617,7 @@ enum reg_class + #define LU12I_INT(X) LU12I_OPERAND (INTVAL (X)) + #define LU32I_INT(X) LU32I_OPERAND (INTVAL (X)) + #define LU52I_INT(X) LU52I_OPERAND (INTVAL (X)) +-#define LARCH_U12BIT_OFFSET_P(OFFSET) (IN_RANGE (OFFSET, -2048, 2047)) ++#define LARCH_12BIT_OFFSET_P(OFFSET) (IN_RANGE (OFFSET, -2048, 2047)) + #define LARCH_9BIT_OFFSET_P(OFFSET) (IN_RANGE (OFFSET, -256, 255)) + #define LARCH_16BIT_OFFSET_P(OFFSET) (IN_RANGE (OFFSET, -32768, 32767)) + #define LARCH_SHIFT_2_OFFSET_P(OFFSET) (((OFFSET) & 0x3) == 0) +diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md +index 5c0445dd8..376879fbc 100644 +--- a/gcc/config/loongarch/loongarch.md ++++ b/gcc/config/loongarch/loongarch.md +@@ -2844,48 +2844,14 @@ + }) + + (define_insn "sibcall_internal" +- [(call (mem:SI (match_operand 0 "call_insn_operand" "j,c,a,t,h")) ++ [(call (mem:SI (match_operand 0 "call_insn_operand" "j,c,b")) + (match_operand 1 "" ""))] + "SIBLING_CALL_P (insn)" +-{ +- switch (which_alternative) +- { +- case 0: +- return "jr\t%0"; +- case 1: +- if (TARGET_CMODEL_LARGE) +- return "pcaddu18i\t$r12,(%%pcrel(%0+0x20000))>>18\n\t" +- "jirl\t$r0,$r12,%%pcrel(%0+4)-(%%pcrel(%0+4+0x20000)>>18<<18)"; +- else if (TARGET_CMODEL_EXTREME) +- return "la.local\t$r12,$r13,%0\n\tjr\t$r12"; +- else +- return "b\t%0"; +- case 2: +- if (TARGET_CMODEL_TINY_STATIC) +- return "b\t%0"; +- else if (TARGET_CMODEL_EXTREME) +- return "la.global\t$r12,$r13,%0\n\tjr\t$r12"; +- else +- return "la.global\t$r12,%0\n\tjr\t$r12"; +- case 3: +- if (TARGET_CMODEL_EXTREME) +- return "la.global\t$r12,$r13,%0\n\tjr\t$r12"; +- else +- return "la.global\t$r12,%0\n\tjr\t$r12"; +- case 4: +- if (TARGET_CMODEL_NORMAL || TARGET_CMODEL_TINY) +- return "b\t%%plt(%0)"; +- else if (TARGET_CMODEL_LARGE) +- return "pcaddu18i\t$r12,(%%plt(%0)+0x20000)>>18\n\t" +- 
"jirl\t$r0,$r12,%%plt(%0)+4-((%%plt(%0)+(4+0x20000))>>18<<18)"; +- else +- /* Cmodel extreme and tiny static not support plt. */ +- gcc_unreachable (); +- default: +- gcc_unreachable (); +- } +-} +- [(set_attr "jirl" "indirect,direct,direct,direct,direct")]) ++ "@ ++ jr\t%0 ++ b\t%0 ++ b\t%%plt(%0)" ++ [(set_attr "jirl" "indirect,direct,direct")]) + + (define_expand "sibcall_value" + [(parallel [(set (match_operand 0 "") +@@ -2920,96 +2886,28 @@ + + (define_insn "sibcall_value_internal" + [(set (match_operand 0 "register_operand" "") +- (call (mem:SI (match_operand 1 "call_insn_operand" "j,c,a,t,h")) ++ (call (mem:SI (match_operand 1 "call_insn_operand" "j,c,b")) + (match_operand 2 "" "")))] + "SIBLING_CALL_P (insn)" +-{ +- switch (which_alternative) +- { +- case 0: +- return "jr\t%1"; +- case 1: +- if (TARGET_CMODEL_LARGE) +- return "pcaddu18i\t$r12,%%pcrel(%1+0x20000)>>18\n\t" +- "jirl\t$r0,$r12,%%pcrel(%1+4)-((%%pcrel(%1+4+0x20000))>>18<<18)"; +- else if (TARGET_CMODEL_EXTREME) +- return "la.local\t$r12,$r13,%1\n\tjr\t$r12"; +- else +- return "b\t%1"; +- case 2: +- if (TARGET_CMODEL_TINY_STATIC) +- return "b\t%1"; +- else if (TARGET_CMODEL_EXTREME) +- return "la.global\t$r12,$r13,%1\n\tjr\t$r12"; +- else +- return "la.global\t$r12,%1\n\tjr\t$r12"; +- case 3: +- if (TARGET_CMODEL_EXTREME) +- return "la.global\t$r12,$r13,%1\n\tjr\t$r12"; +- else +- return "la.global\t$r12,%1\n\tjr\t$r12"; +- case 4: +- if (TARGET_CMODEL_NORMAL || TARGET_CMODEL_TINY) +- return " b\t%%plt(%1)"; +- else if (TARGET_CMODEL_LARGE) +- return "pcaddu18i\t$r12,(%%plt(%1)+0x20000)>>18\n\t" +- "jirl\t$r0,$r12,%%plt(%1)+4-((%%plt(%1)+(4+0x20000))>>18<<18)"; +- else +- /* Cmodel extreme and tiny static not support plt. 
*/ +- gcc_unreachable (); +- default: +- gcc_unreachable (); +- } +-} +- [(set_attr "jirl" "indirect,direct,direct,direct,direct")]) ++ "@ ++ jr\t%1 ++ b\t%1 ++ b\t%%plt(%1)" ++ [(set_attr "jirl" "indirect,direct,direct")]) + + (define_insn "sibcall_value_multiple_internal" + [(set (match_operand 0 "register_operand" "") +- (call (mem:SI (match_operand 1 "call_insn_operand" "j,c,a,t,h")) ++ (call (mem:SI (match_operand 1 "call_insn_operand" "j,c,b")) + (match_operand 2 "" ""))) + (set (match_operand 3 "register_operand" "") + (call (mem:SI (match_dup 1)) + (match_dup 2)))] + "SIBLING_CALL_P (insn)" +-{ +- switch (which_alternative) +- { +- case 0: +- return "jr\t%1"; +- case 1: +- if (TARGET_CMODEL_LARGE) +- return "pcaddu18i\t$r12,%%pcrel(%1+0x20000)>>18\n\t" +- "jirl\t$r0,$r12,%%pcrel(%1+4)-(%%pcrel(%1+4+0x20000)>>18<<18)"; +- else if (TARGET_CMODEL_EXTREME) +- return "la.local\t$r12,$r13,%1\n\tjr\t$r12"; +- else +- return "b\t%1"; +- case 2: +- if (TARGET_CMODEL_TINY_STATIC) +- return "b\t%1"; +- else if (TARGET_CMODEL_EXTREME) +- return "la.global\t$r12,$r13,%1\n\tjr\t$r12"; +- else +- return "la.global\t$r12,%1\n\tjr\t$r12"; +- case 3: +- if (TARGET_CMODEL_EXTREME) +- return "la.global\t$r12,$r13,%1\n\tjr\t$r12"; +- else +- return "la.global\t$r12,%1\n\tjr\t$r12"; +- case 4: +- if (TARGET_CMODEL_NORMAL || TARGET_CMODEL_TINY) +- return "b\t%%plt(%1)"; +- else if (TARGET_CMODEL_LARGE) +- return "pcaddu18i\t$r12,(%%plt(%1)+0x20000)>>18\n\t" +- "jirl\t$r0,$r12,%%plt(%1)+4-((%%plt(%1)+(4+0x20000))>>18<<18)"; +- else +- /* Cmodel extreme and tiny static not support plt. 
*/ +- gcc_unreachable (); +- default: +- gcc_unreachable (); +- } +-} +- [(set_attr "jirl" "indirect,direct,direct,direct,direct")]) ++ "@ ++ jr\t%1 ++ b\t%1 ++ b\t%%plt(%1)" ++ [(set_attr "jirl" "indirect,direct,direct")]) + + (define_expand "call" + [(parallel [(call (match_operand 0 "") +@@ -3025,50 +2923,15 @@ + }) + + (define_insn "call_internal" +- [(call (mem:SI (match_operand 0 "call_insn_operand" "e,c,a,t,h")) ++ [(call (mem:SI (match_operand 0 "call_insn_operand" "e,c,b")) + (match_operand 1 "" "")) + (clobber (reg:SI RETURN_ADDR_REGNUM))] + "" +-{ +- switch (which_alternative) +- { +- case 0: +- return "jirl\t$r1,%0,0"; +- case 1: +- if (TARGET_CMODEL_LARGE) +- return "pcaddu18i\t$r1,%%pcrel(%0+0x20000)>>18\n\t" +- "jirl\t$r1,$r1,%%pcrel(%0+4)-(%%pcrel(%0+4+0x20000)>>18<<18)"; +- else if (TARGET_CMODEL_EXTREME) +- return "la.local\t$r1,$r12,%0\n\tjirl\t$r1,$r1,0"; +- else +- return "bl\t%0"; +- case 2: +- if (TARGET_CMODEL_TINY_STATIC) +- return "bl\t%0"; +- else if (TARGET_CMODEL_EXTREME) +- return "la.global\t$r1,$r12,%0\n\tjirl\t$r1,$r1,0"; +- else +- return "la.global\t$r1,%0\n\tjirl\t$r1,$r1,0"; +- case 3: +- if (TARGET_CMODEL_EXTREME) +- return "la.global\t$r1,$r12,%0\n\tjirl\t$r1,$r1,0"; +- else +- return "la.global\t$r1,%0\n\tjirl\t$r1,$r1,0"; +- case 4: +- if (TARGET_CMODEL_LARGE) +- return "pcaddu18i\t$r1,(%%plt(%0)+0x20000)>>18\n\t" +- "jirl\t$r1,$r1,%%plt(%0)+4-((%%plt(%0)+(4+0x20000))>>18<<18)"; +- else if (TARGET_CMODEL_NORMAL || TARGET_CMODEL_TINY) +- return "bl\t%%plt(%0)"; +- else +- /* Cmodel extreme and tiny static not support plt. 
*/ +- gcc_unreachable (); +- default: +- gcc_unreachable (); +- } +-} +- [(set_attr "jirl" "indirect,direct,direct,direct,direct") +- (set_attr "insn_count" "1,2,3,3,2")]) ++ "@ ++ jirl\t$r1,%0,0 ++ bl\t%0 ++ bl\t%%plt(%0)" ++ [(set_attr "jirl" "indirect,direct,direct")]) + + (define_expand "call_value" + [(parallel [(set (match_operand 0 "") +@@ -3101,100 +2964,30 @@ + + (define_insn "call_value_internal" + [(set (match_operand 0 "register_operand" "") +- (call (mem:SI (match_operand 1 "call_insn_operand" "e,c,a,t,h")) ++ (call (mem:SI (match_operand 1 "call_insn_operand" "e,c,b")) + (match_operand 2 "" ""))) + (clobber (reg:SI RETURN_ADDR_REGNUM))] + "" +-{ +- switch (which_alternative) +- { +- case 0: +- return "jirl\t$r1,%1,0"; +- case 1: +- if (TARGET_CMODEL_LARGE) +- return "pcaddu18i\t$r1,%%pcrel(%1+0x20000)>>18\n\t" +- "jirl\t$r1,$r1,%%pcrel(%1+4)-(%%pcrel(%1+4+0x20000)>>18<<18)"; +- else if (TARGET_CMODEL_EXTREME) +- return "la.local\t$r1,$r12,%1\n\tjirl\t$r1,$r1,0"; +- else +- return "bl\t%1"; +- case 2: +- if (TARGET_CMODEL_TINY_STATIC) +- return "bl\t%1"; +- else if (TARGET_CMODEL_EXTREME) +- return "la.global\t$r1,$r12,%1\n\tjirl\t$r1,$r1,0"; +- else +- return "la.global\t$r1,%1\n\tjirl\t$r1,$r1,0"; +- case 3: +- if (TARGET_CMODEL_EXTREME) +- return "la.global\t$r1,$r12,%1\n\tjirl\t$r1,$r1,0"; +- else +- return "la.global\t$r1,%1\n\tjirl\t$r1,$r1,0"; +- case 4: +- if (TARGET_CMODEL_LARGE) +- return "pcaddu18i\t$r1,(%%plt(%1)+0x20000)>>18\n\t" +- "jirl\t$r1,$r1,%%plt(%1)+4-((%%plt(%1)+(4+0x20000))>>18<<18)"; +- else if (TARGET_CMODEL_NORMAL || TARGET_CMODEL_TINY) +- return "bl\t%%plt(%1)"; +- else +- /* Cmodel extreme and tiny static not support plt. 
*/ +- gcc_unreachable (); +- default: +- gcc_unreachable (); +- } +-} +- [(set_attr "jirl" "indirect,direct,direct,direct,direct") +- (set_attr "insn_count" "1,2,3,3,2")]) ++ "@ ++ jirl\t$r1,%1,0 ++ bl\t%1 ++ bl\t%%plt(%1)" ++ [(set_attr "jirl" "indirect,direct,direct")]) + + (define_insn "call_value_multiple_internal" + [(set (match_operand 0 "register_operand" "") +- (call (mem:SI (match_operand 1 "call_insn_operand" "e,c,a,t,h")) ++ (call (mem:SI (match_operand 1 "call_insn_operand" "e,c,b")) + (match_operand 2 "" ""))) + (set (match_operand 3 "register_operand" "") + (call (mem:SI (match_dup 1)) + (match_dup 2))) + (clobber (reg:SI RETURN_ADDR_REGNUM))] + "" +-{ +- switch (which_alternative) +- { +- case 0: +- return "jirl\t$r1,%1,0"; +- case 1: +- if (TARGET_CMODEL_LARGE) +- return "pcaddu18i\t$r1,%%pcrel(%1+0x20000)>>18\n\t" +- "jirl\t$r1,$r1,%%pcrel(%1+4)-(%%pcrel(%1+4+0x20000)>>18<<18)"; +- else if (TARGET_CMODEL_EXTREME) +- return "la.local\t$r1,$r12,%1\n\tjirl\t$r1,$r1,0"; +- else +- return "bl\t%1"; +- case 2: +- if (TARGET_CMODEL_TINY_STATIC) +- return "bl\t%1"; +- else if (TARGET_CMODEL_EXTREME) +- return "la.global\t$r1,$r12,%1\n\tjirl\t$r1,$r1,0 "; +- else +- return "la.global\t$r1,%1\n\tjirl\t$r1,$r1,0"; +- case 3: +- if (TARGET_CMODEL_EXTREME) +- return "la.global\t$r1,$r12,%1\n\tjirl\t$r1,$r1,0"; +- else +- return "la.global\t$r1,%1\n\tjirl\t$r1,$r1,0"; +- case 4: +- if (TARGET_CMODEL_LARGE) +- return "pcaddu18i\t$r1,(%%plt(%1)+0x20000)>>18\n\t" +- "jirl\t$r1,$r1,%%plt(%1)+4-((%%plt(%1)+(4+0x20000))>>18<<18)"; +- else if (TARGET_CMODEL_NORMAL || TARGET_CMODEL_TINY) +- return "bl\t%%plt(%1)"; +- else +- /* Cmodel extreme and tiny static not support plt. 
*/ +- gcc_unreachable (); +- default: +- gcc_unreachable (); +- } +-} +- [(set_attr "jirl" "indirect,direct,direct,direct,direct") +- (set_attr "insn_count" "1,2,3,3,2")]) ++ "@ ++ jirl\t$r1,%1,0 ++ bl\t%1 ++ bl\t%%plt(%1)" ++ [(set_attr "jirl" "indirect,direct,direct")]) + + + ;; Call subroutine returning any type. +diff --git a/gcc/config/loongarch/predicates.md b/gcc/config/loongarch/predicates.md +index edd74d478..2243ef71c 100644 +--- a/gcc/config/loongarch/predicates.md ++++ b/gcc/config/loongarch/predicates.md +@@ -111,20 +111,25 @@ + (match_code "const,symbol_ref,label_ref") + { + enum loongarch_symbol_type symbol_type; ++ loongarch_symbolic_constant_p (op, &symbol_type); + +- if (!loongarch_symbolic_constant_p (op, &symbol_type)) ++ rtx offset, x = op; ++ split_const (x, &x, &offset); ++ ++ if (offset != const0_rtx) + return false; + + switch (symbol_type) + { +- case SYMBOL_GOT_DISP: +- /* Without explicit relocs, there is no special syntax for +- loading the address of a call destination into a register. +- Using "la.global JIRL_REGS,foo; jirl JIRL_REGS" would prevent the lazy +- binding of "foo", so keep the address of global symbols with the jirl +- macro. 
*/ ++ case SYMBOL_PCREL: + return 1; + ++ case SYMBOL_GOT_DISP: ++ if (!flag_plt) ++ return false; ++ else ++ return 1; ++ + default: + return false; + } +@@ -140,22 +145,11 @@ + (match_test "loongarch_symbol_binds_local_p (op) != 0")) + (match_test "CONSTANT_P (op)"))) + +-(define_predicate "is_const_call_weak_symbol" +- (and (match_operand 0 "const_call_insn_operand") +- (not (match_operand 0 "is_const_call_local_symbol")) +- (match_test "loongarch_weak_symbol_p (op) != 0") +- (match_test "CONSTANT_P (op)"))) +- +-(define_predicate "is_const_call_plt_symbol" +- (and (match_operand 0 "const_call_insn_operand") +- (match_test "flag_plt != 0") +- (match_test "loongarch_global_symbol_noweak_p (op) != 0") +- (match_test "CONSTANT_P (op)"))) +- +-(define_predicate "is_const_call_global_noplt_symbol" ++(define_predicate "is_const_call_no_local_symbol" + (and (match_operand 0 "const_call_insn_operand") +- (match_test "flag_plt == 0") +- (match_test "loongarch_global_symbol_noweak_p (op) != 0") ++ (ior (match_test "loongarch_global_symbol_p (op) != 0") ++ (match_test "loongarch_symbol_binds_local_p (op) == 0") ++ (match_test "loongarch_weak_symbol_p (op) != 0")) + (match_test "CONSTANT_P (op)"))) + + ;; A legitimate CONST_INT operand that takes more than one instruction +@@ -219,7 +213,7 @@ + case CONST: + case SYMBOL_REF: + case LABEL_REF: +- return (loongarch_symbolic_constant_p (op, &symbol_type)); ++ return loongarch_symbolic_constant_p (op, &symbol_type); + default: + return true; + } +diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-1.c b/gcc/testsuite/gcc.target/loongarch/func-call-1.c +new file mode 100644 +index 000000000..b0482761a +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/func-call-1.c +@@ -0,0 +1,32 @@ ++/* { dg-do compile } */ ++/* { dg-options "-mabi=lp64d -O0 -fpic -fplt" } */ ++/* { dg-final { scan-assembler "test:.*bl\t%plt\\(g\\)\n" } } */ ++/* { dg-final { scan-assembler "test1:.*bl\t%plt\\(f\\)\n" } } */ ++/* { dg-final { 
scan-assembler "test2:.*bl\tl\n" } } */ ++ ++extern void g (void); ++void ++f (void) ++{} ++ ++static void ++l (void) ++{} ++ ++void ++test (void) ++{ ++ g (); ++} ++ ++void ++test1 (void) ++{ ++ f (); ++} ++ ++void ++test2 (void) ++{ ++ l (); ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-2.c b/gcc/testsuite/gcc.target/loongarch/func-call-2.c +new file mode 100644 +index 000000000..f5e061c29 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/func-call-2.c +@@ -0,0 +1,32 @@ ++/* { dg-do compile } */ ++/* { dg-options "-mabi=lp64d -O0 -fno-pic -fplt" } */ ++/* { dg-final { scan-assembler "test:.*bl\t%plt\\(g\\)\n" } } */ ++/* { dg-final { scan-assembler "test1:.*bl\tf\n" } } */ ++/* { dg-final { scan-assembler "test2:.*bl\tl\n" } } */ ++ ++extern void g (void); ++void ++f (void) ++{} ++ ++static void ++l (void) ++{} ++ ++void ++test (void) ++{ ++ g (); ++} ++ ++void ++test1 (void) ++{ ++ f (); ++} ++ ++void ++test2 (void) ++{ ++ l (); ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-3.c b/gcc/testsuite/gcc.target/loongarch/func-call-3.c +new file mode 100644 +index 000000000..75082c574 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/func-call-3.c +@@ -0,0 +1,32 @@ ++/* { dg-do compile } */ ++/* { dg-options "-mabi=lp64d -O0 -fpic -fno-plt" } */ ++/* { dg-final { scan-assembler "test:.*la\.global\t.*g\n\tjirl" } } */ ++/* { dg-final { scan-assembler "test1:.*la\.global\t.*f\n\tjirl" } } */ ++/* { dg-final { scan-assembler "test2:.*bl\tl\n" } } */ ++ ++extern void g (void); ++void ++f (void) ++{} ++ ++static void ++l (void) ++{} ++ ++void ++test (void) ++{ ++ g (); ++} ++ ++void ++test1 (void) ++{ ++ f (); ++} ++ ++void ++test2 (void) ++{ ++ l (); ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-4.c b/gcc/testsuite/gcc.target/loongarch/func-call-4.c +new file mode 100644 +index 000000000..e8a839549 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/func-call-4.c +@@ -0,0 +1,32 @@ ++/* { dg-do compile } 
*/ ++/* { dg-options "-mabi=lp64d -O0 -fno-pic -fno-plt" } */ ++/* { dg-final { scan-assembler "test:.*la\.global\t.*g\n\tjirl" } } */ ++/* { dg-final { scan-assembler "test1:.*bl\tf\n" } } */ ++/* { dg-final { scan-assembler "test2:.*bl\tl\n" } } */ ++ ++extern void g (void); ++void ++f (void) ++{} ++ ++static void ++l (void) ++{} ++ ++void ++test (void) ++{ ++ g (); ++} ++ ++void ++test1 (void) ++{ ++ f (); ++} ++ ++void ++test2 (void) ++{ ++ l (); ++} +-- +2.33.0 + diff --git a/LoongArch-Support-split-symbol.patch b/LoongArch-Support-split-symbol.patch new file mode 100644 index 0000000000000000000000000000000000000000..97fa5f9f0e98f7904bd592ec3e4a142beb3017a7 --- /dev/null +++ b/LoongArch-Support-split-symbol.patch @@ -0,0 +1,1238 @@ +From 078261cabef370e7f3201980d03bd54a049290e9 Mon Sep 17 00:00:00 2001 +From: Lulu Cheng +Date: Thu, 21 Jul 2022 11:04:08 +0800 +Subject: [PATCH 004/124] LoongArch: Support split symbol. + +Add compilation option '-mexplicit-relocs', and if enable '-mexplicit-relocs' +the symbolic address load instruction 'la.*' will be split into two instructions. +This compilation option enabled by default. + +gcc/ChangeLog: + + * common/config/loongarch/loongarch-common.cc: + Enable '-fsection-anchors' when O1 and more advanced optimization. + * config/loongarch/genopts/loongarch.opt.in: Add new option + '-mexplicit-relocs', and enable by default. + * config/loongarch/loongarch-protos.h (loongarch_split_move_insn_p): + Delete function declaration. + (loongarch_split_move_insn): Delete function declaration. + (loongarch_split_symbol_type): Add function declaration. + * config/loongarch/loongarch.cc (enum loongarch_address_type): + Add new address type 'ADDRESS_LO_SUM'. + (loongarch_classify_symbolic_expression): New function definitions. + Classify the base of symbolic expression X, given that X appears in + context CONTEXT. + (loongarch_symbol_insns): Add a judgment condition TARGET_EXPLICIT_RELOCS. 
+ (loongarch_split_symbol_type): New function definitions. + Determines whether the symbol load should be split into two instructions. + (loongarch_valid_lo_sum_p): New function definitions. + Return true if a LO_SUM can address a value of mode MODE when the LO_SUM + symbol has type SYMBOL_TYPE. + (loongarch_classify_address): Add handling of 'LO_SUM'. + (loongarch_address_insns): Add handling of 'ADDRESS_LO_SUM'. + (loongarch_signed_immediate_p): Sort code. + (loongarch_12bit_offset_address_p): Return true if address type is ADDRESS_LO_SUM. + (loongarch_const_insns): Add handling of 'HIGH'. + (loongarch_split_move_insn_p): Add the static attribute to the function. + (loongarch_emit_set): New function definitions. + (loongarch_call_tls_get_addr): Add symbol handling when defining TARGET_EXPLICIT_RELOCS. + (loongarch_legitimize_tls_address): Add symbol handling when defining the + TARGET_EXPLICIT_RELOCS macro. + (loongarch_split_symbol): New function definitions. Split symbol. + (loongarch_legitimize_address): Add codes see if the address can split into a high part + and a LO_SUM. + (loongarch_legitimize_const_move): Add codes split moves of symbolic constants into + high and low. + (loongarch_split_move_insn): Delete function definitions. + (loongarch_output_move): Add support for HIGH and LO_SUM. + (loongarch_print_operand_reloc): New function definitions. + Print symbolic operand OP, which is part of a HIGH or LO_SUM in context CONTEXT. + (loongarch_memmodel_needs_release_fence): Sort code. + (loongarch_print_operand): Rearrange alphabetical order and add H and L to support HIGH + and LOW output. + (loongarch_print_operand_address): Add handling of 'ADDRESS_LO_SUM'. + (TARGET_MIN_ANCHOR_OFFSET): Define macro to -IMM_REACH/2. + (TARGET_MAX_ANCHOR_OFFSET): Define macro to IMM_REACH/2-1. + * config/loongarch/loongarch.md (movti): Delete the template. + (*movti): Delete the template. + (movtf): Delete the template. + (*movtf): Delete the template. 
+ (*low): New template of normal symbol low address. + (@tls_low): New template of tls symbol low address. + (@ld_from_got): New template load address from got table. + (@ori_l_lo12): New template. + * config/loongarch/loongarch.opt: Update from loongarch.opt.in. + * config/loongarch/predicates.md: Add support for symbol_type HIGH. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/func-call-1.c: Add build option '-mno-explicit-relocs'. + * gcc.target/loongarch/func-call-2.c: Add build option '-mno-explicit-relocs'. + * gcc.target/loongarch/func-call-3.c: Add build option '-mno-explicit-relocs'. + * gcc.target/loongarch/func-call-4.c: Add build option '-mno-explicit-relocs'. + * gcc.target/loongarch/func-call-5.c: New test. + * gcc.target/loongarch/func-call-6.c: New test. + * gcc.target/loongarch/func-call-7.c: New test. + * gcc.target/loongarch/func-call-8.c: New test. + * gcc.target/loongarch/relocs-symbol-noaddend.c: New test. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + .../config/loongarch/loongarch-common.cc | 1 + + gcc/config/loongarch/genopts/loongarch.opt.in | 4 + + gcc/config/loongarch/loongarch-protos.h | 3 +- + gcc/config/loongarch/loongarch.cc | 412 ++++++++++++++++-- + gcc/config/loongarch/loongarch.md | 122 +++--- + gcc/config/loongarch/loongarch.opt | 4 + + gcc/config/loongarch/predicates.md | 20 +- + .../gcc.target/loongarch/func-call-1.c | 2 +- + .../gcc.target/loongarch/func-call-2.c | 2 +- + .../gcc.target/loongarch/func-call-3.c | 2 +- + .../gcc.target/loongarch/func-call-4.c | 2 +- + .../gcc.target/loongarch/func-call-5.c | 33 ++ + .../gcc.target/loongarch/func-call-6.c | 33 ++ + .../gcc.target/loongarch/func-call-7.c | 34 ++ + .../gcc.target/loongarch/func-call-8.c | 33 ++ + .../loongarch/relocs-symbol-noaddend.c | 23 + + 16 files changed, 614 insertions(+), 116 deletions(-) + create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-5.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-6.c + create 
mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-7.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-8.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/relocs-symbol-noaddend.c + +diff --git a/gcc/common/config/loongarch/loongarch-common.cc b/gcc/common/config/loongarch/loongarch-common.cc +index ed3730fce..f8b4660fa 100644 +--- a/gcc/common/config/loongarch/loongarch-common.cc ++++ b/gcc/common/config/loongarch/loongarch-common.cc +@@ -34,6 +34,7 @@ along with GCC; see the file COPYING3. If not see + static const struct default_options loongarch_option_optimization_table[] = + { + { OPT_LEVELS_ALL, OPT_fasynchronous_unwind_tables, NULL, 1 }, ++ { OPT_LEVELS_1_PLUS, OPT_fsection_anchors, NULL, 1 }, + { OPT_LEVELS_NONE, 0, NULL, 0 } + }; + +diff --git a/gcc/config/loongarch/genopts/loongarch.opt.in b/gcc/config/loongarch/genopts/loongarch.opt.in +index 61e7d72a0..6f3950093 100644 +--- a/gcc/config/loongarch/genopts/loongarch.opt.in ++++ b/gcc/config/loongarch/genopts/loongarch.opt.in +@@ -154,6 +154,10 @@ mmax-inline-memcpy-size= + Target Joined RejectNegative UInteger Var(loongarch_max_inline_memcpy_size) Init(1024) + -mmax-inline-memcpy-size=SIZE Set the max size of memcpy to inline, default is 1024. + ++mexplicit-relocs ++Target Var(TARGET_EXPLICIT_RELOCS) Init(1) ++Use %reloc() assembly operators. ++ + ; The code model option names for -mcmodel. 
+ Enum + Name(cmodel) Type(int) +diff --git a/gcc/config/loongarch/loongarch-protos.h b/gcc/config/loongarch/loongarch-protos.h +index 080766250..cadaad751 100644 +--- a/gcc/config/loongarch/loongarch-protos.h ++++ b/gcc/config/loongarch/loongarch-protos.h +@@ -77,8 +77,6 @@ extern rtx loongarch_legitimize_call_address (rtx); + extern rtx loongarch_subword (rtx, bool); + extern bool loongarch_split_move_p (rtx, rtx); + extern void loongarch_split_move (rtx, rtx, rtx); +-extern bool loongarch_split_move_insn_p (rtx, rtx); +-extern void loongarch_split_move_insn (rtx, rtx, rtx); + extern const char *loongarch_output_move (rtx, rtx); + extern bool loongarch_cfun_has_cprestore_slot_p (void); + #ifdef RTX_CODE +@@ -160,6 +158,7 @@ extern rtx loongarch_expand_thread_pointer (rtx); + extern bool loongarch_eh_uses (unsigned int); + extern bool loongarch_epilogue_uses (unsigned int); + extern bool loongarch_load_store_bonding_p (rtx *, machine_mode, bool); ++extern bool loongarch_split_symbol_type (enum loongarch_symbol_type); + + typedef rtx (*mulsidi3_gen_fn) (rtx, rtx, rtx); + +diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc +index 2e2f16e72..1b5af2c7d 100644 +--- a/gcc/config/loongarch/loongarch.cc ++++ b/gcc/config/loongarch/loongarch.cc +@@ -100,6 +100,10 @@ along with GCC; see the file COPYING3. If not see + ADDRESS_REG_REG + A base register indexed by (optionally scaled) register. + ++ ADDRESS_LO_SUM ++ A LO_SUM rtx. The first operand is a valid base register and the second ++ operand is a symbolic address. ++ + ADDRESS_CONST_INT + A signed 16-bit constant address. + +@@ -109,6 +113,7 @@ enum loongarch_address_type + { + ADDRESS_REG, + ADDRESS_REG_REG, ++ ADDRESS_LO_SUM, + ADDRESS_CONST_INT, + ADDRESS_SYMBOLIC + }; +@@ -1641,6 +1646,21 @@ loongarch_classify_symbol (const_rtx x) + return SYMBOL_PCREL; + } + ++/* Classify the base of symbolic expression X, given that X appears in ++ context CONTEXT. 
*/ ++ ++static enum loongarch_symbol_type ++loongarch_classify_symbolic_expression (rtx x) ++{ ++ rtx offset; ++ ++ split_const (x, &x, &offset); ++ if (UNSPEC_ADDRESS_P (x)) ++ return UNSPEC_ADDRESS_TYPE (x); ++ ++ return loongarch_classify_symbol (x); ++} ++ + /* Return true if X is a symbolic constant. If it is, + store the type of the symbol in *SYMBOL_TYPE. */ + +@@ -1696,7 +1716,7 @@ loongarch_symbol_insns (enum loongarch_symbol_type type, machine_mode mode) + case SYMBOL_GOT_DISP: + /* The constant will have to be loaded from the GOT before it + is used in an address. */ +- if (mode != MAX_MACHINE_MODE) ++ if (!TARGET_EXPLICIT_RELOCS && mode != MAX_MACHINE_MODE) + return 0; + + return 3; +@@ -1814,6 +1834,84 @@ loongarch_valid_offset_p (rtx x, machine_mode mode) + return true; + } + ++/* Should a symbol of type SYMBOL_TYPE should be split in two? */ ++ ++bool ++loongarch_split_symbol_type (enum loongarch_symbol_type symbol_type) ++{ ++ switch (symbol_type) ++ { ++ case SYMBOL_PCREL: ++ case SYMBOL_GOT_DISP: ++ case SYMBOL_TLS_IE: ++ case SYMBOL_TLS_LE: ++ case SYMBOL_TLSGD: ++ case SYMBOL_TLSLDM: ++ return true; ++ ++ case SYMBOL_TLS: ++ return false; ++ ++ default: ++ gcc_unreachable (); ++ } ++} ++ ++/* Return true if a LO_SUM can address a value of mode MODE when the ++ LO_SUM symbol has type SYMBOL_TYPE. */ ++ ++static bool ++loongarch_valid_lo_sum_p (enum loongarch_symbol_type symbol_type, ++ machine_mode mode, rtx x) ++{ ++ int align, size; ++ ++ /* Check that symbols of type SYMBOL_TYPE can be used to access values ++ of mode MODE. */ ++ if (loongarch_symbol_insns (symbol_type, mode) == 0) ++ return false; ++ ++ /* Check that there is a known low-part relocation. */ ++ if (!loongarch_split_symbol_type (symbol_type)) ++ return false; ++ ++ /* We can't tell size or alignment when we have BLKmode, so try extracing a ++ decl from the symbol if possible. 
*/ ++ if (mode == BLKmode) ++ { ++ rtx offset; ++ ++ /* Extract the symbol from the LO_SUM operand, if any. */ ++ split_const (x, &x, &offset); ++ ++ /* Might be a CODE_LABEL. We can compute align but not size for that, ++ so don't bother trying to handle it. */ ++ if (!SYMBOL_REF_P (x)) ++ return false; ++ ++ /* Use worst case assumptions if we don't have a SYMBOL_REF_DECL. */ ++ align = (SYMBOL_REF_DECL (x) ++ ? DECL_ALIGN (SYMBOL_REF_DECL (x)) ++ : 1); ++ size = (SYMBOL_REF_DECL (x) && DECL_SIZE (SYMBOL_REF_DECL (x)) ++ ? tree_to_uhwi (DECL_SIZE (SYMBOL_REF_DECL (x))) ++ : 2*BITS_PER_WORD); ++ } ++ else ++ { ++ align = GET_MODE_ALIGNMENT (mode); ++ size = GET_MODE_BITSIZE (mode); ++ } ++ ++ /* We may need to split multiword moves, so make sure that each word ++ can be accessed without inducing a carry. */ ++ if (size > BITS_PER_WORD ++ && (!TARGET_STRICT_ALIGN || size > align)) ++ return false; ++ ++ return true; ++} ++ + static bool + loongarch_valid_index_p (struct loongarch_address_info *info, rtx x, + machine_mode mode, bool strict_p) +@@ -1880,6 +1978,26 @@ loongarch_classify_address (struct loongarch_address_info *info, rtx x, + info->offset = XEXP (x, 1); + return (loongarch_valid_base_register_p (info->reg, mode, strict_p) + && loongarch_valid_offset_p (info->offset, mode)); ++ ++ case LO_SUM: ++ info->type = ADDRESS_LO_SUM; ++ info->reg = XEXP (x, 0); ++ info->offset = XEXP (x, 1); ++ /* We have to trust the creator of the LO_SUM to do something vaguely ++ sane. Target-independent code that creates a LO_SUM should also ++ create and verify the matching HIGH. Target-independent code that ++ adds an offset to a LO_SUM must prove that the offset will not ++ induce a carry. Failure to do either of these things would be ++ a bug, and we are not required to check for it here. The MIPS ++ backend itself should only create LO_SUMs for valid symbolic ++ constants, with the high part being either a HIGH or a copy ++ of _gp. 
*/ ++ info->symbol_type ++ = loongarch_classify_symbolic_expression (info->offset); ++ return (loongarch_valid_base_register_p (info->reg, mode, strict_p) ++ && loongarch_valid_lo_sum_p (info->symbol_type, mode, ++ info->offset)); ++ + default: + return false; + } +@@ -1940,6 +2058,9 @@ loongarch_address_insns (rtx x, machine_mode mode, bool might_split_p) + case ADDRESS_CONST_INT: + return factor; + ++ case ADDRESS_LO_SUM: ++ return factor + 1; ++ + case ADDRESS_SYMBOLIC: + return factor * loongarch_symbol_insns (addr.symbol_type, mode); + } +@@ -1967,7 +2088,8 @@ loongarch_signed_immediate_p (unsigned HOST_WIDE_INT x, int bits, + return loongarch_unsigned_immediate_p (x, bits, shift); + } + +-/* Return true if X is a legitimate address with a 12-bit offset. ++/* Return true if X is a legitimate address with a 12-bit offset ++ or addr.type is ADDRESS_LO_SUM. + MODE is the mode of the value being accessed. */ + + bool +@@ -1976,9 +2098,10 @@ loongarch_12bit_offset_address_p (rtx x, machine_mode mode) + struct loongarch_address_info addr; + + return (loongarch_classify_address (&addr, x, mode, false) +- && addr.type == ADDRESS_REG +- && CONST_INT_P (addr.offset) +- && LARCH_12BIT_OFFSET_P (INTVAL (addr.offset))); ++ && ((addr.type == ADDRESS_REG ++ && CONST_INT_P (addr.offset) ++ && LARCH_12BIT_OFFSET_P (INTVAL (addr.offset))) ++ || addr.type == ADDRESS_LO_SUM)); + } + + /* Return true if X is a legitimate address with a 14-bit offset shifted 2. +@@ -2020,6 +2143,14 @@ loongarch_const_insns (rtx x) + + switch (GET_CODE (x)) + { ++ case HIGH: ++ if (!loongarch_symbolic_constant_p (XEXP (x, 0), &symbol_type) ++ || !loongarch_split_symbol_type (symbol_type)) ++ return 0; ++ ++ /* This is simply a PCALAU12I. 
*/ ++ return 1; ++ + case CONST_INT: + return loongarch_integer_cost (INTVAL (x)); + +@@ -2080,6 +2211,8 @@ loongarch_split_const_insns (rtx x) + return low + high; + } + ++static bool loongarch_split_move_insn_p (rtx dest, rtx src); ++ + /* Return the number of instructions needed to implement INSN, + given that it loads from or stores to MEM. */ + +@@ -2197,6 +2330,15 @@ loongarch_unspec_address (rtx address, enum loongarch_symbol_type symbol_type) + return loongarch_unspec_address_offset (base, offset, symbol_type); + } + ++/* Emit an instruction of the form (set TARGET SRC). */ ++ ++static rtx ++loongarch_emit_set (rtx target, rtx src) ++{ ++ emit_insn (gen_rtx_SET (target, src)); ++ return target; ++} ++ + /* If OP is an UNSPEC address, return the address to which it refers, + otherwise return OP itself. */ + +@@ -2278,6 +2420,7 @@ loongarch_call_tls_get_addr (rtx sym, enum loongarch_symbol_type type, rtx v0) + { + rtx loc, a0; + rtx_insn *insn; ++ rtx tmp = gen_reg_rtx (Pmode); + + a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST); + +@@ -2288,12 +2431,22 @@ loongarch_call_tls_get_addr (rtx sym, enum loongarch_symbol_type type, rtx v0) + + start_sequence (); + +- if (type == SYMBOL_TLSLDM) +- emit_insn (loongarch_got_load_tls_ld (a0, loc)); +- else if (type == SYMBOL_TLSGD) +- emit_insn (loongarch_got_load_tls_gd (a0, loc)); ++ if (TARGET_EXPLICIT_RELOCS) ++ { ++ /* Split tls symbol to high and low. 
*/ ++ rtx high = gen_rtx_HIGH (Pmode, copy_rtx (loc)); ++ high = loongarch_force_temporary (tmp, high); ++ emit_insn (gen_tls_low (Pmode, a0, high, loc)); ++ } + else +- gcc_unreachable (); ++ { ++ if (type == SYMBOL_TLSLDM) ++ emit_insn (loongarch_got_load_tls_ld (a0, loc)); ++ else if (type == SYMBOL_TLSGD) ++ emit_insn (loongarch_got_load_tls_gd (a0, loc)); ++ else ++ gcc_unreachable (); ++ } + + insn = emit_call_insn (gen_call_value_internal (v0, loongarch_tls_symbol, + const0_rtx)); +@@ -2308,12 +2461,12 @@ loongarch_call_tls_get_addr (rtx sym, enum loongarch_symbol_type type, rtx v0) + + /* Generate the code to access LOC, a thread-local SYMBOL_REF, and return + its address. The return value will be both a valid address and a valid +- SET_SRC. */ ++ SET_SRC (either a REG or a LO_SUM). */ + + static rtx + loongarch_legitimize_tls_address (rtx loc) + { +- rtx dest, tp, tmp; ++ rtx dest, tp, tmp, tmp1, tmp2, tmp3; + enum tls_model model = SYMBOL_REF_TLS_MODEL (loc); + rtx_insn *insn; + +@@ -2334,21 +2487,45 @@ loongarch_legitimize_tls_address (rtx loc) + break; + + case TLS_MODEL_INITIAL_EXEC: +- /* la.tls.ie; tp-relative add. */ +- tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM); +- tmp = gen_reg_rtx (Pmode); +- emit_insn (loongarch_got_load_tls_ie (tmp, loc)); +- dest = gen_reg_rtx (Pmode); +- emit_insn (gen_add3_insn (dest, tmp, tp)); ++ { ++ /* la.tls.ie; tp-relative add. */ ++ tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM); ++ tmp1 = gen_reg_rtx (Pmode); ++ dest = gen_reg_rtx (Pmode); ++ if (TARGET_EXPLICIT_RELOCS) ++ { ++ tmp2 = loongarch_unspec_address (loc, SYMBOL_TLS_IE); ++ tmp3 = gen_reg_rtx (Pmode); ++ rtx high = gen_rtx_HIGH (Pmode, copy_rtx (tmp2)); ++ high = loongarch_force_temporary (tmp3, high); ++ emit_insn (gen_ld_from_got (Pmode, tmp1, high, tmp2)); ++ } ++ else ++ emit_insn (loongarch_got_load_tls_ie (tmp1, loc)); ++ emit_insn (gen_add3_insn (dest, tmp1, tp)); ++ } + break; + + case TLS_MODEL_LOCAL_EXEC: +- /* la.tls.le; tp-relative add. 
*/ +- tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM); +- tmp = gen_reg_rtx (Pmode); +- emit_insn (loongarch_got_load_tls_le (tmp, loc)); +- dest = gen_reg_rtx (Pmode); +- emit_insn (gen_add3_insn (dest, tmp, tp)); ++ { ++ /* la.tls.le; tp-relative add. */ ++ tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM); ++ tmp1 = gen_reg_rtx (Pmode); ++ dest = gen_reg_rtx (Pmode); ++ ++ if (TARGET_EXPLICIT_RELOCS) ++ { ++ tmp2 = loongarch_unspec_address (loc, SYMBOL_TLS_LE); ++ tmp3 = gen_reg_rtx (Pmode); ++ rtx high = gen_rtx_HIGH (Pmode, copy_rtx (tmp2)); ++ high = loongarch_force_temporary (tmp3, high); ++ emit_insn (gen_ori_l_lo12 (Pmode, tmp1, high, tmp2)); ++ } ++ else ++ emit_insn (loongarch_got_load_tls_le (tmp1, loc)); ++ emit_insn (gen_add3_insn (dest, tmp1, tp)); ++ ++ } + break; + + default: +@@ -2397,6 +2574,68 @@ loongarch_force_address (rtx x, machine_mode mode) + return x; + } + ++/* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise ++ it appears in a MEM of that mode. Return true if ADDR is a legitimate ++ constant in that context and can be split into high and low parts. ++ If so, and if LOW_OUT is nonnull, emit the high part and store the ++ low part in *LOW_OUT. Leave *LOW_OUT unchanged otherwise. ++ ++ Return false if build with '-mno-explicit-relocs'. ++ ++ TEMP is as for loongarch_force_temporary and is used to load the high ++ part into a register. ++ ++ When MODE is MAX_MACHINE_MODE, the low part is guaranteed to be ++ a legitimize SET_SRC for an .md pattern, otherwise the low part ++ is guaranteed to be a legitimate address for mode MODE. */ ++ ++bool ++loongarch_split_symbol (rtx temp, rtx addr, machine_mode mode, rtx *low_out) ++{ ++ enum loongarch_symbol_type symbol_type; ++ rtx high; ++ ++ /* If build with '-mno-explicit-relocs', don't split symbol. 
*/ ++ if (!TARGET_EXPLICIT_RELOCS) ++ return false; ++ ++ if ((GET_CODE (addr) == HIGH && mode == MAX_MACHINE_MODE) ++ || !loongarch_symbolic_constant_p (addr, &symbol_type) ++ || loongarch_symbol_insns (symbol_type, mode) == 0 ++ || !loongarch_split_symbol_type (symbol_type)) ++ return false; ++ ++ if (temp == NULL) ++ temp = gen_reg_rtx (Pmode); ++ ++ /* Get the 12-31 bits of the address. */ ++ high = gen_rtx_HIGH (Pmode, copy_rtx (addr)); ++ high = loongarch_force_temporary (temp, high); ++ ++ if (low_out) ++ switch (symbol_type) ++ { ++ case SYMBOL_PCREL: ++ *low_out = gen_rtx_LO_SUM (Pmode, high, addr); ++ break; ++ ++ case SYMBOL_GOT_DISP: ++ /* SYMBOL_GOT_DISP symbols are loaded from the GOT. */ ++ { ++ rtx low = gen_rtx_LO_SUM (Pmode, high, addr); ++ rtx mem = gen_rtx_MEM (Pmode, low); ++ *low_out = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, mem), ++ UNSPEC_LOAD_FROM_GOT); ++ break; ++ } ++ ++ default: ++ gcc_unreachable (); ++ } ++ ++ return true; ++} ++ + /* This function is used to implement LEGITIMIZE_ADDRESS. If X can + be legitimized in a way that the generic machinery might not expect, + return a new address, otherwise return NULL. MODE is the mode of +@@ -2412,6 +2651,10 @@ loongarch_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, + if (loongarch_tls_symbol_p (x)) + return loongarch_legitimize_tls_address (x); + ++ /* See if the address can split into a high part and a LO_SUM. */ ++ if (loongarch_split_symbol (NULL, x, mode, &addr)) ++ return loongarch_force_address (addr, mode); ++ + /* Handle BASE + OFFSET using loongarch_add_offset. */ + loongarch_split_plus (x, &base, &offset); + if (offset != 0) +@@ -2499,6 +2742,13 @@ loongarch_legitimize_const_move (machine_mode mode, rtx dest, rtx src) + return; + } + ++ /* Split moves of symbolic constants into high and low. 
*/ ++ if (loongarch_split_symbol (dest, src, MAX_MACHINE_MODE, &src)) ++ { ++ loongarch_emit_set (dest, src); ++ return; ++ } ++ + /* Generate the appropriate access sequences for TLS symbols. */ + if (loongarch_tls_symbol_p (src)) + { +@@ -3241,21 +3491,12 @@ loongarch_split_move (rtx dest, rtx src, rtx insn_) + + /* Return true if a move from SRC to DEST in INSN should be split. */ + +-bool ++static bool + loongarch_split_move_insn_p (rtx dest, rtx src) + { + return loongarch_split_move_p (dest, src); + } + +-/* Split a move from SRC to DEST in INSN, given that +- loongarch_split_move_insn_p holds. */ +- +-void +-loongarch_split_move_insn (rtx dest, rtx src, rtx insn) +-{ +- loongarch_split_move (dest, src, insn); +-} +- + /* Implement TARGET_CONSTANT_ALIGNMENT. */ + + static HOST_WIDE_INT +@@ -3369,13 +3610,16 @@ loongarch_output_move (rtx dest, rtx src) + case 2: + return "st.h\t%z1,%0"; + case 4: +- /* Matching address type with a 12bit offset. */ +- if (const_arith_operand (offset, Pmode)) ++ /* Matching address type with a 12bit offset and ++ ADDRESS_LO_SUM. */ ++ if (const_arith_operand (offset, Pmode) ++ || GET_CODE (offset) == LO_SUM) + return "st.w\t%z1,%0"; + else + return "stptr.w\t%z1,%0"; + case 8: +- if (const_arith_operand (offset, Pmode)) ++ if (const_arith_operand (offset, Pmode) ++ || GET_CODE (offset) == LO_SUM) + return "st.d\t%z1,%0"; + else + return "stptr.d\t%z1,%0"; +@@ -3408,13 +3652,16 @@ loongarch_output_move (rtx dest, rtx src) + case 2: + return "ld.hu\t%0,%1"; + case 4: +- /* Matching address type with a 12bit offset. */ +- if (const_arith_operand (offset, Pmode)) ++ /* Matching address type with a 12bit offset and ++ ADDRESS_LO_SUM. 
*/ ++ if (const_arith_operand (offset, Pmode) ++ || GET_CODE (offset) == LO_SUM) + return "ld.w\t%0,%1"; + else + return "ldptr.w\t%0,%1"; + case 8: +- if (const_arith_operand (offset, Pmode)) ++ if (const_arith_operand (offset, Pmode) ++ || GET_CODE (offset) == LO_SUM) + return "ld.d\t%0,%1"; + else + return "ldptr.d\t%0,%1"; +@@ -3423,6 +3670,21 @@ loongarch_output_move (rtx dest, rtx src) + } + } + ++ if (src_code == HIGH) ++ { ++ rtx offset, x; ++ split_const (XEXP (src, 0), &x, &offset); ++ enum loongarch_symbol_type type = SYMBOL_PCREL; ++ ++ if (UNSPEC_ADDRESS_P (x)) ++ type = UNSPEC_ADDRESS_TYPE (x); ++ ++ if (type == SYMBOL_TLS_LE) ++ return "lu12i.w\t%0,%h1"; ++ else ++ return "pcalau12i\t%0,%h1"; ++ } ++ + if (src_code == CONST_INT) + { + if (LU12I_INT (src)) +@@ -3438,7 +3700,8 @@ loongarch_output_move (rtx dest, rtx src) + } + } + +- if (dest_code == REG && symbolic_operand (src, VOIDmode)) ++ if (!TARGET_EXPLICIT_RELOCS ++ && dest_code == REG && symbolic_operand (src, VOIDmode)) + { + if (loongarch_classify_symbol (src) == SYMBOL_PCREL) + return "la.local\t%0,%1"; +@@ -4307,6 +4570,49 @@ loongarch_memmodel_needs_release_fence (enum memmodel model) + } + } + ++/* Print symbolic operand OP, which is part of a HIGH or LO_SUM ++ in context CONTEXT. HI_RELOC indicates a high-part reloc. */ ++ ++static void ++loongarch_print_operand_reloc (FILE *file, rtx op, bool hi_reloc) ++{ ++ const char *reloc; ++ ++ switch (loongarch_classify_symbolic_expression (op)) ++ { ++ case SYMBOL_PCREL: ++ reloc = hi_reloc ? "%pc_hi20" : "%pc_lo12"; ++ break; ++ ++ case SYMBOL_GOT_DISP: ++ reloc = hi_reloc ? "%got_pc_hi20" : "%got_pc_lo12"; ++ break; ++ ++ case SYMBOL_TLS_IE: ++ reloc = hi_reloc ? "%ie_pc_hi20" : "%ie_pc_lo12"; ++ break; ++ ++ case SYMBOL_TLS_LE: ++ reloc = hi_reloc ? "%le_hi20" : "%le_lo12"; ++ break; ++ ++ case SYMBOL_TLSGD: ++ reloc = hi_reloc ? "%gd_pc_hi20" : "%got_pc_lo12"; ++ break; ++ ++ case SYMBOL_TLSLDM: ++ reloc = hi_reloc ? 
"%ld_pc_hi20" : "%got_pc_lo12"; ++ break; ++ ++ default: ++ gcc_unreachable (); ++ } ++ ++ fprintf (file, "%s(", reloc); ++ output_addr_const (file, loongarch_strip_unspec_address (op)); ++ fputc (')', file); ++} ++ + /* Implement TARGET_PRINT_OPERAND. The LoongArch-specific operand codes are: + + 'A' Print a _DB suffix if the memory model requires a release. +@@ -4315,7 +4621,10 @@ loongarch_memmodel_needs_release_fence (enum memmodel model) + 'd' Print CONST_INT OP in decimal. + 'F' Print the FPU branch condition for comparison OP. + 'G' Print a DBAR insn if the memory model requires a release. ++ 'H' Print address 52-61bit relocation associated with OP. ++ 'h' Print the high-part relocation associated with OP. + 'i' Print i if the operand is not a register. ++ 'L' Print the low-part relocation associated with OP. + 'm' Print one less than CONST_INT OP in decimal. + 'N' Print the inverse of the integer branch condition for comparison OP. + 'T' Print 'f' for (eq:CC ...), 't' for (ne:CC ...), +@@ -4372,11 +4681,21 @@ loongarch_print_operand (FILE *file, rtx op, int letter) + fputs ("dbar\t0", file); + break; + ++ case 'h': ++ if (code == HIGH) ++ op = XEXP (op, 0); ++ loongarch_print_operand_reloc (file, op, true /* hi_reloc */); ++ break; ++ + case 'i': + if (code != REG) + fputs ("i", file); + break; + ++ case 'L': ++ loongarch_print_operand_reloc (file, op, false /* lo_reloc */); ++ break; ++ + case 'm': + if (CONST_INT_P (op)) + fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op) - 1); +@@ -4517,6 +4836,11 @@ loongarch_print_operand_address (FILE *file, machine_mode /* mode */, rtx x) + reg_names[REGNO (addr.offset)]); + return; + ++ case ADDRESS_LO_SUM: ++ fprintf (file, "%s,", reg_names[REGNO (addr.reg)]); ++ loongarch_print_operand_reloc (file, addr.offset, false /* hi_reloc */); ++ return; ++ + case ADDRESS_CONST_INT: + fprintf (file, "%s,", reg_names[GP_REG_FIRST]); + output_addr_const (file, x); +@@ -5891,6 +6215,12 @@ loongarch_starting_frame_offset 
(void) + #undef TARGET_TRAMPOLINE_INIT + #define TARGET_TRAMPOLINE_INIT loongarch_trampoline_init + ++#undef TARGET_MIN_ANCHOR_OFFSET ++#define TARGET_MIN_ANCHOR_OFFSET (-IMM_REACH/2) ++ ++#undef TARGET_MAX_ANCHOR_OFFSET ++#define TARGET_MAX_ANCHOR_OFFSET (IMM_REACH/2-1) ++ + #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV + #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV loongarch_atomic_assign_expand_fenv + +diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md +index 376879fbc..6b6df22a5 100644 +--- a/gcc/config/loongarch/loongarch.md ++++ b/gcc/config/loongarch/loongarch.md +@@ -57,6 +57,10 @@ + ;; CRC + UNSPEC_CRC + UNSPEC_CRCC ++ ++ UNSPEC_LOAD_FROM_GOT ++ UNSPEC_ORI_L_LO12 ++ UNSPEC_TLS_LOW + ]) + + (define_c_enum "unspecv" [ +@@ -1743,73 +1747,6 @@ + [(set_attr "move_type" "move,load,store") + (set_attr "mode" "DF")]) + +- +-;; 128-bit integer moves +- +-(define_expand "movti" +- [(set (match_operand:TI 0) +- (match_operand:TI 1))] +- "TARGET_64BIT" +-{ +- if (loongarch_legitimize_move (TImode, operands[0], operands[1])) +- DONE; +-}) +- +-(define_insn "*movti" +- [(set (match_operand:TI 0 "nonimmediate_operand" "=r,r,r,m") +- (match_operand:TI 1 "move_operand" "r,i,m,rJ"))] +- "TARGET_64BIT +- && (register_operand (operands[0], TImode) +- || reg_or_0_operand (operands[1], TImode))" +- { return loongarch_output_move (operands[0], operands[1]); } +- [(set_attr "move_type" "move,const,load,store") +- (set (attr "mode") +- (if_then_else (eq_attr "move_type" "imul") +- (const_string "SI") +- (const_string "TI")))]) +- +-;; 128-bit floating point moves +- +-(define_expand "movtf" +- [(set (match_operand:TF 0) +- (match_operand:TF 1))] +- "TARGET_64BIT" +-{ +- if (loongarch_legitimize_move (TFmode, operands[0], operands[1])) +- DONE; +-}) +- +-;; This pattern handles both hard- and soft-float cases. 
+-(define_insn "*movtf" +- [(set (match_operand:TF 0 "nonimmediate_operand" "=r,r,m,f,r,f,m") +- (match_operand:TF 1 "move_operand" "rG,m,rG,rG,f,m,f"))] +- "TARGET_64BIT +- && (register_operand (operands[0], TFmode) +- || reg_or_0_operand (operands[1], TFmode))" +- "#" +- [(set_attr "move_type" "move,load,store,mgtf,mftg,fpload,fpstore") +- (set_attr "mode" "TF")]) +- +-(define_split +- [(set (match_operand:MOVE64 0 "nonimmediate_operand") +- (match_operand:MOVE64 1 "move_operand"))] +- "reload_completed && loongarch_split_move_insn_p (operands[0], operands[1])" +- [(const_int 0)] +-{ +- loongarch_split_move_insn (operands[0], operands[1], curr_insn); +- DONE; +-}) +- +-(define_split +- [(set (match_operand:MOVE128 0 "nonimmediate_operand") +- (match_operand:MOVE128 1 "move_operand"))] +- "reload_completed && loongarch_split_move_insn_p (operands[0], operands[1])" +- [(const_int 0)] +-{ +- loongarch_split_move_insn (operands[0], operands[1], curr_insn); +- DONE; +-}) +- + ;; Emit a doubleword move in which exactly one of the operands is + ;; a floating-point register. We can't just emit two normal moves + ;; because of the constraints imposed by the FPU register model; +@@ -1938,6 +1875,57 @@ + [(set_attr "type" "arith") + (set_attr "mode" "DI")]) + ++;; Instructions for adding the low 12 bits of an address to a register. ++;; Operand 2 is the address: loongarch_print_operand works out which relocation ++;; should be applied. 
++ ++(define_insn "*low" ++ [(set (match_operand:P 0 "register_operand" "=r") ++ (lo_sum:P (match_operand:P 1 "register_operand" " r") ++ (match_operand:P 2 "symbolic_operand" "")))] ++ "TARGET_EXPLICIT_RELOCS" ++ "addi.\t%0,%1,%L2" ++ [(set_attr "type" "arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "@tls_low" ++ [(set (match_operand:P 0 "register_operand" "=r") ++ (unspec:P [(mem:P (lo_sum:P (match_operand:P 1 "register_operand" "r") ++ (match_operand:P 2 "symbolic_operand" "")))] ++ UNSPEC_TLS_LOW))] ++ "TARGET_EXPLICIT_RELOCS" ++ "addi.\t%0,%1,%L2" ++ [(set_attr "type" "arith") ++ (set_attr "mode" "")]) ++ ++;; Instructions for loading address from GOT entry. ++;; operands[1] is pc plus the high half of the address difference with the got ++;; entry; ++;; operands[2] is low 12 bits for low 12 bit of the address difference with the ++;; got entry. ++;; loongarch_print_operand works out which relocation should be applied. ++ ++(define_insn "@ld_from_got" ++ [(set (match_operand:P 0 "register_operand" "=r") ++ (unspec:P [(mem:P (lo_sum:P ++ (match_operand:P 1 "register_operand" "r") ++ (match_operand:P 2 "symbolic_operand")))] ++ UNSPEC_LOAD_FROM_GOT))] ++ "TARGET_EXPLICIT_RELOCS" ++ "ld.\t%0,%1,%L2" ++ [(set_attr "type" "move")] ++) ++ ++(define_insn "@ori_l_lo12" ++ [(set (match_operand:P 0 "register_operand" "=r") ++ (unspec:P [(match_operand:P 1 "register_operand" "r") ++ (match_operand:P 2 "symbolic_operand")] ++ UNSPEC_ORI_L_LO12))] ++ "" ++ "ori\t%0,%1,%L2" ++ [(set_attr "type" "move")] ++) ++ + ;; Convert floating-point numbers to integers + (define_insn "frint_" + [(set (match_operand:ANYF 0 "register_operand" "=f") +diff --git a/gcc/config/loongarch/loongarch.opt b/gcc/config/loongarch/loongarch.opt +index 3ff0d8604..7a8c5b444 100644 +--- a/gcc/config/loongarch/loongarch.opt ++++ b/gcc/config/loongarch/loongarch.opt +@@ -161,6 +161,10 @@ mmax-inline-memcpy-size= + Target Joined RejectNegative UInteger Var(loongarch_max_inline_memcpy_size) Init(1024) 
+ -mmax-inline-memcpy-size=SIZE Set the max size of memcpy to inline, default is 1024. + ++mexplicit-relocs ++Target Var(TARGET_EXPLICIT_RELOCS) Init(1) ++Use %reloc() assembly operators. ++ + ; The code model option names for -mcmodel. + Enum + Name(cmodel) Type(int) +diff --git a/gcc/config/loongarch/predicates.md b/gcc/config/loongarch/predicates.md +index 2243ef71c..cd3528c7c 100644 +--- a/gcc/config/loongarch/predicates.md ++++ b/gcc/config/loongarch/predicates.md +@@ -110,6 +110,10 @@ + (define_predicate "const_call_insn_operand" + (match_code "const,symbol_ref,label_ref") + { ++ /* Split symbol to high and low if return false. ++ If defined TARGET_CMODEL_LARGE, all symbol would be splited, ++ else if offset is not zero, the symbol would be splited. */ ++ + enum loongarch_symbol_type symbol_type; + loongarch_symbolic_constant_p (op, &symbol_type); + +@@ -125,7 +129,7 @@ + return 1; + + case SYMBOL_GOT_DISP: +- if (!flag_plt) ++ if (TARGET_CMODEL_LARGE || !flag_plt) + return false; + else + return 1; +@@ -213,7 +217,19 @@ + case CONST: + case SYMBOL_REF: + case LABEL_REF: +- return loongarch_symbolic_constant_p (op, &symbol_type); ++ return (loongarch_symbolic_constant_p (op, &symbol_type) ++ && (!TARGET_EXPLICIT_RELOCS ++ || !loongarch_split_symbol_type (symbol_type))); ++ ++ case HIGH: ++ /* '-mno-explicit-relocs' don't generate high/low pairs. 
*/ ++ if (!TARGET_EXPLICIT_RELOCS) ++ return false; ++ ++ op = XEXP (op, 0); ++ return (loongarch_symbolic_constant_p (op, &symbol_type) ++ && loongarch_split_symbol_type (symbol_type)); ++ + default: + return true; + } +diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-1.c b/gcc/testsuite/gcc.target/loongarch/func-call-1.c +index b0482761a..01b8ea23f 100644 +--- a/gcc/testsuite/gcc.target/loongarch/func-call-1.c ++++ b/gcc/testsuite/gcc.target/loongarch/func-call-1.c +@@ -1,5 +1,5 @@ + /* { dg-do compile } */ +-/* { dg-options "-mabi=lp64d -O0 -fpic -fplt" } */ ++/* { dg-options "-mabi=lp64d -O0 -fpic -fplt -mno-explicit-relocs" } */ + /* { dg-final { scan-assembler "test:.*bl\t%plt\\(g\\)\n" } } */ + /* { dg-final { scan-assembler "test1:.*bl\t%plt\\(f\\)\n" } } */ + /* { dg-final { scan-assembler "test2:.*bl\tl\n" } } */ +diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-2.c b/gcc/testsuite/gcc.target/loongarch/func-call-2.c +index f5e061c29..4565baaec 100644 +--- a/gcc/testsuite/gcc.target/loongarch/func-call-2.c ++++ b/gcc/testsuite/gcc.target/loongarch/func-call-2.c +@@ -1,5 +1,5 @@ + /* { dg-do compile } */ +-/* { dg-options "-mabi=lp64d -O0 -fno-pic -fplt" } */ ++/* { dg-options "-mabi=lp64d -O0 -fno-pic -fplt -mno-explicit-relocs" } */ + /* { dg-final { scan-assembler "test:.*bl\t%plt\\(g\\)\n" } } */ + /* { dg-final { scan-assembler "test1:.*bl\tf\n" } } */ + /* { dg-final { scan-assembler "test2:.*bl\tl\n" } } */ +diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-3.c b/gcc/testsuite/gcc.target/loongarch/func-call-3.c +index 75082c574..4f669a029 100644 +--- a/gcc/testsuite/gcc.target/loongarch/func-call-3.c ++++ b/gcc/testsuite/gcc.target/loongarch/func-call-3.c +@@ -1,5 +1,5 @@ + /* { dg-do compile } */ +-/* { dg-options "-mabi=lp64d -O0 -fpic -fno-plt" } */ ++/* { dg-options "-mabi=lp64d -O0 -fpic -fno-plt -mno-explicit-relocs" } */ + /* { dg-final { scan-assembler "test:.*la\.global\t.*g\n\tjirl" } } */ + /* { dg-final { 
scan-assembler "test1:.*la\.global\t.*f\n\tjirl" } } */ + /* { dg-final { scan-assembler "test2:.*bl\tl\n" } } */ +diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-4.c b/gcc/testsuite/gcc.target/loongarch/func-call-4.c +index e8a839549..943adb640 100644 +--- a/gcc/testsuite/gcc.target/loongarch/func-call-4.c ++++ b/gcc/testsuite/gcc.target/loongarch/func-call-4.c +@@ -1,5 +1,5 @@ + /* { dg-do compile } */ +-/* { dg-options "-mabi=lp64d -O0 -fno-pic -fno-plt" } */ ++/* { dg-options "-mabi=lp64d -O0 -fno-pic -fno-plt -mno-explicit-relocs" } */ + /* { dg-final { scan-assembler "test:.*la\.global\t.*g\n\tjirl" } } */ + /* { dg-final { scan-assembler "test1:.*bl\tf\n" } } */ + /* { dg-final { scan-assembler "test2:.*bl\tl\n" } } */ +diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-5.c b/gcc/testsuite/gcc.target/loongarch/func-call-5.c +new file mode 100644 +index 000000000..2c2a1c8a1 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/func-call-5.c +@@ -0,0 +1,33 @@ ++/* { dg-do compile } */ ++/* { dg-options "-mabi=lp64d -O0 -fpic -fplt -mexplicit-relocs" } */ ++/* { dg-final { scan-assembler "test:.*bl\t%plt\\(g\\)\n" } } */ ++/* { dg-final { scan-assembler "test1:.*bl\t%plt\\(f\\)\n" } } */ ++/* { dg-final { scan-assembler "test2:.*bl\tl\n" } } */ ++ ++extern void g (void); ++ ++void ++f (void) ++{} ++ ++static void ++l (void) ++{} ++ ++void ++test (void) ++{ ++ g (); ++} ++ ++void ++test1 (void) ++{ ++ f (); ++} ++ ++void ++test2 (void) ++{ ++ l (); ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-6.c b/gcc/testsuite/gcc.target/loongarch/func-call-6.c +new file mode 100644 +index 000000000..4b0e4266e +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/func-call-6.c +@@ -0,0 +1,33 @@ ++/* { dg-do compile } */ ++/* { dg-options "-mabi=lp64d -O0 -fno-pic -fplt -mexplicit-relocs" } */ ++/* { dg-final { scan-assembler "test:.*bl\t%plt\\(g\\)\n" } } */ ++/* { dg-final { scan-assembler "test1:.*bl\tf\n" } } */ ++/* { dg-final { 
scan-assembler "test2:.*bl\tl\n" } } */ ++ ++extern void g (void); ++ ++void ++f (void) ++{} ++ ++static void ++l (void) ++{} ++ ++void ++test (void) ++{ ++ g (); ++} ++ ++void ++test1 (void) ++{ ++ f (); ++} ++ ++void ++test2 (void) ++{ ++ l (); ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-7.c b/gcc/testsuite/gcc.target/loongarch/func-call-7.c +new file mode 100644 +index 000000000..51792711f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/func-call-7.c +@@ -0,0 +1,34 @@ ++/* { dg-do compile } */ ++/* { dg-options "-mabi=lp64d -O0 -fpic -fno-plt -mexplicit-relocs" } */ ++/* { dg-final { scan-assembler "test:.*pcalau12i\t.*%got_pc_hi20\\(g\\)\n\tld\.d\t.*%got_pc_lo12\\(g\\)\n\tjirl" } } */ ++/* { dg-final { scan-assembler "test1:.*pcalau12i\t.*%got_pc_hi20\\(f\\)\n\tld\.d\t.*%got_pc_lo12\\(f\\)\n\tjirl" } } */ ++/* { dg-final { scan-assembler "test2:.*bl\tl\n" } } */ ++ ++ ++extern void g (void); ++ ++void ++f (void) ++{} ++ ++static void ++l (void) ++{} ++ ++void ++test (void) ++{ ++ g (); ++} ++ ++void ++test1 (void) ++{ ++ f (); ++} ++ ++void ++test2 (void) ++{ ++ l (); ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-8.c b/gcc/testsuite/gcc.target/loongarch/func-call-8.c +new file mode 100644 +index 000000000..330140d88 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/func-call-8.c +@@ -0,0 +1,33 @@ ++/* { dg-do compile } */ ++/* { dg-options "-mabi=lp64d -O0 -fno-pic -fno-plt -mexplicit-relocs" } */ ++/* { dg-final { scan-assembler "test:.*pcalau12i\t.*%got_pc_hi20\\(g\\)\n\tld\.d\t.*%got_pc_lo12\\(g\\)\n\tjirl" } } */ ++/* { dg-final { scan-assembler "test1:.*bl\tf\n" } } */ ++/* { dg-final { scan-assembler "test2:.*bl\tl\n" } } */ ++ ++extern void g (void); ++ ++void ++f (void) ++{} ++ ++static void ++l (void) ++{} ++ ++void ++test (void) ++{ ++ g (); ++} ++ ++void ++test1 (void) ++{ ++ f (); ++} ++ ++void ++test2 (void) ++{ ++ l (); ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/relocs-symbol-noaddend.c 
b/gcc/testsuite/gcc.target/loongarch/relocs-symbol-noaddend.c +new file mode 100644 +index 000000000..bfcc9bc33 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/relocs-symbol-noaddend.c +@@ -0,0 +1,23 @@ ++/* { dg-do compile } */ ++/* { dg-options "-mabi=lp64d -mexplicit-relocs -fno-pic -O2" } */ ++/* { dg-final { scan-assembler "pcalau12i.*%pc_hi20\\(\.LANCHOR0\\)\n" } } */ ++/* { dg-final { scan-assembler "addi\.d.*%pc_lo12\\(\.LANCHOR0\\)\n" } } */ ++/* { dg-final { scan-assembler "ldptr.d\t\\\$r4,.*,0\n" } } */ ++/* { dg-final { scan-assembler "ld.d\t\\\$r5,.*,8\n" } } */ ++/* { dg-final { scan-assembler-not "\.LANCHOR0+8" } } */ ++ ++ ++struct S ++{ ++ char *a; ++ unsigned short int b; ++}; ++ ++struct S s1; ++ ++void test(struct S); ++void test1(void) ++{ ++ test(s1); ++} ++ +-- +2.33.0 + diff --git a/LoongArch-Support-storing-floating-point-zero-into-M.patch b/LoongArch-Support-storing-floating-point-zero-into-M.patch new file mode 100644 index 0000000000000000000000000000000000000000..360c71fae6d30abaa7b059f9c7c0d1ea6486b590 --- /dev/null +++ b/LoongArch-Support-storing-floating-point-zero-into-M.patch @@ -0,0 +1,90 @@ +From de803130fa7d33afaf6e2fc42ef1cd97e45edf96 Mon Sep 17 00:00:00 2001 +From: Guo Jie +Date: Fri, 1 Sep 2023 16:35:05 +0800 +Subject: [PATCH 057/124] LoongArch: Support storing floating-point zero into + MEM[base + index]. + +v2: Modify commit message. + +gcc/ChangeLog: + + * config/loongarch/loongarch.md: Support 'G' -> 'k' in + movsf_hardfloat and movdf_hardfloat. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/const-double-zero-stx.c: New test. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/loongarch.md | 12 ++++++------ + .../loongarch/const-double-zero-stx.c | 18 ++++++++++++++++++ + 2 files changed, 24 insertions(+), 6 deletions(-) + create mode 100644 gcc/testsuite/gcc.target/loongarch/const-double-zero-stx.c + +diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md +index a5e9352ca..2d269794f 100644 +--- a/gcc/config/loongarch/loongarch.md ++++ b/gcc/config/loongarch/loongarch.md +@@ -1915,13 +1915,13 @@ + }) + + (define_insn "*movsf_hardfloat" +- [(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,f,m,f,k,m,*f,*r,*r,*r,*m") +- (match_operand:SF 1 "move_operand" "f,G,m,f,k,f,G,*r,*f,*G*r,*m,*r"))] ++ [(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,f,m,f,k,m,k,*f,*r,*r,*r,*m") ++ (match_operand:SF 1 "move_operand" "f,G,m,f,k,f,G,G,*r,*f,*G*r,*m,*r"))] + "TARGET_HARD_FLOAT + && (register_operand (operands[0], SFmode) + || reg_or_0_operand (operands[1], SFmode))" + { return loongarch_output_move (operands[0], operands[1]); } +- [(set_attr "move_type" "fmove,mgtf,fpload,fpstore,fpload,fpstore,store,mgtf,mftg,move,load,store") ++ [(set_attr "move_type" "fmove,mgtf,fpload,fpstore,fpload,fpstore,store,store,mgtf,mftg,move,load,store") + (set_attr "mode" "SF")]) + + (define_insn "*movsf_softfloat" +@@ -1946,13 +1946,13 @@ + }) + + (define_insn "*movdf_hardfloat" +- [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,f,m,f,k,m,*f,*r,*r,*r,*m") +- (match_operand:DF 1 "move_operand" "f,G,m,f,k,f,G,*r,*f,*r*G,*m,*r"))] ++ [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,f,m,f,k,m,k,*f,*r,*r,*r,*m") ++ (match_operand:DF 1 "move_operand" "f,G,m,f,k,f,G,G,*r,*f,*r*G,*m,*r"))] + "TARGET_DOUBLE_FLOAT + && (register_operand (operands[0], DFmode) + || reg_or_0_operand (operands[1], DFmode))" + { return loongarch_output_move (operands[0], operands[1]); } +- [(set_attr "move_type" 
"fmove,mgtf,fpload,fpstore,fpload,fpstore,store,mgtf,mftg,move,load,store") ++ [(set_attr "move_type" "fmove,mgtf,fpload,fpstore,fpload,fpstore,store,store,mgtf,mftg,move,load,store") + (set_attr "mode" "DF")]) + + (define_insn "*movdf_softfloat" +diff --git a/gcc/testsuite/gcc.target/loongarch/const-double-zero-stx.c b/gcc/testsuite/gcc.target/loongarch/const-double-zero-stx.c +new file mode 100644 +index 000000000..8fb04be8f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/const-double-zero-stx.c +@@ -0,0 +1,18 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2" } */ ++/* { dg-final { scan-assembler-times {stx\..\t\$r0} 2 } } */ ++ ++extern float arr_f[]; ++extern double arr_d[]; ++ ++void ++test_f (int base, int index) ++{ ++ arr_f[base + index] = 0.0; ++} ++ ++void ++test_d (int base, int index) ++{ ++ arr_d[base + index] = 0.0; ++} +-- +2.33.0 + diff --git a/LoongArch-Use-LSX-and-LASX-for-block-move.patch b/LoongArch-Use-LSX-and-LASX-for-block-move.patch new file mode 100644 index 0000000000000000000000000000000000000000..77ead5f5f6ea8130d42f51bf4910396369b57a8a --- /dev/null +++ b/LoongArch-Use-LSX-and-LASX-for-block-move.patch @@ -0,0 +1,154 @@ +From 01b932dead0e7bcc05aae2ac742c76b5fcac5ae7 Mon Sep 17 00:00:00 2001 +From: Xi Ruoyao +Date: Tue, 5 Sep 2023 21:02:38 +0800 +Subject: [PATCH 072/124] LoongArch: Use LSX and LASX for block move + +gcc/ChangeLog: + + * config/loongarch/loongarch.h (LARCH_MAX_MOVE_PER_INSN): + Define to the maximum amount of bytes able to be loaded or + stored with one machine instruction. + * config/loongarch/loongarch.cc (loongarch_mode_for_move_size): + New static function. + (loongarch_block_move_straight): Call + loongarch_mode_for_move_size for machine_mode to be moved. + (loongarch_expand_block_move): Use LARCH_MAX_MOVE_PER_INSN + instead of UNITS_PER_WORD. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/memcpy-vec-1.c: New test. + * gcc.target/loongarch/memcpy-vec-2.c: New test. 
+ * gcc.target/loongarch/memcpy-vec-3.c: New test. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/loongarch.cc | 22 +++++++++++++++---- + gcc/config/loongarch/loongarch.h | 3 +++ + .../gcc.target/loongarch/memcpy-vec-1.c | 11 ++++++++++ + .../gcc.target/loongarch/memcpy-vec-2.c | 12 ++++++++++ + .../gcc.target/loongarch/memcpy-vec-3.c | 6 +++++ + 5 files changed, 50 insertions(+), 4 deletions(-) + create mode 100644 gcc/testsuite/gcc.target/loongarch/memcpy-vec-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/memcpy-vec-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/memcpy-vec-3.c + +diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc +index 4b0944d56..baa5c2354 100644 +--- a/gcc/config/loongarch/loongarch.cc ++++ b/gcc/config/loongarch/loongarch.cc +@@ -5187,6 +5187,20 @@ loongarch_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED, + return true; + } + ++static machine_mode ++loongarch_mode_for_move_size (HOST_WIDE_INT size) ++{ ++ switch (size) ++ { ++ case 32: ++ return V32QImode; ++ case 16: ++ return V16QImode; ++ } ++ ++ return int_mode_for_size (size * BITS_PER_UNIT, 0).require (); ++} ++ + /* Emit straight-line code to move LENGTH bytes from SRC to DEST. + Assume that the areas do not overlap. 
*/ + +@@ -5216,7 +5230,7 @@ loongarch_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length, + + for (delta_cur = delta, i = 0, offs = 0; offs < length; delta_cur /= 2) + { +- mode = int_mode_for_size (delta_cur * BITS_PER_UNIT, 0).require (); ++ mode = loongarch_mode_for_move_size (delta_cur); + + for (; offs + delta_cur <= length; offs += delta_cur, i++) + { +@@ -5227,7 +5241,7 @@ loongarch_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length, + + for (delta_cur = delta, i = 0, offs = 0; offs < length; delta_cur /= 2) + { +- mode = int_mode_for_size (delta_cur * BITS_PER_UNIT, 0).require (); ++ mode = loongarch_mode_for_move_size (delta_cur); + + for (; offs + delta_cur <= length; offs += delta_cur, i++) + loongarch_emit_move (adjust_address (dest, mode, offs), regs[i]); +@@ -5322,8 +5336,8 @@ loongarch_expand_block_move (rtx dest, rtx src, rtx r_length, rtx r_align) + + HOST_WIDE_INT align = INTVAL (r_align); + +- if (!TARGET_STRICT_ALIGN || align > UNITS_PER_WORD) +- align = UNITS_PER_WORD; ++ if (!TARGET_STRICT_ALIGN || align > LARCH_MAX_MOVE_PER_INSN) ++ align = LARCH_MAX_MOVE_PER_INSN; + + if (length <= align * LARCH_MAX_MOVE_OPS_STRAIGHT) + { +diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h +index b2295c589..c7e91a06d 100644 +--- a/gcc/config/loongarch/loongarch.h ++++ b/gcc/config/loongarch/loongarch.h +@@ -1181,6 +1181,9 @@ typedef struct { + least twice. */ + #define LARCH_MAX_MOVE_OPS_STRAIGHT (LARCH_MAX_MOVE_OPS_PER_LOOP_ITER * 2) + ++#define LARCH_MAX_MOVE_PER_INSN \ ++ (ISA_HAS_LASX ? 32 : (ISA_HAS_LSX ? 16 : UNITS_PER_WORD)) ++ + /* The base cost of a memcpy call, for MOVE_RATIO and friends. These + values were determined experimentally by benchmarking with CSiBE. 
+ */ +diff --git a/gcc/testsuite/gcc.target/loongarch/memcpy-vec-1.c b/gcc/testsuite/gcc.target/loongarch/memcpy-vec-1.c +new file mode 100644 +index 000000000..8d9fedc9e +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/memcpy-vec-1.c +@@ -0,0 +1,11 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -mabi=lp64d -march=la464 -mno-strict-align" } */ ++/* { dg-final { scan-assembler-times "xvst" 2 } } */ ++/* { dg-final { scan-assembler-times "\tvst" 1 } } */ ++/* { dg-final { scan-assembler-times "st\\.d|stptr\\.d" 1 } } */ ++/* { dg-final { scan-assembler-times "st\\.w|stptr\\.w" 1 } } */ ++/* { dg-final { scan-assembler-times "st\\.h" 1 } } */ ++/* { dg-final { scan-assembler-times "st\\.b" 1 } } */ ++ ++extern char a[], b[]; ++void test() { __builtin_memcpy(a, b, 95); } +diff --git a/gcc/testsuite/gcc.target/loongarch/memcpy-vec-2.c b/gcc/testsuite/gcc.target/loongarch/memcpy-vec-2.c +new file mode 100644 +index 000000000..6b28b884d +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/memcpy-vec-2.c +@@ -0,0 +1,12 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -mabi=lp64d -march=la464 -mno-strict-align" } */ ++/* { dg-final { scan-assembler-times "xvst" 2 } } */ ++/* { dg-final { scan-assembler-times "\tvst" 1 } } */ ++/* { dg-final { scan-assembler-times "st\\.d|stptr\\.d" 1 } } */ ++/* { dg-final { scan-assembler-times "st\\.w|stptr\\.w" 1 } } */ ++/* { dg-final { scan-assembler-times "st\\.h" 1 } } */ ++/* { dg-final { scan-assembler-times "st\\.b" 1 } } */ ++ ++typedef char __attribute__ ((vector_size (32), aligned (32))) vec; ++extern vec a[], b[]; ++void test() { __builtin_memcpy(a, b, 95); } +diff --git a/gcc/testsuite/gcc.target/loongarch/memcpy-vec-3.c b/gcc/testsuite/gcc.target/loongarch/memcpy-vec-3.c +new file mode 100644 +index 000000000..233ed2150 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/memcpy-vec-3.c +@@ -0,0 +1,6 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -march=la464 -mabi=lp64d -mstrict-align" } */ ++/* 
{ dg-final { scan-assembler-not "vst" } } */ ++ ++extern char a[], b[]; ++void test() { __builtin_memcpy(a, b, 16); } +-- +2.33.0 + diff --git a/LoongArch-Use-UNSPEC-for-fmin-fmax-RTL-pattern-PR105.patch b/LoongArch-Use-UNSPEC-for-fmin-fmax-RTL-pattern-PR105.patch new file mode 100644 index 0000000000000000000000000000000000000000..105d2bc033381dbb333c6b4d94860058f4f05bfd --- /dev/null +++ b/LoongArch-Use-UNSPEC-for-fmin-fmax-RTL-pattern-PR105.patch @@ -0,0 +1,68 @@ +From d6c7fe54314f7633d779da1a635a8f0832b8f4f6 Mon Sep 17 00:00:00 2001 +From: Xi Ruoyao +Date: Sat, 24 Sep 2022 20:47:22 +0800 +Subject: [PATCH 021/124] LoongArch: Use UNSPEC for fmin/fmax RTL pattern + [PR105414] + +I made a mistake defining fmin/fmax RTL patterns in r13-2085: I used +smin and smax in the definition mistakenly. This causes the optimizer +to perform constant folding as if fmin/fmax was "really" smin/smax +operations even with -fsignaling-nans. Then pr105414.c fails. + +We don't have fmin/fmax RTL codes for now (PR107013) so we can only use +an UNSPEC for fmin and fmax patterns. + +gcc/ChangeLog: + + PR tree-optimization/105414 + * config/loongarch/loongarch.md (UNSPEC_FMAX): New unspec. + (UNSPEC_FMIN): Likewise. + (fmax3): Use UNSPEC_FMAX instead of smax. + (fmin3): Use UNSPEC_FMIN instead of smin. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/loongarch.md | 12 ++++++++---- + 1 file changed, 8 insertions(+), 4 deletions(-) + +diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md +index 3787fd823..214b14bdd 100644 +--- a/gcc/config/loongarch/loongarch.md ++++ b/gcc/config/loongarch/loongarch.md +@@ -35,6 +35,8 @@ + ;; Floating point unspecs. + UNSPEC_FRINT + UNSPEC_FCLASS ++ UNSPEC_FMAX ++ UNSPEC_FMIN + + ;; Override return address for exception handling. 
+ UNSPEC_EH_RETURN +@@ -1032,8 +1034,9 @@ + + (define_insn "fmax3" + [(set (match_operand:ANYF 0 "register_operand" "=f") +- (smax:ANYF (match_operand:ANYF 1 "register_operand" "f") +- (match_operand:ANYF 2 "register_operand" "f")))] ++ (unspec:ANYF [(use (match_operand:ANYF 1 "register_operand" "f")) ++ (use (match_operand:ANYF 2 "register_operand" "f"))] ++ UNSPEC_FMAX))] + "" + "fmax.\t%0,%1,%2" + [(set_attr "type" "fmove") +@@ -1041,8 +1044,9 @@ + + (define_insn "fmin3" + [(set (match_operand:ANYF 0 "register_operand" "=f") +- (smin:ANYF (match_operand:ANYF 1 "register_operand" "f") +- (match_operand:ANYF 2 "register_operand" "f")))] ++ (unspec:ANYF [(use (match_operand:ANYF 1 "register_operand" "f")) ++ (use (match_operand:ANYF 2 "register_operand" "f"))] ++ UNSPEC_FMIN))] + "" + "fmin.\t%0,%1,%2" + [(set_attr "type" "fmove") +-- +2.33.0 + diff --git a/LoongArch-Use-bstrins-instruction-for-a-mask-and-a-m.patch b/LoongArch-Use-bstrins-instruction-for-a-mask-and-a-m.patch new file mode 100644 index 0000000000000000000000000000000000000000..46f1dee2bea4502ada876cbd0ad8b5bbc4a2c14c --- /dev/null +++ b/LoongArch-Use-bstrins-instruction-for-a-mask-and-a-m.patch @@ -0,0 +1,336 @@ +From 1c63c61f6508e3c718be79dd27dda25db2b291ee Mon Sep 17 00:00:00 2001 +From: Xi Ruoyao +Date: Tue, 5 Sep 2023 19:42:30 +0800 +Subject: [PATCH 068/124] LoongArch: Use bstrins instruction for (a & ~mask) + and (a & mask) | (b & ~mask) [PR111252] + +If mask is a constant with value ((1 << N) - 1) << M we can perform this +optimization. + +gcc/ChangeLog: + + PR target/111252 + * config/loongarch/loongarch-protos.h + (loongarch_pre_reload_split): Declare new function. + (loongarch_use_bstrins_for_ior_with_mask): Likewise. + * config/loongarch/loongarch.cc + (loongarch_pre_reload_split): Implement. + (loongarch_use_bstrins_for_ior_with_mask): Likewise. + * config/loongarch/predicates.md (ins_zero_bitmask_operand): + New predicate. 
+ * config/loongarch/loongarch.md (bstrins__for_mask): + New define_insn_and_split. + (bstrins__for_ior_mask): Likewise. + (define_peephole2): Further optimize code sequence produced by + bstrins__for_ior_mask if possible. + +gcc/testsuite/ChangeLog: + + * g++.target/loongarch/bstrins-compile.C: New test. + * g++.target/loongarch/bstrins-run.C: New test. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/loongarch-protos.h | 4 +- + gcc/config/loongarch/loongarch.cc | 36 ++++++++ + gcc/config/loongarch/loongarch.md | 91 +++++++++++++++++++ + gcc/config/loongarch/predicates.md | 8 ++ + .../g++.target/loongarch/bstrins-compile.C | 22 +++++ + .../g++.target/loongarch/bstrins-run.C | 65 +++++++++++++ + 6 files changed, 225 insertions(+), 1 deletion(-) + create mode 100644 gcc/testsuite/g++.target/loongarch/bstrins-compile.C + create mode 100644 gcc/testsuite/g++.target/loongarch/bstrins-run.C + +diff --git a/gcc/config/loongarch/loongarch-protos.h b/gcc/config/loongarch/loongarch-protos.h +index 133ec9fa8..ea61cf567 100644 +--- a/gcc/config/loongarch/loongarch-protos.h ++++ b/gcc/config/loongarch/loongarch-protos.h +@@ -56,7 +56,7 @@ enum loongarch_symbol_type { + }; + #define NUM_SYMBOL_TYPES (SYMBOL_TLSLDM + 1) + +-/* Routines implemented in loongarch.c. */ ++/* Routines implemented in loongarch.cc. 
*/ + extern rtx loongarch_emit_move (rtx, rtx); + extern HOST_WIDE_INT loongarch_initial_elimination_offset (int, int); + extern void loongarch_expand_prologue (void); +@@ -163,6 +163,8 @@ extern const char *current_section_name (void); + extern unsigned int current_section_flags (void); + extern bool loongarch_use_ins_ext_p (rtx, HOST_WIDE_INT, HOST_WIDE_INT); + extern bool loongarch_check_zero_div_p (void); ++extern bool loongarch_pre_reload_split (void); ++extern int loongarch_use_bstrins_for_ior_with_mask (machine_mode, rtx *); + + union loongarch_gen_fn_ptrs + { +diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc +index dae35a479..4b0944d56 100644 +--- a/gcc/config/loongarch/loongarch.cc ++++ b/gcc/config/loongarch/loongarch.cc +@@ -5478,6 +5478,42 @@ loongarch_use_ins_ext_p (rtx op, HOST_WIDE_INT width, HOST_WIDE_INT bitpos) + return true; + } + ++/* Predicate for pre-reload splitters with associated instructions, ++ which can match any time before the split1 pass (usually combine), ++ then are unconditionally split in that pass and should not be ++ matched again afterwards. */ ++ ++bool loongarch_pre_reload_split (void) ++{ ++ return (can_create_pseudo_p () ++ && !(cfun->curr_properties & PROP_rtl_split_insns)); ++} ++ ++/* Check if we can use bstrins. for ++ op0 = (op1 & op2) | (op3 & op4) ++ where op0, op1, op3 are regs, and op2, op4 are integer constants. */ ++int ++loongarch_use_bstrins_for_ior_with_mask (machine_mode mode, rtx *op) ++{ ++ unsigned HOST_WIDE_INT mask1 = UINTVAL (op[2]); ++ unsigned HOST_WIDE_INT mask2 = UINTVAL (op[4]); ++ ++ if (mask1 != ~mask2 || !mask1 || !mask2) ++ return 0; ++ ++ /* Try to avoid a right-shift. 
*/ ++ if (low_bitmask_len (mode, mask1) != -1) ++ return -1; ++ ++ if (low_bitmask_len (mode, mask2 >> (ffs_hwi (mask2) - 1)) != -1) ++ return 1; ++ ++ if (low_bitmask_len (mode, mask1 >> (ffs_hwi (mask1) - 1)) != -1) ++ return -1; ++ ++ return 0; ++} ++ + /* Print the text for PRINT_OPERAND punctation character CH to FILE. + The punctuation characters are: + +diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md +index 3dde0ceb1..11c18bf15 100644 +--- a/gcc/config/loongarch/loongarch.md ++++ b/gcc/config/loongarch/loongarch.md +@@ -1322,6 +1322,97 @@ + [(set_attr "move_type" "pick_ins") + (set_attr "mode" "")]) + ++(define_insn_and_split "*bstrins__for_mask" ++ [(set (match_operand:GPR 0 "register_operand") ++ (and:GPR (match_operand:GPR 1 "register_operand") ++ (match_operand:GPR 2 "ins_zero_bitmask_operand")))] ++ "" ++ "#" ++ "" ++ [(set (match_dup 0) (match_dup 1)) ++ (set (zero_extract:GPR (match_dup 0) (match_dup 2) (match_dup 3)) ++ (const_int 0))] ++ { ++ unsigned HOST_WIDE_INT mask = ~UINTVAL (operands[2]); ++ int lo = ffs_hwi (mask) - 1; ++ int len = low_bitmask_len (mode, mask >> lo); ++ ++ len = MIN (len, GET_MODE_BITSIZE (mode) - lo); ++ operands[2] = GEN_INT (len); ++ operands[3] = GEN_INT (lo); ++ }) ++ ++(define_insn_and_split "*bstrins__for_ior_mask" ++ [(set (match_operand:GPR 0 "register_operand") ++ (ior:GPR (and:GPR (match_operand:GPR 1 "register_operand") ++ (match_operand:GPR 2 "const_int_operand")) ++ (and:GPR (match_operand:GPR 3 "register_operand") ++ (match_operand:GPR 4 "const_int_operand"))))] ++ "loongarch_pre_reload_split () && \ ++ loongarch_use_bstrins_for_ior_with_mask (mode, operands)" ++ "#" ++ "" ++ [(set (match_dup 0) (match_dup 1)) ++ (set (zero_extract:GPR (match_dup 0) (match_dup 2) (match_dup 4)) ++ (match_dup 3))] ++ { ++ if (loongarch_use_bstrins_for_ior_with_mask (mode, operands) < 0) ++ { ++ std::swap (operands[1], operands[3]); ++ std::swap (operands[2], operands[4]); ++ } ++ ++ unsigned 
HOST_WIDE_INT mask = ~UINTVAL (operands[2]); ++ int lo = ffs_hwi (mask) - 1; ++ int len = low_bitmask_len (mode, mask >> lo); ++ ++ len = MIN (len, GET_MODE_BITSIZE (mode) - lo); ++ operands[2] = GEN_INT (len); ++ operands[4] = GEN_INT (lo); ++ ++ if (lo) ++ { ++ rtx tmp = gen_reg_rtx (mode); ++ emit_move_insn (tmp, gen_rtx_ASHIFTRT(mode, operands[3], ++ GEN_INT (lo))); ++ operands[3] = tmp; ++ } ++ }) ++ ++;; We always avoid the shift operation in bstrins__for_ior_mask ++;; if possible, but the result may be sub-optimal when one of the masks ++;; is (1 << N) - 1 and one of the src register is the dest register. ++;; For example: ++;; move t0, a0 ++;; move a0, a1 ++;; bstrins.d a0, t0, 42, 0 ++;; ret ++;; using a shift operation would be better: ++;; srai.d t0, a1, 43 ++;; bstrins.d a0, t0, 63, 43 ++;; ret ++;; unfortunately we cannot figure it out in split1: before reload we cannot ++;; know if the dest register is one of the src register. Fix it up in ++;; peephole2. ++(define_peephole2 ++ [(set (match_operand:GPR 0 "register_operand") ++ (match_operand:GPR 1 "register_operand")) ++ (set (match_dup 1) (match_operand:GPR 2 "register_operand")) ++ (set (zero_extract:GPR (match_dup 1) ++ (match_operand:SI 3 "const_int_operand") ++ (const_int 0)) ++ (match_dup 0))] ++ "peep2_reg_dead_p (3, operands[0])" ++ [(const_int 0)] ++ { ++ int len = GET_MODE_BITSIZE (mode) - INTVAL (operands[3]); ++ ++ emit_insn (gen_ashr3 (operands[0], operands[2], operands[3])); ++ emit_insn (gen_insv (operands[1], GEN_INT (len), operands[3], ++ operands[0])); ++ DONE; ++ }) ++ + (define_insn "*iorhi3" + [(set (match_operand:HI 0 "register_operand" "=r,r") + (ior:HI (match_operand:HI 1 "register_operand" "%r,r") +diff --git a/gcc/config/loongarch/predicates.md b/gcc/config/loongarch/predicates.md +index cf9361b73..ad6cee5c4 100644 +--- a/gcc/config/loongarch/predicates.md ++++ b/gcc/config/loongarch/predicates.md +@@ -408,6 +408,14 @@ + (define_predicate "muldiv_target_operand" + 
(match_operand 0 "register_operand")) + ++(define_predicate "ins_zero_bitmask_operand" ++ (and (match_code "const_int") ++ (match_test "INTVAL (op) != -1") ++ (match_test "INTVAL (op) & 1") ++ (match_test "low_bitmask_len (mode, \ ++ ~UINTVAL (op) | (~UINTVAL(op) - 1)) \ ++ > 12"))) ++ + (define_predicate "const_call_insn_operand" + (match_code "const,symbol_ref,label_ref") + { +diff --git a/gcc/testsuite/g++.target/loongarch/bstrins-compile.C b/gcc/testsuite/g++.target/loongarch/bstrins-compile.C +new file mode 100644 +index 000000000..3c0db1de4 +--- /dev/null ++++ b/gcc/testsuite/g++.target/loongarch/bstrins-compile.C +@@ -0,0 +1,22 @@ ++/* { dg-do compile } */ ++/* { dg-options "-std=c++14 -O2 -march=loongarch64 -mabi=lp64d" } */ ++/* { dg-final { scan-assembler "bstrins\\.d.*7,4" } } */ ++/* { dg-final { scan-assembler "bstrins\\.d.*15,4" } } */ ++/* { dg-final { scan-assembler "bstrins\\.d.*31,4" } } */ ++/* { dg-final { scan-assembler "bstrins\\.d.*47,4" } } */ ++/* { dg-final { scan-assembler "bstrins\\.d.*3,0" } } */ ++ ++typedef unsigned long u64; ++ ++template ++u64 ++test (u64 a, u64 b) ++{ ++ return (a & mask) | (b & ~mask); ++} ++ ++template u64 test<0x0000'0000'0000'00f0l> (u64, u64); ++template u64 test<0x0000'0000'0000'fff0l> (u64, u64); ++template u64 test<0x0000'0000'ffff'fff0l> (u64, u64); ++template u64 test<0x0000'ffff'ffff'fff0l> (u64, u64); ++template u64 test<0xffff'ffff'ffff'fff0l> (u64, u64); +diff --git a/gcc/testsuite/g++.target/loongarch/bstrins-run.C b/gcc/testsuite/g++.target/loongarch/bstrins-run.C +new file mode 100644 +index 000000000..68913d5e0 +--- /dev/null ++++ b/gcc/testsuite/g++.target/loongarch/bstrins-run.C +@@ -0,0 +1,65 @@ ++/* { dg-do run } */ ++/* { dg-options "-O2" } */ ++ ++typedef unsigned long gr; ++ ++template ++struct mask { ++ enum { value = (1ul << r) - (1ul << l) }; ++}; ++ ++template ++struct mask { ++ enum { value = -(1ul << l) }; ++}; ++ ++__attribute__ ((noipa)) void ++test (gr a, gr b, gr mask, gr out) ++{ 
++ if (((a & mask) | (b & ~mask)) != out) ++ __builtin_abort (); ++} ++ ++__attribute__ ((noipa)) gr ++no_optimize (gr x) ++{ ++ return x; ++} ++ ++template ++struct test1 { ++ static void ++ run (void) ++ { ++ gr m = mask::value; ++ gr a = no_optimize (-1ul); ++ gr b = no_optimize (0); ++ ++ test (a, b, m, (a & m) | (b & ~m)); ++ test (a, b, ~m, (a & ~m) | (b & m)); ++ test (a, 0, ~m, a & ~m); ++ ++ test1::run (); ++ } ++}; ++ ++template ++struct test1 { ++ static void run (void) {} ++}; ++ ++template ++void ++test2 (void) ++{ ++ test1::run (); ++ test2 (); ++} ++ ++template <> void test2 (void) {} ++ ++int ++main () ++{ ++ test2<0> (); ++} +-- +2.33.0 + diff --git a/LoongArch-add-mdirect-extern-access-option.patch b/LoongArch-add-mdirect-extern-access-option.patch new file mode 100644 index 0000000000000000000000000000000000000000..0ed7acea9227579d712002f36500304f1d2e8035 --- /dev/null +++ b/LoongArch-add-mdirect-extern-access-option.patch @@ -0,0 +1,157 @@ +From 22f6d3fad184d87f3dac7634537fdbc24846bab9 Mon Sep 17 00:00:00 2001 +From: Xi Ruoyao +Date: Thu, 1 Sep 2022 18:38:14 +0800 +Subject: [PATCH 016/124] LoongArch: add -mdirect-extern-access option + +As a new target, LoongArch does not use copy relocation as it's +problematic in some circumstances. One bad consequence is we are +emitting GOT for all accesses to all extern objects with default +visibility. The use of GOT is not needed in statically linked +executables, OS kernels etc. The GOT entry just wastes space, and the +GOT access just slow down the execution in those environments. + +Before -mexplicit-relocs, we used "-Wa,-mla-global-with-pcrel" to tell +the assembler not to use GOT for extern access. But with +-mexplicit-relocs, we have to opt the logic in GCC. + +The name "-mdirect-extern-access" is learnt from x86 port. + +gcc/ChangeLog: + + * config/loongarch/genopts/loongarch.opt.in: Add + -mdirect-extern-access option. + * config/loongarch/loongarch.opt: Regenerate. 
+ * config/loongarch/loongarch.cc + (loongarch_symbol_binds_local_p): Return true if + TARGET_DIRECT_EXTERN_ACCESS. + (loongarch_option_override_internal): Complain if + -mdirect-extern-access is used with -fPIC or -fpic. + * doc/invoke.texi: Document -mdirect-extern-access for + LoongArch. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/direct-extern-1.c: New test. + * gcc.target/loongarch/direct-extern-2.c: New test. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/genopts/loongarch.opt.in | 4 ++++ + gcc/config/loongarch/loongarch.cc | 6 ++++++ + gcc/config/loongarch/loongarch.opt | 4 ++++ + gcc/doc/invoke.texi | 15 +++++++++++++++ + .../gcc.target/loongarch/direct-extern-1.c | 6 ++++++ + .../gcc.target/loongarch/direct-extern-2.c | 6 ++++++ + 6 files changed, 41 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/direct-extern-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/direct-extern-2.c + +diff --git a/gcc/config/loongarch/genopts/loongarch.opt.in b/gcc/config/loongarch/genopts/loongarch.opt.in +index ebdd9538d..e10618777 100644 +--- a/gcc/config/loongarch/genopts/loongarch.opt.in ++++ b/gcc/config/loongarch/genopts/loongarch.opt.in +@@ -184,3 +184,7 @@ Enum(cmodel) String(@@STR_CMODEL_EXTREME@@) Value(CMODEL_EXTREME) + mcmodel= + Target RejectNegative Joined Enum(cmodel) Var(la_opt_cmodel) Init(CMODEL_NORMAL) + Specify the code model. ++ ++mdirect-extern-access ++Target Var(TARGET_DIRECT_EXTERN_ACCESS) Init(0) ++Avoid using the GOT to access external symbols. +diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc +index 77e3a1053..c9187bf81 100644 +--- a/gcc/config/loongarch/loongarch.cc ++++ b/gcc/config/loongarch/loongarch.cc +@@ -1610,6 +1610,9 @@ loongarch_weak_symbol_p (const_rtx x) + bool + loongarch_symbol_binds_local_p (const_rtx x) + { ++ if (TARGET_DIRECT_EXTERN_ACCESS) ++ return true; ++ + if (SYMBOL_REF_P (x)) + return (SYMBOL_REF_DECL (x) + ? 
targetm.binds_local_p (SYMBOL_REF_DECL (x)) +@@ -6093,6 +6096,9 @@ loongarch_option_override_internal (struct gcc_options *opts) + if (loongarch_branch_cost == 0) + loongarch_branch_cost = loongarch_cost->branch_cost; + ++ if (TARGET_DIRECT_EXTERN_ACCESS && flag_shlib) ++ error ("%qs cannot be used for compiling a shared library", ++ "-mdirect-extern-access"); + + switch (la_target.cmodel) + { +diff --git a/gcc/config/loongarch/loongarch.opt b/gcc/config/loongarch/loongarch.opt +index 639523421..96c811c85 100644 +--- a/gcc/config/loongarch/loongarch.opt ++++ b/gcc/config/loongarch/loongarch.opt +@@ -191,3 +191,7 @@ Enum(cmodel) String(extreme) Value(CMODEL_EXTREME) + mcmodel= + Target RejectNegative Joined Enum(cmodel) Var(la_opt_cmodel) Init(CMODEL_NORMAL) + Specify the code model. ++ ++mdirect-extern-access ++Target Var(TARGET_DIRECT_EXTERN_ACCESS) Init(0) ++Avoid using the GOT to access external symbols. +diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi +index 2a5592516..9ec937b84 100644 +--- a/gcc/doc/invoke.texi ++++ b/gcc/doc/invoke.texi +@@ -1007,6 +1007,7 @@ Objective-C and Objective-C++ Dialects}. + -memcpy -mno-memcpy -mstrict-align -mno-strict-align @gol + -mmax-inline-memcpy-size=@var{n} @gol + -mexplicit-relocs -mno-explicit-relocs @gol ++-mdirect-extern-access -mno-direct-extern-access @gol + -mcmodel=@var{code-model}} + + @emph{M32R/D Options} +@@ -24649,6 +24650,20 @@ GCC build-time by detecting corresponding assembler support: + @code{-mno-explicit-relocs} otherwise. This option is mostly useful for + debugging, or interoperation with assemblers different from the build-time + one. ++ ++@item -mdirect-extern-access ++@itemx -mno-direct-extern-access ++@opindex mdirect-extern-access ++Do not use or use GOT to access external symbols. The default is ++@option{-mno-direct-extern-access}: GOT is used for external symbols with ++default visibility, but not used for other external symbols. 
++ ++With @option{-mdirect-extern-access}, GOT is not used and all external ++symbols are PC-relatively addressed. It is @strong{only} suitable for ++environments where no dynamic link is performed, like firmwares, OS ++kernels, executables linked with @option{-static} or @option{-static-pie}. ++@option{-mdirect-extern-access} is not compatible with @option{-fPIC} or ++@option{-fpic}. + @end table + + @node M32C Options +diff --git a/gcc/testsuite/gcc.target/loongarch/direct-extern-1.c b/gcc/testsuite/gcc.target/loongarch/direct-extern-1.c +new file mode 100644 +index 000000000..85c6c1e8a +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/direct-extern-1.c +@@ -0,0 +1,6 @@ ++/* { dg-do compile } */ ++/* { dg-options "-mexplicit-relocs -mdirect-extern-access" } */ ++/* { dg-final { scan-assembler-not "got" } } */ ++ ++extern int x; ++int f() { return x; } +diff --git a/gcc/testsuite/gcc.target/loongarch/direct-extern-2.c b/gcc/testsuite/gcc.target/loongarch/direct-extern-2.c +new file mode 100644 +index 000000000..58d8bd68a +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/direct-extern-2.c +@@ -0,0 +1,6 @@ ++/* { dg-do compile } */ ++/* { dg-options "-mno-explicit-relocs -mdirect-extern-access" } */ ++/* { dg-final { scan-assembler-not "la.global" } } */ ++ ++extern int x; ++int f() { return x; } +-- +2.33.0 + diff --git a/LoongArch-add-model-attribute.patch b/LoongArch-add-model-attribute.patch new file mode 100644 index 0000000000000000000000000000000000000000..d8161a3682e7915a21dc06cae702c9bd294f0104 --- /dev/null +++ b/LoongArch-add-model-attribute.patch @@ -0,0 +1,477 @@ +From 859ed9ee2dc28b98e11b2bfdeabb0bda7dc921b0 Mon Sep 17 00:00:00 2001 +From: Xi Ruoyao +Date: Fri, 29 Jul 2022 21:45:40 +0800 +Subject: [PATCH 014/124] LoongArch: add model attribute + +A linker script and/or a section attribute may locate some object +specially, so we need to handle the code model for such objects +differently than the -mcmodel setting. 
This happens when the Linux +kernel loads a module with per-CPU variables. + +Add an attribute to override the code model for a specific variable. + +gcc/ChangeLog: + + * config/loongarch/loongarch-protos.h (loongarch_symbol_type): + Add SYMBOL_PCREL64 and change the description for SYMBOL_PCREL. + * config/loongarch/loongarch.cc (loongarch_attribute_table): + New attribute table. + (TARGET_ATTRIBUTE_TABLE): Define the target hook. + (loongarch_handle_model_attribute): New static function. + (loongarch_classify_symbol): Take TARGET_CMODEL_EXTREME and the + model attribute of SYMBOL_REF_DECL into account returning + SYMBOL_PCREL or SYMBOL_PCREL64. + (loongarch_use_anchors_for_symbol_p): New static function. + (TARGET_USE_ANCHORS_FOR_SYMBOL_P): Define the target hook. + (loongarch_symbol_extreme_p): New static function. + (loongarch_symbolic_constant_p): Handle SYMBOL_PCREL64. + (loongarch_symbol_insns): Likewise. + (loongarch_split_symbol_type): Likewise. + (loongarch_split_symbol): Check SYMBOL_PCREL64 instead of + TARGET_CMODEL_EXTREME for PC-relative addressing. + (loongarch_print_operand_reloc): Likewise. + * doc/extend.texi (Variable Attributes): Document new + LoongArch specific attribute. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/attr-model-test.c: New test. + * gcc.target/loongarch/attr-model-1.c: New test. + * gcc.target/loongarch/attr-model-2.c: New test. + * gcc.target/loongarch/attr-model-diag.c: New test. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/loongarch-protos.h | 8 +- + gcc/config/loongarch/loongarch.cc | 190 ++++++++++++++++-- + gcc/doc/extend.texi | 17 ++ + .../gcc.target/loongarch/attr-model-1.c | 6 + + .../gcc.target/loongarch/attr-model-2.c | 6 + + .../gcc.target/loongarch/attr-model-diag.c | 7 + + .../gcc.target/loongarch/attr-model-test.c | 25 +++ + 7 files changed, 238 insertions(+), 21 deletions(-) + create mode 100644 gcc/testsuite/gcc.target/loongarch/attr-model-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/attr-model-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/attr-model-diag.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/attr-model-test.c + +diff --git a/gcc/config/loongarch/loongarch-protos.h b/gcc/config/loongarch/loongarch-protos.h +index cadaad751..77b221724 100644 +--- a/gcc/config/loongarch/loongarch-protos.h ++++ b/gcc/config/loongarch/loongarch-protos.h +@@ -28,7 +28,12 @@ along with GCC; see the file COPYING3. If not see + The symbol's value will be loaded directly from the GOT. + + SYMBOL_PCREL +- The symbol's value will be loaded directly from data section. ++ The symbol's value will be loaded directly from data section within ++ +/- 2GiB range. ++ ++ SYMBOL_PCREL64 ++ The symbol's value will be loaded directly from data section within ++ +/- 8EiB range. + + SYMBOL_TLS + A thread-local symbol. +@@ -42,6 +47,7 @@ along with GCC; see the file COPYING3. 
If not see + enum loongarch_symbol_type { + SYMBOL_GOT_DISP, + SYMBOL_PCREL, ++ SYMBOL_PCREL64, + SYMBOL_TLS, + SYMBOL_TLS_IE, + SYMBOL_TLS_LE, +diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc +index 452aba9d4..77e3a1053 100644 +--- a/gcc/config/loongarch/loongarch.cc ++++ b/gcc/config/loongarch/loongarch.cc +@@ -1633,8 +1633,11 @@ loongarch_rtx_constant_in_small_data_p (machine_mode mode) + static enum loongarch_symbol_type + loongarch_classify_symbol (const_rtx x) + { ++ enum loongarch_symbol_type pcrel = ++ TARGET_CMODEL_EXTREME ? SYMBOL_PCREL64 : SYMBOL_PCREL; ++ + if (!SYMBOL_REF_P (x)) +- return SYMBOL_PCREL; ++ return pcrel; + + if (SYMBOL_REF_TLS_MODEL (x)) + return SYMBOL_TLS; +@@ -1642,7 +1645,28 @@ loongarch_classify_symbol (const_rtx x) + if (!loongarch_symbol_binds_local_p (x)) + return SYMBOL_GOT_DISP; + +- return SYMBOL_PCREL; ++ tree t = SYMBOL_REF_DECL (x); ++ if (!t) ++ return pcrel; ++ ++ t = lookup_attribute ("model", DECL_ATTRIBUTES (t)); ++ if (!t) ++ return pcrel; ++ ++ t = TREE_VALUE (TREE_VALUE (t)); ++ ++ /* loongarch_handle_model_attribute should reject other values. */ ++ gcc_assert (TREE_CODE (t) == STRING_CST); ++ ++ const char *model = TREE_STRING_POINTER (t); ++ if (strcmp (model, "normal") == 0) ++ return SYMBOL_PCREL; ++ if (strcmp (model, "extreme") == 0) ++ return SYMBOL_PCREL64; ++ ++ /* loongarch_handle_model_attribute should reject unknown model ++ name. */ ++ gcc_unreachable (); + } + + /* Classify the base of symbolic expression X, given that X appears in +@@ -1695,6 +1719,7 @@ loongarch_symbolic_constant_p (rtx x, enum loongarch_symbol_type *symbol_type) + case SYMBOL_TLSGD: + case SYMBOL_TLSLDM: + case SYMBOL_PCREL: ++ case SYMBOL_PCREL64: + /* GAS rejects offsets outside the range [-2^31, 2^31-1]. 
*/ + return sext_hwi (INTVAL (offset), 32) == INTVAL (offset); + +@@ -1729,6 +1754,9 @@ loongarch_symbol_insns (enum loongarch_symbol_type type, machine_mode mode) + case SYMBOL_TLSLDM: + return 3; + ++ case SYMBOL_PCREL64: ++ return 5; ++ + case SYMBOL_TLS: + /* We don't treat a bare TLS symbol as a constant. */ + return 0; +@@ -1833,7 +1861,7 @@ loongarch_valid_offset_p (rtx x, machine_mode mode) + return true; + } + +-/* Should a symbol of type SYMBOL_TYPE should be split in two? */ ++/* Should a symbol of type SYMBOL_TYPE should be split in two or more? */ + + bool + loongarch_split_symbol_type (enum loongarch_symbol_type symbol_type) +@@ -1841,6 +1869,7 @@ loongarch_split_symbol_type (enum loongarch_symbol_type symbol_type) + switch (symbol_type) + { + case SYMBOL_PCREL: ++ case SYMBOL_PCREL64: + case SYMBOL_GOT_DISP: + case SYMBOL_TLS_IE: + case SYMBOL_TLS_LE: +@@ -2718,6 +2747,20 @@ loongarch_force_address (rtx x, machine_mode mode) + return x; + } + ++static bool ++loongarch_symbol_extreme_p (enum loongarch_symbol_type type) ++{ ++ switch (type) ++ { ++ case SYMBOL_PCREL: ++ return false; ++ case SYMBOL_PCREL64: ++ return true; ++ default: ++ return TARGET_CMODEL_EXTREME; ++ } ++} ++ + /* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise + it appears in a MEM of that mode. Return true if ADDR is a legitimate + constant in that context and can be split into high and low parts. 
+@@ -2757,7 +2800,7 @@ loongarch_split_symbol (rtx temp, rtx addr, machine_mode mode, rtx *low_out) + high = gen_rtx_HIGH (Pmode, copy_rtx (addr)); + high = loongarch_force_temporary (temp, high); + +- if (TARGET_CMODEL_EXTREME && can_create_pseudo_p ()) ++ if (loongarch_symbol_extreme_p (symbol_type) && can_create_pseudo_p ()) + { + gcc_assert (TARGET_EXPLICIT_RELOCS); + +@@ -2771,14 +2814,16 @@ loongarch_split_symbol (rtx temp, rtx addr, machine_mode mode, rtx *low_out) + if (low_out) + switch (symbol_type) + { +- case SYMBOL_PCREL: +- { +- if (TARGET_CMODEL_EXTREME && can_create_pseudo_p ()) ++ case SYMBOL_PCREL64: ++ if (can_create_pseudo_p ()) ++ { + *low_out = gen_rtx_PLUS (Pmode, high, temp1); +- else +- *low_out = gen_rtx_LO_SUM (Pmode, high, addr); +- break; +- } ++ break; ++ } ++ /* fall through */ ++ case SYMBOL_PCREL: ++ *low_out = gen_rtx_LO_SUM (Pmode, high, addr); ++ break; + + case SYMBOL_GOT_DISP: + /* SYMBOL_GOT_DISP symbols are loaded from the GOT. */ +@@ -4745,22 +4790,23 @@ loongarch_print_operand_reloc (FILE *file, rtx op, bool hi64_part, + bool hi_reloc) + { + const char *reloc; ++ enum loongarch_symbol_type symbol_type = ++ loongarch_classify_symbolic_expression (op); + +- if (TARGET_CMODEL_EXTREME) ++ if (loongarch_symbol_extreme_p (symbol_type)) + gcc_assert (TARGET_EXPLICIT_RELOCS); + +- switch (loongarch_classify_symbolic_expression (op)) ++ switch (symbol_type) + { +- case SYMBOL_PCREL: ++ case SYMBOL_PCREL64: + if (hi64_part) + { +- if (TARGET_CMODEL_EXTREME) +- reloc = hi_reloc ? "%pc64_hi12" : "%pc64_lo20"; +- else +- gcc_unreachable (); ++ reloc = hi_reloc ? "%pc64_hi12" : "%pc64_lo20"; ++ break; + } +- else +- reloc = hi_reloc ? "%pc_hi20" : "%pc_lo12"; ++ /* fall through */ ++ case SYMBOL_PCREL: ++ reloc = hi_reloc ? 
"%pc_hi20" : "%pc_lo12"; + break; + + case SYMBOL_GOT_DISP: +@@ -6316,6 +6362,104 @@ loongarch_starting_frame_offset (void) + return crtl->outgoing_args_size; + } + ++static tree ++loongarch_handle_model_attribute (tree *node, tree name, tree arg, int, ++ bool *no_add_attrs) ++{ ++ tree decl = *node; ++ if (TREE_CODE (decl) == VAR_DECL) ++ { ++ if (DECL_THREAD_LOCAL_P (decl)) ++ { ++ error_at (DECL_SOURCE_LOCATION (decl), ++ "%qE attribute cannot be specified for thread-local " ++ "variables", name); ++ *no_add_attrs = true; ++ return NULL_TREE; ++ } ++ if (DECL_CONTEXT (decl) ++ && TREE_CODE (DECL_CONTEXT (decl)) == FUNCTION_DECL ++ && !TREE_STATIC (decl)) ++ { ++ error_at (DECL_SOURCE_LOCATION (decl), ++ "%qE attribute cannot be specified for local " ++ "variables", name); ++ *no_add_attrs = true; ++ return NULL_TREE; ++ } ++ if (DECL_REGISTER (decl)) ++ { ++ error_at (DECL_SOURCE_LOCATION (decl), ++ "%qE attribute cannot be specified for register " ++ "variables", name); ++ *no_add_attrs = true; ++ return NULL_TREE; ++ } ++ if (!TARGET_EXPLICIT_RELOCS) ++ { ++ error_at (DECL_SOURCE_LOCATION (decl), ++ "%qE attribute requires %s", name, "-mexplicit-relocs"); ++ *no_add_attrs = true; ++ return NULL_TREE; ++ } ++ ++ arg = TREE_VALUE (arg); ++ if (TREE_CODE (arg) != STRING_CST) ++ { ++ error_at (DECL_SOURCE_LOCATION (decl), ++ "invalid argument of %qE attribute", name); ++ *no_add_attrs = true; ++ return NULL_TREE; ++ } ++ ++ const char *model = TREE_STRING_POINTER (arg); ++ if (strcmp (model, "normal") != 0 ++ && strcmp (model, "extreme") != 0) ++ { ++ error_at (DECL_SOURCE_LOCATION (decl), ++ "invalid argument of %qE attribute", name); ++ *no_add_attrs = true; ++ return NULL_TREE; ++ } ++ ++ if (lookup_attribute ("model", DECL_ATTRIBUTES (decl))) ++ { ++ error_at (DECL_SOURCE_LOCATION (decl), ++ "multiple %qE attribute", name); ++ *no_add_attrs = true; ++ return NULL_TREE; ++ } ++ } ++ else ++ { ++ warning (OPT_Wattributes, "%qE attribute ignored", name); ++ 
*no_add_attrs = true; ++ } ++ return NULL_TREE; ++} ++ ++static const struct attribute_spec loongarch_attribute_table[] = ++{ ++ /* { name, min_len, max_len, decl_req, type_req, fn_type_req, ++ affects_type_identity, handler, exclude } */ ++ { "model", 1, 1, true, false, false, false, ++ loongarch_handle_model_attribute, NULL }, ++ /* The last attribute spec is set to be NULL. */ ++ {} ++}; ++ ++bool ++loongarch_use_anchors_for_symbol_p (const_rtx symbol) ++{ ++ tree decl = SYMBOL_REF_DECL (symbol); ++ ++ /* The section anchor optimization may break custom address model. */ ++ if (decl && lookup_attribute ("model", DECL_ATTRIBUTES (decl))) ++ return false; ++ ++ return default_use_anchors_for_symbol_p (symbol); ++} ++ + /* Initialize the GCC target structure. */ + #undef TARGET_ASM_ALIGNED_HI_OP + #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t" +@@ -6504,6 +6648,12 @@ loongarch_starting_frame_offset (void) + #undef TARGET_HAVE_SPECULATION_SAFE_VALUE + #define TARGET_HAVE_SPECULATION_SAFE_VALUE speculation_safe_value_not_needed + ++#undef TARGET_ATTRIBUTE_TABLE ++#define TARGET_ATTRIBUTE_TABLE loongarch_attribute_table ++ ++#undef TARGET_USE_ANCHORS_FOR_SYMBOL_P ++#define TARGET_USE_ANCHORS_FOR_SYMBOL_P loongarch_use_anchors_for_symbol_p ++ + struct gcc_target targetm = TARGET_INITIALIZER; + + #include "gt-loongarch.h" +diff --git a/gcc/doc/extend.texi b/gcc/doc/extend.texi +index 33a776a79..da2840c23 100644 +--- a/gcc/doc/extend.texi ++++ b/gcc/doc/extend.texi +@@ -7277,6 +7277,7 @@ attributes. + * Blackfin Variable Attributes:: + * H8/300 Variable Attributes:: + * IA-64 Variable Attributes:: ++* LoongArch Variable Attributes:: + * M32R/D Variable Attributes:: + * MeP Variable Attributes:: + * Microsoft Windows Variable Attributes:: +@@ -8061,6 +8062,22 @@ defined by shared libraries. + + @end table + ++@node LoongArch Variable Attributes ++@subsection LoongArch Variable Attributes ++ ++One attribute is currently defined for the LoongArch. 
++ ++@table @code ++@item model("@var{name}") ++@cindex @code{model} variable attribute, LoongArch ++Use this attribute on the LoongArch to use a different code model for ++addressing this variable, than the code model specified by the global ++@option{-mcmodel} option. This attribute is mostly useful if a ++@code{section} attribute and/or a linker script will locate this object ++specially. Currently the only supported values of @var{name} are ++@code{normal} and @code{extreme}. ++@end table ++ + @node M32R/D Variable Attributes + @subsection M32R/D Variable Attributes + +diff --git a/gcc/testsuite/gcc.target/loongarch/attr-model-1.c b/gcc/testsuite/gcc.target/loongarch/attr-model-1.c +new file mode 100644 +index 000000000..916d715b9 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/attr-model-1.c +@@ -0,0 +1,6 @@ ++/* { dg-do compile } */ ++/* { dg-options "-mexplicit-relocs -mcmodel=normal -O2" } */ ++/* { dg-final { scan-assembler-times "%pc64_hi12" 2 } } */ ++ ++#define ATTR_MODEL_TEST ++#include "attr-model-test.c" +diff --git a/gcc/testsuite/gcc.target/loongarch/attr-model-2.c b/gcc/testsuite/gcc.target/loongarch/attr-model-2.c +new file mode 100644 +index 000000000..a74c795ac +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/attr-model-2.c +@@ -0,0 +1,6 @@ ++/* { dg-do compile } */ ++/* { dg-options "-mexplicit-relocs -mcmodel=extreme -O2" } */ ++/* { dg-final { scan-assembler-times "%pc64_hi12" 3 } } */ ++ ++#define ATTR_MODEL_TEST ++#include "attr-model-test.c" +diff --git a/gcc/testsuite/gcc.target/loongarch/attr-model-diag.c b/gcc/testsuite/gcc.target/loongarch/attr-model-diag.c +new file mode 100644 +index 000000000..88beede74 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/attr-model-diag.c +@@ -0,0 +1,7 @@ ++/* { dg-do compile } */ ++/* { dg-options "-mexplicit-relocs" } */ ++ ++__thread int x __attribute__((model("extreme"))); /* { dg-error "attribute cannot be specified for thread-local variables" } */ ++register int y 
__asm__("tp") __attribute__((model("extreme"))); /* { dg-error "attribute cannot be specified for register variables" } */ ++int z __attribute__((model(114))); /* { dg-error "invalid argument" } */ ++int t __attribute__((model("good"))); /* { dg-error "invalid argument" } */ +diff --git a/gcc/testsuite/gcc.target/loongarch/attr-model-test.c b/gcc/testsuite/gcc.target/loongarch/attr-model-test.c +new file mode 100644 +index 000000000..5b61a7af9 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/attr-model-test.c +@@ -0,0 +1,25 @@ ++#ifdef ATTR_MODEL_TEST ++int x __attribute__((model("extreme"))); ++int y __attribute__((model("normal"))); ++int z; ++ ++int ++test(void) ++{ ++ return x + y + z; ++} ++ ++/* The following will be used for kernel per-cpu storage implemention. */ ++ ++register char *per_cpu_base __asm__("r21"); ++static int counter __attribute__((section(".data..percpu"), model("extreme"))); ++ ++void ++inc_counter(void) ++{ ++ int *ptr = (int *)(per_cpu_base + (long)&counter); ++ (*ptr)++; ++} ++#endif ++ ++int dummy; +-- +2.33.0 + diff --git a/LoongArch-add-new-configure-option-with-strict-align.patch b/LoongArch-add-new-configure-option-with-strict-align.patch new file mode 100644 index 0000000000000000000000000000000000000000..0859a8f41d68a24cdd8297e3cad1bd5b353fbccb --- /dev/null +++ b/LoongArch-add-new-configure-option-with-strict-align.patch @@ -0,0 +1,86 @@ +From da22606529688b125e6e08589a6dfe741b8dd18d Mon Sep 17 00:00:00 2001 +From: Yang Yujie +Date: Mon, 28 Aug 2023 10:20:12 +0800 +Subject: [PATCH 060/124] LoongArch: add new configure option + --with-strict-align-lib + +LoongArch processors may not support memory accesses without natural +alignments. Building libraries with -mstrict-align may help with +toolchain binary compatiblity and performance on these implementations +(e.g. Loongson 2K1000LA). + +No significant performance degredation is observed on current mainstream +LoongArch processors when the option is enabled. 
+ +gcc/ChangeLog: + + * config.gcc: use -mstrict-align for building libraries + if --with-strict-align-lib is given. + * doc/install.texi: likewise. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config.gcc | 16 +++++++++++++++- + gcc/doc/install.texi | 4 ++++ + 2 files changed, 19 insertions(+), 1 deletion(-) + +diff --git a/gcc/config.gcc b/gcc/config.gcc +index 62525c296..16bbaea45 100644 +--- a/gcc/config.gcc ++++ b/gcc/config.gcc +@@ -4966,7 +4966,7 @@ case "${target}" in + ;; + + loongarch*-*) +- supported_defaults="abi arch tune fpu simd multilib-default" ++ supported_defaults="abi arch tune fpu simd multilib-default strict-align-lib" + + # Local variables + unset \ +@@ -5163,6 +5163,17 @@ case "${target}" in + ;; + esac + ++ # Build libraries with -mstrict-align if --with-strict-align-lib is given. ++ case ${with_strict_align_lib} in ++ yes) strict_align_opt="/mstrict-align" ;; ++ ""|no) ;; ++ *) ++ echo "Unknown option: --with-strict-align-lib=${with_strict_align_lib}" 1>&2 ++ exit 1 ++ ;; ++ esac ++ ++ + # Handle --with-multilib-default + if echo "${with_multilib_default}" \ + | grep -E -e '[[:space:]]' -e '//' -e '/$' -e '^/' > /dev/null 2>&1; then +@@ -5324,6 +5335,9 @@ case "${target}" in + ;; + esac + ++ # Use mstrict-align for building libraries if --with-strict-align-lib is given. ++ loongarch_multilib_list_make="${loongarch_multilib_list_make}${strict_align_opt}" ++ + # Check for repeated configuration of the same multilib variant. + if echo "${elem_abi_base}/${elem_abi_ext}" \ + | grep -E "^(${all_abis%|})$" >/dev/null 2>&1; then +diff --git a/gcc/doc/install.texi b/gcc/doc/install.texi +index 1fc5f0bfa..a8851e8bd 100644 +--- a/gcc/doc/install.texi ++++ b/gcc/doc/install.texi +@@ -1353,6 +1353,10 @@ Multiple @var{option}s may appear consecutively while @var{arch} may only + appear in the beginning or be omitted (which means @option{-march=abi-default} + is applied when building the libraries). 
+ ++@item --with-strict-align-lib ++On LoongArch targets, build all enabled multilibs with @option{-mstrict-align} ++(Not enabled by default). ++ + @item --with-multilib-generator=@var{config} + Specify what multilibs to build. @var{config} is a semicolon separated list of + values, possibly consisting of a single value. Currently only implemented +-- +2.33.0 + diff --git a/LoongArch-adjust-the-default-of-mexplicit-relocs-by-.patch b/LoongArch-adjust-the-default-of-mexplicit-relocs-by-.patch new file mode 100644 index 0000000000000000000000000000000000000000..5874c7e591efffb38e29bcb47128b7d7cc03975b --- /dev/null +++ b/LoongArch-adjust-the-default-of-mexplicit-relocs-by-.patch @@ -0,0 +1,149 @@ +From aa10a2949c86e46b7952acbb58599e9bfdeabdfb Mon Sep 17 00:00:00 2001 +From: Xi Ruoyao +Date: Tue, 26 Jul 2022 21:46:20 +0800 +Subject: [PATCH 006/124] LoongArch: adjust the default of -mexplicit-relocs by + checking gas feature + +The assembly produced with -mexplicit-relocs is not supported by gas <= +2.39. Check if the assembler supports explicit relocations and set the +default accordingly. + +gcc/ChangeLog: + + * configure.ac (HAVE_AS_EXPLICIT_RELOCS): Define to 1 if the + assembler supports explicit relocation for LoongArch. + * configure: Regenerate. + * config/loongarch/loongarch-opts.h (HAVE_AS_EXPLICIT_RELOCS): + Define to 0 if not defined. + * config/loongarch/genopts/loongarch.opt.in + (TARGET_EXPLICIT_RELOCS): Default to HAVE_AS_EXPLICIT_RELOCS. + * config/loongarch/loongarch.opt: Regenerate. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/genopts/loongarch.opt.in | 2 +- + gcc/config/loongarch/loongarch-opts.h | 4 +++ + gcc/config/loongarch/loongarch.opt | 2 +- + gcc/configure | 33 ++++++++++++++++++- + gcc/configure.ac | 7 +++- + 5 files changed, 44 insertions(+), 4 deletions(-) + +diff --git a/gcc/config/loongarch/genopts/loongarch.opt.in b/gcc/config/loongarch/genopts/loongarch.opt.in +index 6f3950093..a571b6b75 100644 +--- a/gcc/config/loongarch/genopts/loongarch.opt.in ++++ b/gcc/config/loongarch/genopts/loongarch.opt.in +@@ -155,7 +155,7 @@ Target Joined RejectNegative UInteger Var(loongarch_max_inline_memcpy_size) Init + -mmax-inline-memcpy-size=SIZE Set the max size of memcpy to inline, default is 1024. + + mexplicit-relocs +-Target Var(TARGET_EXPLICIT_RELOCS) Init(1) ++Target Var(TARGET_EXPLICIT_RELOCS) Init(HAVE_AS_EXPLICIT_RELOCS) + Use %reloc() assembly operators. + + ; The code model option names for -mcmodel. +diff --git a/gcc/config/loongarch/loongarch-opts.h b/gcc/config/loongarch/loongarch-opts.h +index eaa6fc074..da24ecd2b 100644 +--- a/gcc/config/loongarch/loongarch-opts.h ++++ b/gcc/config/loongarch/loongarch-opts.h +@@ -87,4 +87,8 @@ loongarch_config_target (struct loongarch_target *target, + while -m[no]-memcpy imposes a global constraint. */ + #define TARGET_DO_OPTIMIZE_BLOCK_MOVE_P loongarch_do_optimize_block_move_p() + ++#ifndef HAVE_AS_EXPLICIT_RELOCS ++#define HAVE_AS_EXPLICIT_RELOCS 0 ++#endif ++ + #endif /* LOONGARCH_OPTS_H */ +diff --git a/gcc/config/loongarch/loongarch.opt b/gcc/config/loongarch/loongarch.opt +index 7a8c5b444..9df7e1872 100644 +--- a/gcc/config/loongarch/loongarch.opt ++++ b/gcc/config/loongarch/loongarch.opt +@@ -162,7 +162,7 @@ Target Joined RejectNegative UInteger Var(loongarch_max_inline_memcpy_size) Init + -mmax-inline-memcpy-size=SIZE Set the max size of memcpy to inline, default is 1024. 
+ + mexplicit-relocs +-Target Var(TARGET_EXPLICIT_RELOCS) Init(1) ++Target Var(TARGET_EXPLICIT_RELOCS) Init(HAVE_AS_EXPLICIT_RELOCS) + Use %reloc() assembly operators. + + ; The code model option names for -mcmodel. +diff --git a/gcc/configure b/gcc/configure +index 98bbf0f85..840eddc7c 100755 +--- a/gcc/configure ++++ b/gcc/configure +@@ -28792,7 +28792,7 @@ $as_echo "#define HAVE_AS_MARCH_ZIFENCEI 1" >>confdefs.h + fi + + ;; +- loongarch*-*-*) ++ loongarch*-*-*) + { $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler for .dtprelword support" >&5 + $as_echo_n "checking assembler for .dtprelword support... " >&6; } + if ${gcc_cv_as_loongarch_dtprelword+:} false; then : +@@ -28828,6 +28828,37 @@ if test $gcc_cv_as_loongarch_dtprelword != yes; then + $as_echo "#define HAVE_AS_DTPRELWORD 1" >>confdefs.h + + fi ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler for explicit relocation support" >&5 ++$as_echo_n "checking assembler for explicit relocation support... " >&6; } ++if ${gcc_cv_as_loongarch_explicit_relocs+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ gcc_cv_as_loongarch_explicit_relocs=no ++ if test x$gcc_cv_as != x; then ++ $as_echo 'a:pcalau12i $t0,%pc_hi20(a)' > conftest.s ++ if { ac_try='$gcc_cv_as $gcc_cv_as_flags -o conftest.o conftest.s >&5' ++ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 ++ test $ac_status = 0; }; } ++ then ++ gcc_cv_as_loongarch_explicit_relocs=yes ++ else ++ echo "configure: failed program was" >&5 ++ cat conftest.s >&5 ++ fi ++ rm -f conftest.o conftest.s ++ fi ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gcc_cv_as_loongarch_explicit_relocs" >&5 ++$as_echo "$gcc_cv_as_loongarch_explicit_relocs" >&6; } ++if test $gcc_cv_as_loongarch_explicit_relocs = yes; then ++ ++$as_echo "#define HAVE_AS_EXPLICIT_RELOCS 1" >>confdefs.h ++ ++fi ++ + ;; + s390*-*-*) + { $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler for .gnu_attribute support" >&5 +diff --git a/gcc/configure.ac b/gcc/configure.ac +index c74f4b555..975c852c6 100644 +--- a/gcc/configure.ac ++++ b/gcc/configure.ac +@@ -5309,7 +5309,7 @@ configured with --enable-newlib-nano-formatted-io.]) + [AC_DEFINE(HAVE_AS_MARCH_ZIFENCEI, 1, + [Define if the assembler understands -march=rv*_zifencei.])]) + ;; +- loongarch*-*-*) ++ loongarch*-*-*) + gcc_GAS_CHECK_FEATURE([.dtprelword support], + gcc_cv_as_loongarch_dtprelword, [2,18,0],, + [.section .tdata,"awT",@progbits +@@ -5319,6 +5319,11 @@ x: + .dtprelword x+0x8000],, + [AC_DEFINE(HAVE_AS_DTPRELWORD, 1, + [Define if your assembler supports .dtprelword.])]) ++ gcc_GAS_CHECK_FEATURE([explicit relocation support], ++ gcc_cv_as_loongarch_explicit_relocs,, ++ [a:pcalau12i $t0,%pc_hi20(a)],, ++ [AC_DEFINE(HAVE_AS_EXPLICIT_RELOCS, 1, ++ [Define if your assembler supports explicit relocation.])]) + ;; + s390*-*-*) + gcc_GAS_CHECK_FEATURE([.gnu_attribute support], +-- +2.33.0 + diff --git a/LoongArch-define-preprocessing-macros-__loongarch_-a.patch b/LoongArch-define-preprocessing-macros-__loongarch_-a.patch new file mode 100644 index 0000000000000000000000000000000000000000..53ab84bb043078d6576a799ed6d94faaca41deda --- /dev/null +++ b/LoongArch-define-preprocessing-macros-__loongarch_-a.patch @@ -0,0 +1,42 @@ +From 41b01fb34126d8b40635af1847b21716f62e5388 Mon Sep 17 00:00:00 2001 +From: Yang Yujie +Date: 
Mon, 28 Aug 2023 09:32:16 +0800 +Subject: [PATCH 059/124] LoongArch: define preprocessing macros + "__loongarch_{arch,tune}" + +These are exported according to the LoongArch Toolchain Conventions[1] +as a replacement of the obsolete "_LOONGARCH_{ARCH,TUNE}" macros, +which are expanded to strings representing the actual architecture +and microarchitecture of the target. + +[1] currently relased at https://github.com/loongson/LoongArch-Documentation + /blob/main/docs/LoongArch-toolchain-conventions-EN.adoc + +gcc/ChangeLog: + + * config/loongarch/loongarch-c.cc: Export macros + "__loongarch_{arch,tune}" in the preprocessor. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/loongarch-c.cc | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/gcc/config/loongarch/loongarch-c.cc b/gcc/config/loongarch/loongarch-c.cc +index 2cf84eec7..c9b11a042 100644 +--- a/gcc/config/loongarch/loongarch-c.cc ++++ b/gcc/config/loongarch/loongarch-c.cc +@@ -64,6 +64,9 @@ loongarch_cpu_cpp_builtins (cpp_reader *pfile) + LARCH_CPP_SET_PROCESSOR ("_LOONGARCH_ARCH", la_target.cpu_arch); + LARCH_CPP_SET_PROCESSOR ("_LOONGARCH_TUNE", la_target.cpu_tune); + ++ LARCH_CPP_SET_PROCESSOR ("__loongarch_arch", la_target.cpu_arch); ++ LARCH_CPP_SET_PROCESSOR ("__loongarch_tune", la_target.cpu_tune); ++ + /* Base architecture / ABI. */ + if (TARGET_64BIT) + { +-- +2.33.0 + diff --git a/LoongArch-document-m-no-explicit-relocs.patch b/LoongArch-document-m-no-explicit-relocs.patch new file mode 100644 index 0000000000000000000000000000000000000000..9e025246778790668f038807fb4260d556cbf3dd --- /dev/null +++ b/LoongArch-document-m-no-explicit-relocs.patch @@ -0,0 +1,43 @@ +From 3742550e00bf0401ead01cde64fc1571ffa075fc Mon Sep 17 00:00:00 2001 +From: WANG Xuerui +Date: Wed, 27 Jul 2022 15:01:17 +0800 +Subject: [PATCH 007/124] LoongArch: document -m[no-]explicit-relocs + +gcc/ChangeLog: + + * doc/invoke.texi: Document -m[no-]explicit-relocs for + LoongArch. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/doc/invoke.texi | 13 +++++++++++++ + 1 file changed, 13 insertions(+) + +diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi +index 2b376e0e9..1de2b2bd4 100644 +--- a/gcc/doc/invoke.texi ++++ b/gcc/doc/invoke.texi +@@ -24663,6 +24663,19 @@ global symbol: The data got table must be within +/-8EiB addressing space. + @end itemize + @end table + The default code model is @code{normal}. ++ ++@item -mexplicit-relocs ++@itemx -mno-explicit-relocs ++@opindex mexplicit-relocs ++@opindex mno-explicit-relocs ++Use or do not use assembler relocation operators when dealing with symbolic ++addresses. The alternative is to use assembler macros instead, which may ++limit optimization. The default value for the option is determined during ++GCC build-time by detecting corresponding assembler support: ++@code{-mexplicit-relocs} if said support is present, ++@code{-mno-explicit-relocs} otherwise. This option is mostly useful for ++debugging, or interoperation with assemblers different from the build-time ++one. + @end table + + @node M32C Options +-- +2.33.0 + diff --git a/LoongArch-fix-error-building.patch b/LoongArch-fix-error-building.patch new file mode 100644 index 0000000000000000000000000000000000000000..e1d80e09c1dd3869dcc658407789d85b23a338fd --- /dev/null +++ b/LoongArch-fix-error-building.patch @@ -0,0 +1,183 @@ +diff --git a/gcc/config/loongarch/loongarch-builtins.cc b/gcc/config/loongarch/loongarch-builtins.cc +index a4a7dbec9..2d9743d86 100644 +--- a/gcc/config/loongarch/loongarch-builtins.cc ++++ b/gcc/config/loongarch/loongarch-builtins.cc +@@ -2440,11 +2440,6 @@ loongarch_init_builtins (void) + unsigned int i; + tree type; + +- /* Register the type float128_type_node as a built-in type and +- give it an alias "__float128". */ +- (*lang_hooks.types.register_builtin_type) (float128_type_node, +- "__float128"); +- + /* Iterate through all of the bdesc arrays, initializing all of the + builtin functions. 
*/ + for (i = 0; i < ARRAY_SIZE (loongarch_builtins); i++) +diff --git a/gcc/config/loongarch/loongarch-c.cc b/gcc/config/loongarch/loongarch-c.cc +index c9b11a042..76c8ea8db 100644 +--- a/gcc/config/loongarch/loongarch-c.cc ++++ b/gcc/config/loongarch/loongarch-c.cc +@@ -117,17 +117,6 @@ loongarch_cpu_cpp_builtins (cpp_reader *pfile) + builtin_define ("__loongarch_simd_width=256"); + } + +- /* Add support for FLOAT128_TYPE on the LoongArch architecture. */ +- builtin_define ("__FLOAT128_TYPE__"); +- +- /* Map the old _Float128 'q' builtins into the new 'f128' builtins. */ +- builtin_define ("__builtin_fabsq=__builtin_fabsf128"); +- builtin_define ("__builtin_copysignq=__builtin_copysignf128"); +- builtin_define ("__builtin_nanq=__builtin_nanf128"); +- builtin_define ("__builtin_nansq=__builtin_nansf128"); +- builtin_define ("__builtin_infq=__builtin_inff128"); +- builtin_define ("__builtin_huge_valq=__builtin_huge_valf128"); +- + /* Native Data Sizes. */ + builtin_define_with_int_value ("_LOONGARCH_SZINT", INT_TYPE_SIZE); + builtin_define_with_int_value ("_LOONGARCH_SZLONG", LONG_TYPE_SIZE); +diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc +index baa9831aa..ae074edbd 100644 +--- a/gcc/config/loongarch/loongarch.cc ++++ b/gcc/config/loongarch/loongarch.cc +@@ -9712,13 +9712,10 @@ expand_perm_const_2_end: + /* Implement TARGET_VECTORIZE_VEC_PERM_CONST. */ + + static bool +-loongarch_vectorize_vec_perm_const (machine_mode vmode, machine_mode op_mode, ++loongarch_vectorize_vec_perm_const (machine_mode vmode, + rtx target, rtx op0, rtx op1, + const vec_perm_indices &sel) + { +- if (vmode != op_mode) +- return false; +- + struct expand_vec_perm_d d; + int i, nelt, which; + unsigned char orig_perm[MAX_VECT_LEN]; +diff --git a/gcc/doc/extend.texi b/gcc/doc/extend.texi +index bb19d0f27..1d1bac255 100644 +--- a/gcc/doc/extend.texi ++++ b/gcc/doc/extend.texi +@@ -1085,10 +1085,10 @@ types. 
+ As an extension, GNU C and GNU C++ support additional floating + types, which are not supported by all targets. + @itemize @bullet +-@item @code{__float128} is available on i386, x86_64, IA-64, LoongArch +-and hppa HP-UX, as well as on PowerPC GNU/Linux targets that enable ++@item @code{__float128} is available on i386, x86_64, IA-64, and ++hppa HP-UX, as well as on PowerPC GNU/Linux targets that enable + the vector scalar (VSX) instruction set. @code{__float128} supports +-the 128-bit floating type. On i386, x86_64, PowerPC, LoongArch and IA-64, ++the 128-bit floating type. On i386, x86_64, PowerPC, and IA-64 + other than HP-UX, @code{__float128} is an alias for @code{_Float128}. + On hppa and IA-64 HP-UX, @code{__float128} is an alias for @code{long + double}. +@@ -16257,20 +16257,6 @@ function you need to include @code{larchintrin.h}. + void __break (imm0_32767) + @end smallexample + +-Additional built-in functions are available for LoongArch family +-processors to efficiently use 128-bit floating-point (__float128) +-values. +- +-The following are the basic built-in functions supported. 
+-@smallexample +-__float128 __builtin_fabsq (__float128); +-__float128 __builtin_copysignq (__float128, __float128); +-__float128 __builtin_infq (void); +-__float128 __builtin_huge_valq (void); +-__float128 __builtin_nanq (void); +-__float128 __builtin_nansq (void); +-@end smallexample +- + @node MIPS DSP Built-in Functions + @subsection MIPS DSP Built-in Functions + +diff --git a/gcc/testsuite/gcc.target/loongarch/math-float-128.c b/gcc/testsuite/gcc.target/loongarch/math-float-128.c +deleted file mode 100644 +index 387566a57..000000000 +--- a/gcc/testsuite/gcc.target/loongarch/math-float-128.c ++++ /dev/null +@@ -1,81 +0,0 @@ +-/* { dg-do compile } */ +-/* { dg-options " -march=loongarch64 -O2 " } */ +-/* { dg-final { scan-assembler-not "my_fabsq2:.*\\bl\t%plt\\(__builtin_fabsq\\).*my_fabsq2" } } */ +-/* { dg-final { scan-assembler-not "my_copysignq2:.*\\bl\t%plt\\(__builtin_copysignq\\).*my_copysignq2" } } */ +-/* { dg-final { scan-assembler-not "my_infq2:.*\\bl\t%plt\\(__builtin_infq\\).*my_infq2" } } */ +-/* { dg-final { scan-assembler-not "my_huge_valq2:.*\\bl\t%plt\\(__builtin_huge_valq\\).*my_huge_valq2" } } */ +-/* { dg-final { scan-assembler-not "my_nanq2:.*\\bl\t%plt\\(__builtin_nanq\\).*my_nanq2" } } */ +-/* { dg-final { scan-assembler-not "my_nansq2:.*\\bl\t%plt\\(__builtin_nansq\\).*my_nansq2" } } */ +- +-__float128 +-my_fabsq1 (__float128 a) +-{ +- return __builtin_fabsq (a); +-} +- +-_Float128 +-my_fabsq2 (_Float128 a) +-{ +- return __builtin_fabsq (a); +-} +- +-__float128 +-my_copysignq1 (__float128 a, __float128 b) +-{ +- return __builtin_copysignq (a, b); +-} +- +-_Float128 +-my_copysignq2 (_Float128 a, _Float128 b) +-{ +- return __builtin_copysignq (a, b); +-} +- +-__float128 +-my_infq1 (void) +-{ +- return __builtin_infq (); +-} +- +-_Float128 +-my_infq2 (void) +-{ +- return __builtin_infq (); +-} +- +-__float128 +-my_huge_valq1 (void) +-{ +- return __builtin_huge_valq (); +-} +- +-_Float128 +-my_huge_valq2 (void) +-{ +- return 
__builtin_huge_valq (); +-} +- +-__float128 +-my_nanq1 (void) +-{ +- return __builtin_nanq (""); +-} +- +-_Float128 +-my_nanq2 (void) +-{ +- return __builtin_nanq (""); +-} +- +-__float128 +-my_nansq1 (void) +-{ +- return __builtin_nansq (""); +-} +- +-_Float128 +-my_nansq2 (void) +-{ +- return __builtin_nansq (""); +-} +- diff --git a/LoongArch-fix-signed-overflow-in-loongarch_emit_int_.patch b/LoongArch-fix-signed-overflow-in-loongarch_emit_int_.patch new file mode 100644 index 0000000000000000000000000000000000000000..41eeff91d097b6ffd9c12538820e1d7382c85af6 --- /dev/null +++ b/LoongArch-fix-signed-overflow-in-loongarch_emit_int_.patch @@ -0,0 +1,43 @@ +From 15f1e94ddd7128f407ada43fd9e4b26d4a8bba8d Mon Sep 17 00:00:00 2001 +From: Xi Ruoyao +Date: Fri, 4 Nov 2022 01:35:25 +0800 +Subject: [PATCH 026/124] LoongArch: fix signed overflow in + loongarch_emit_int_compare + +Signed overflow is an undefined behavior, so we need to prevent it from +happening, instead of "checking" the result. + +gcc/ChangeLog: + + * config/loongarch/loongarch.cc (loongarch_emit_int_compare): + Avoid signed overflow. + +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + gcc/config/loongarch/loongarch.cc | 7 +++++-- + 1 file changed, 5 insertions(+), 2 deletions(-) + +diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc +index e9ba3374e..d552b162a 100644 +--- a/gcc/config/loongarch/loongarch.cc ++++ b/gcc/config/loongarch/loongarch.cc +@@ -4177,10 +4177,13 @@ loongarch_emit_int_compare (enum rtx_code *code, rtx *op0, rtx *op1) + if (!increment && !decrement) + continue; + ++ if ((increment && rhs == HOST_WIDE_INT_MAX) ++ || (decrement && rhs == HOST_WIDE_INT_MIN)) ++ break; ++ + new_rhs = rhs + (increment ? 
1 : -1); + if (loongarch_integer_cost (new_rhs) +- < loongarch_integer_cost (rhs) +- && (rhs < 0) == (new_rhs < 0)) ++ < loongarch_integer_cost (rhs)) + { + *op1 = GEN_INT (new_rhs); + *code = mag_comparisons[i][increment]; +-- +2.33.0 + diff --git a/LoongArch-implement-count_-leading-trailing-_zeros.patch b/LoongArch-implement-count_-leading-trailing-_zeros.patch new file mode 100644 index 0000000000000000000000000000000000000000..6efc57f3d3aabfcb4836ad46403cfd80a387df9c --- /dev/null +++ b/LoongArch-implement-count_-leading-trailing-_zeros.patch @@ -0,0 +1,50 @@ +From ad6541a22dd909a04d745b42c8da5e04733efec4 Mon Sep 17 00:00:00 2001 +From: Xi Ruoyao +Date: Wed, 12 Oct 2022 22:06:07 +0800 +Subject: [PATCH 023/124] LoongArch: implement count_{leading,trailing}_zeros + +LoongArch always support clz and ctz instructions, so we can always use +__builtin_{clz,ctz} for count_{leading,trailing}_zeros. This improves +the code of libgcc, and also benefits Glibc once we merge longlong.h +there. + +Bootstrapped and regtested on loongarch64-linux-gnu. + +include/ChangeLog: + + * longlong.h [__loongarch__] (count_leading_zeros): Define. + [__loongarch__] (count_trailing_zeros): Likewise. + [__loongarch__] (COUNT_LEADING_ZEROS_0): Likewise. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + include/longlong.h | 12 ++++++++++++ + 1 file changed, 12 insertions(+) + +diff --git a/include/longlong.h b/include/longlong.h +index 64a7b10f9..c3a6f1e7e 100644 +--- a/include/longlong.h ++++ b/include/longlong.h +@@ -593,6 +593,18 @@ extern UDItype __umulsidi3 (USItype, USItype); + #define UMUL_TIME 14 + #endif + ++#ifdef __loongarch__ ++# if W_TYPE_SIZE == 32 ++# define count_leading_zeros(count, x) ((count) = __builtin_clz (x)) ++# define count_trailing_zeros(count, x) ((count) = __builtin_ctz (x)) ++# define COUNT_LEADING_ZEROS_0 32 ++# elif W_TYPE_SIZE == 64 ++# define count_leading_zeros(count, x) ((count) = __builtin_clzll (x)) ++# define count_trailing_zeros(count, x) ((count) = __builtin_ctzll (x)) ++# define COUNT_LEADING_ZEROS_0 64 ++# endif ++#endif ++ + #if defined (__M32R__) && W_TYPE_SIZE == 32 + #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ + /* The cmp clears the condition bit. */ \ +-- +2.33.0 + diff --git a/LoongArch-improved-target-configuration-interface.patch b/LoongArch-improved-target-configuration-interface.patch new file mode 100644 index 0000000000000000000000000000000000000000..2c8b7ad2ce700334c3badbf7e433f3ac94a25cc2 --- /dev/null +++ b/LoongArch-improved-target-configuration-interface.patch @@ -0,0 +1,3024 @@ +From b980a32eabcbd34e8f8e6a245dbba1898256555e Mon Sep 17 00:00:00 2001 +From: Yang Yujie +Date: Wed, 23 Aug 2023 15:16:21 +0800 +Subject: [PATCH 058/124] LoongArch: improved target configuration interface + +The configure script and the GCC driver are updated so that +it is easier to customize and control GCC builds for targeting +different LoongArch implementations. + +* Make --with-abi obsolete, since it might cause different default ABI + under the same target triplet, which is undesirable. The default ABI + is now purely decided by the target triplet. 
+ +* Support options for LoongArch SIMD extensions: + new configure options --with-simd={none,lsx,lasx}; + new compiler option -msimd={none,lsx,lasx}; + new driver options -m[no]-l[a]sx. + +* Enforce the priority of configuration paths (for ={fpu,tune,simd}): + -m > -march-implied > --with- > --with-arch-implied. + +* Allow the user to control the compiler options used when building + GCC libraries for each multilib variant via --with-multilib-list + and --with-multilib-default. This could become more useful when + we have 32-bit support later. + + Example 1: the following configure option + --with-multilib-list=lp64d/la464/mno-strict-align/msimd=lsx,lp64s/mfpu=32 + | | | | + -mabi=ABI -march=ARCH a list of other options + (mandatory) (optional) (optional) + + builds two sets of libraries: + 1. lp64d/base ABI (built with "-march=la464 -mno-strict-align -msimd=lsx") + 2. lp64s/base ABI (built with "-march=abi-default -mfpu=32") + + Example 2: the following 3 configure options + + --with-arch=loongarch64 + --with-multilib-list=lp64d,lp64f,lp64s/la464 + --with-multilib-default=fixed/mno-strict-align/mfpu=64 + | | | + -march=ARCH a list of other options + (optional) (optional) + + is equivalent to (in terms of building libraries): + + --with-multilib-list=\ + lp64d/loongarch64/mno-strict-align/mfpu=64,\ + lp64f/loongarch64/mno-strict-align/mfpu=64,\ + lp64s/la464 + + Note: + 1. the GCC driver and compiler proper does not support + "-march=fixed". "fixed" that appear here acts as a placeholder for + "use whatever ARCH in --with-arch=ARCH" (or the default value + of --with-arch=ARCH if --with-arch is not explicitly configured). + + 2. if the ARCH part is omitted, "-march=abi-default" + is used for building all library variants, which + practically means enabling the minimal ISA features + that can support the given ABI. + +ChangeLog: + + * config-ml.in: Do not build the multilib library variant + that is duplicate with the toplevel one. 
+ +gcc/ChangeLog: + + * config.gcc: Make --with-abi= obsolete, decide the default ABI + with target triplet. Allow specifying multilib library build + options with --with-multilib-list and --with-multilib-default. + * config/loongarch/t-linux: Likewise. + * config/loongarch/genopts/loongarch-strings: Likewise. + * config/loongarch/loongarch-str.h: Likewise. + * doc/install.texi: Likewise. + * config/loongarch/genopts/loongarch.opt.in: Introduce + -m[no-]l[a]sx options. Only process -m*-float and + -m[no-]l[a]sx in the GCC driver. + * config/loongarch/loongarch.opt: Likewise. + * config/loongarch/la464.md: Likewise. + * config/loongarch/loongarch-c.cc: Likewise. + * config/loongarch/loongarch-cpu.cc: Likewise. + * config/loongarch/loongarch-cpu.h: Likewise. + * config/loongarch/loongarch-def.c: Likewise. + * config/loongarch/loongarch-def.h: Likewise. + * config/loongarch/loongarch-driver.cc: Likewise. + * config/loongarch/loongarch-driver.h: Likewise. + * config/loongarch/loongarch-opts.cc: Likewise. + * config/loongarch/loongarch-opts.h: Likewise. + * config/loongarch/loongarch.cc: Likewise. + * doc/invoke.texi: Likewise. 
+ +Signed-off-by: Peng Fan +Signed-off-by: ticat_fp +--- + config-ml.in | 10 + + gcc/config.gcc | 379 ++++++++++-------- + .../loongarch/genopts/loongarch-strings | 8 +- + gcc/config/loongarch/genopts/loongarch.opt.in | 62 +-- + gcc/config/loongarch/la464.md | 32 +- + gcc/config/loongarch/loongarch-c.cc | 19 +- + gcc/config/loongarch/loongarch-cpu.cc | 263 +++++++----- + gcc/config/loongarch/loongarch-cpu.h | 3 +- + gcc/config/loongarch/loongarch-def.c | 67 ++-- + gcc/config/loongarch/loongarch-def.h | 57 +-- + gcc/config/loongarch/loongarch-driver.cc | 208 +++++----- + gcc/config/loongarch/loongarch-driver.h | 40 +- + gcc/config/loongarch/loongarch-opts.cc | 372 ++++++++++++----- + gcc/config/loongarch/loongarch-opts.h | 59 +-- + gcc/config/loongarch/loongarch-str.h | 7 +- + gcc/config/loongarch/loongarch.cc | 87 ++-- + gcc/config/loongarch/loongarch.opt | 60 ++- + gcc/config/loongarch/t-linux | 32 +- + gcc/doc/install.texi | 52 ++- + 19 files changed, 1136 insertions(+), 681 deletions(-) + +diff --git a/config-ml.in b/config-ml.in +index 68854a4f1..ad0db0781 100644 +--- a/config-ml.in ++++ b/config-ml.in +@@ -301,6 +301,16 @@ arm-*-*) + done + fi + ;; ++loongarch*-*) ++ old_multidirs="${multidirs}" ++ multidirs="" ++ for x in ${old_multidirs}; do ++ case "$x" in ++ `${CC-gcc} --print-multi-directory`) : ;; ++ *) multidirs="${multidirs} ${x}" ;; ++ esac ++ done ++ ;; + m68*-*-*) + if [ x$enable_softfloat = xno ] + then +diff --git a/gcc/config.gcc b/gcc/config.gcc +index 5c378c698..62525c296 100644 +--- a/gcc/config.gcc ++++ b/gcc/config.gcc +@@ -4965,43 +4965,46 @@ case "${target}" in + esac + ;; + +- loongarch*-*-*) +- supported_defaults="abi arch tune fpu" ++ loongarch*-*) ++ supported_defaults="abi arch tune fpu simd multilib-default" + + # Local variables + unset \ +- abi_pattern abi_default \ +- abiext_pattern abiext_default \ +- arch_pattern arch_default \ +- fpu_pattern fpu_default \ +- tune_pattern tune_default \ +- triplet_os triplet_abi ++ abi_base 
abi_ext \ ++ arch_pattern arch_default \ ++ fpu_pattern fpu_default \ ++ triplet_os triplet_abi \ ++ strict_align_opt ++ ++ # --with-abi is now obsolete, emit a warning if given. ++ case ${with_abi} in ++ "") ;; ++ *) ++ echo "warning: --with-abi= is now obsolete," \ ++ "the default ABI is derived from your target" \ ++ "triplet ${target}" 1>&2 ++ ;; ++ esac + + # Infer ABI from the triplet. + case ${target} in +- loongarch64-*-*-*f64) +- abi_pattern="lp64d" +- ;; +- loongarch64-*-*-*f32) +- abi_pattern="lp64f" +- ;; +- loongarch64-*-*-*sf) +- abi_pattern="lp64s" +- ;; +- loongarch64-*-*-*) +- abi_pattern="lp64[dfs]" +- abi_default="lp64d" +- ;; ++ loongarch64-*f64) abi_base="lp64d"; abi_ext="base" ;; ++ loongarch64-*f32) abi_base="lp64f"; abi_ext="base" ;; ++ loongarch64-*sf) abi_base="lp64s"; abi_ext="base" ;; ++ loongarch64-*) abi_base="lp64d"; abi_ext="base" ;; + *) + echo "Unsupported target ${target}." 1>&2 + exit 1 + ;; + esac + +- abiext_pattern="*" +- abiext_default="base" +- + # Get the canonical triplet (multiarch specifier). ++ case ${abi_base},${abi_ext} in ++ lp64d,base) triplet_abi="";; ++ lp64f,base) triplet_abi="f32";; ++ lp64s,base) triplet_abi="sf";; ++ esac ++ + case ${target} in + *-linux-gnu*) triplet_os="linux-gnu";; + *-linux-musl*) triplet_os="linux-musl";; +@@ -5010,42 +5013,24 @@ case "${target}" in + exit 1 + ;; + esac ++ la_canonical_triplet="loongarch64-${triplet_os}${triplet_abi}" ++ + + # Perform initial sanity checks on --with-* options. + case ${with_arch} in +- "" | loongarch64 | la464) ;; # OK, append here. ++ "" | abi-default | loongarch64 | la464) ;; # OK, append here. + native) + if test x${host} != x${target}; then + echo "--with-arch=native is illegal for cross-compiler." 1>&2 + exit 1 + fi + ;; +- "") +- echo "Please set a default value for \${with_arch}" \ +- "according to your target triplet \"${target}\"." 
1>&2 +- exit 1 +- ;; + *) + echo "Unknown arch in --with-arch=$with_arch" 1>&2 + exit 1 + ;; + esac + +- case ${with_abi} in +- "" | lp64d | lp64f | lp64s) ;; # OK, append here. +- *) +- echo "Unsupported ABI given in --with-abi=$with_abi" 1>&2 +- exit 1 +- ;; +- esac +- +- case ${with_abiext} in +- "" | base) ;; # OK, append here. +- *) +- echo "Unsupported ABI extention type $with_abiext" 1>&2 +- exit 1 +- ;; +- esac + + case ${with_fpu} in + "" | none | 32 | 64) ;; # OK, append here. +@@ -5059,73 +5044,41 @@ case "${target}" in + ;; + esac + +- +- # Set default value for with_abi. +- case ${with_abi} in +- "") +- if test x${abi_default} != x; then +- with_abi=${abi_default} +- else +- with_abi=${abi_pattern} +- fi +- ;; +- +- *) +- if echo "${with_abi}" | grep -E "^${abi_pattern}$" > /dev/null; then +- : # OK +- else +- echo "Incompatible options:" \ +- "--with-abi=${with_abi} and --target=${target}." 1>&2 ++ case ${with_simd} in ++ "" | none) ;; ++ lsx | lasx) # OK, append here. ++ case ${with_fpu} in ++ 64) ;; ++ "") with_fpu=64 ;; ++ *) ++ echo "--with-simd=${with_simd} conflicts with --with-fpu=${with_fpu}" 1>&2 + exit 1 +- fi +- ;; +- esac +- +- case ${with_abi} in +- "lp64d") triplet_abi="";; +- "lp64f") triplet_abi="f32";; +- "lp64s") triplet_abi="sf";; +- esac +- la_canonical_triplet="loongarch64-${triplet_os}${triplet_abi}" +- +- # Set default value for with_abiext (internal) +- case ${with_abiext} in +- "") +- if test x${abiext_default} != x; then +- with_abiext=${abiext_default} +- else +- with_abiext=${abiext_pattern} +- fi ++ ;; ++ esac + ;; + + *) +- if echo "${with_abiext}" | grep -E "^${abiext_pattern}$" > /dev/null; then +- : # OK +- else +- echo "The ABI extension type \"${with_abiext}\"" \ +- "is incompatible with --target=${target}." 
1>&2 +- exit 1 +- fi +- ++ echo "Unknown SIMD extension in --with-simd=$with_simd" 1>&2 ++ exit 1 + ;; + esac + + # Infer ISA-related default options from the ABI: pass 1 +- case ${with_abi}/${with_abiext} in ++ case ${abi_base}/${abi_ext} in + lp64*/base) + # architectures that support lp64* ABI +- arch_pattern="native|loongarch64|la464" ++ arch_pattern="native|abi-default|loongarch64|la464" + # default architecture for lp64* ABI +- arch_default="loongarch64" ++ arch_default="abi-default" + ;; + *) +- echo "Unsupported ABI type ${with_abi}/${with_abiext}." 1>&2 ++ echo "Unsupported ABI type ${abi_base}/${abi_ext}." 1>&2 + exit 1 + ;; + esac + + # Infer ISA-related default options from the ABI: pass 2 +- case ${with_abi}/${with_abiext} in ++ case ${abi_base}/${abi_ext} in + lp64d/base) + fpu_pattern="64" + ;; +@@ -5138,7 +5091,7 @@ case "${target}" in + fpu_default="none" + ;; + *) +- echo "Unsupported ABI type ${with_abi}/${with_abiext}." 1>&2 ++ echo "Unsupported ABI type ${abi_base}/${abi_ext}." 1>&2 + exit 1 + ;; + esac +@@ -5157,7 +5110,7 @@ case "${target}" in + if echo "${with_arch}" | grep -E "^${arch_pattern}$" > /dev/null; then + : # OK + else +- echo "${with_abi}/${with_abiext} ABI cannot be implemented with" \ ++ echo "${abi_base}/${abi_ext} ABI cannot be implemented with" \ + "--with-arch=${with_arch}." 1>&2 + exit 1 + fi +@@ -5178,7 +5131,7 @@ case "${target}" in + if echo "${with_fpu}" | grep -E "^${fpu_pattern}$" > /dev/null; then + : # OK + else +- echo "${with_abi}/${with_abiext} ABI cannot be implemented with" \ ++ echo "${abi_base}/${abi_ext} ABI cannot be implemented with" \ + "--with-fpu=${with_fpu}." 1>&2 + exit 1 + fi +@@ -5186,32 +5139,19 @@ case "${target}" in + esac + + +- # Infer default with_tune from with_arch: pass 1 ++ # Check default with_tune configuration using with_arch. 
+ case ${with_arch} in +- native) +- tune_pattern="*" +- tune_default="native" +- ;; + loongarch64) +- tune_pattern="loongarch64|la464" +- tune_default="la464" ++ tune_pattern="native|abi-default|loongarch64|la464" + ;; + *) + # By default, $with_tune == $with_arch +- tune_pattern="$with_arch" ++ tune_pattern="*" + ;; + esac + +- ## Set default value for with_tune. + case ${with_tune} in +- "") +- if test x${tune_default} != x; then +- with_tune=${tune_default} +- else +- with_tune=${tune_pattern} +- fi +- ;; +- ++ "") ;; # OK + *) + if echo "${with_tune}" | grep -E "^${tune_pattern}$" > /dev/null; then + : # OK +@@ -5223,13 +5163,53 @@ case "${target}" in + ;; + esac + ++ # Handle --with-multilib-default ++ if echo "${with_multilib_default}" \ ++ | grep -E -e '[[:space:]]' -e '//' -e '/$' -e '^/' > /dev/null 2>&1; then ++ echo "Invalid argument to --with-multilib-default." 1>&2 ++ exit 1 ++ fi ++ ++ if test x${with_multilib_default} = x; then ++ # Use -march=abi-default by default when building libraries. ++ with_multilib_default="/march=abi-default" ++ else ++ unset parse_state component ++ parse_state=arch ++ for component in $(echo "${with_multilib_default}" | tr '/' ' '); do ++ case ${parse_state},${component} in ++ arch,|arch,abi-default) ++ # ABI-default: use the ABI's default ARCH configuration for ++ # multilib library builds, unless otherwise specified ++ # in --with-multilib-list. ++ with_multilib_default="/march=abi-default" ;; ++ arch,fixed) ++ # Fixed: use the default gcc configuration for all multilib ++ # builds by default. ++ with_multilib_default="" ;; ++ arch,native|arch,loongarch64|arch,la464) # OK, append here. 
++ with_multilib_default="/march=${component}" ;; ++ arch,*) ++ with_multilib_default="/march=abi-default" ++ with_multilib_default="${with_multilib_default}/${component}" ;; ++ opts,*) ++ with_multilib_default="${with_multilib_default}/${component}" ;; ++ esac ++ ++ if test x${parse_state} = xarch; then ++ parse_state=opt; ++ fi ++ done ++ unset parse_state component ++ fi ++ + # Handle --with-multilib-list. + if test x"${with_multilib_list}" = x \ + || test x"${with_multilib_list}" = xno \ + || test x"${with_multilib_list}" = xdefault \ + || test x"${enable_multilib}" != xyes; then + +- with_multilib_list="${with_abi}/${with_abiext}" ++ with_multilib_list="${abi_base}/${abi_ext}" + fi + + # Check if the configured default ABI combination is included in +@@ -5245,25 +5225,21 @@ case "${target}" in + # ${with_multilib_list} should not contain whitespaces, + # consecutive commas or slashes. + if echo "${with_multilib_list}" \ +- | grep -E -e "[[:space:]]" -e '[,/][,/]' -e '[,/]$' -e '^[,/]' > /dev/null; then ++ | grep -E -e "[[:space:]]" -e '[,/][,/]' -e '[,/]$' -e '^[,/]' > /dev/null 2>&1; then + echo "Invalid argument to --with-multilib-list." 
1>&2 + exit 1 + fi + +- unset component idx elem_abi_base elem_abi_ext elem_tmp ++ unset component elem_abi_base elem_abi_ext elem_tmp parse_state all_abis + for elem in $(echo "${with_multilib_list}" | tr ',' ' '); do +- idx=0 +- while true; do +- idx=$((idx + 1)) +- component=$(echo "${elem}" | awk -F'/' '{print $'"${idx}"'}') +- +- case ${idx} in +- 1) +- # Component 1: Base ABI type ++ unset elem_abi_base elem_abi_ext ++ parse_state="abi-base" ++ ++ for component in $(echo "${elem}" | tr '/' ' '); do ++ if test x${parse_state} = x"abi-base"; then ++ # Base ABI type + case ${component} in +- lp64d) elem_tmp="ABI_BASE_LP64D,";; +- lp64f) elem_tmp="ABI_BASE_LP64F,";; +- lp64s) elem_tmp="ABI_BASE_LP64S,";; ++ lp64d | lp64f | lp64s) elem_tmp="ABI_BASE_$(tr a-z A-Z <<< ${component}),";; + *) + echo "Unknown base ABI \"${component}\" in --with-multilib-list." 1>&2 + exit 1 +@@ -5272,57 +5248,111 @@ case "${target}" in + loongarch_multilib_list_c="${loongarch_multilib_list_c}${elem_tmp}" + loongarch_multilib_list_make="${loongarch_multilib_list_make}mabi=${component}" + elem_abi_base="${component}" +- ;; + +- 2) +- # Component 2: ABI extension type ++ parse_state="abi-ext" ++ continue ++ fi ++ ++ if test x${parse_state} = x"abi-ext"; then ++ # ABI extension type + case ${component} in +- "" | base) +- component="base" +- elem_tmp="ABI_EXT_BASE," +- ;; +- *) +- echo "Unknown ABI extension \"${component}\" in --with-multilib-list." 1>&2 +- exit 1 ++ base) ++ elem_abi_ext="base" ++ loongarch_multilib_list_c="${loongarch_multilib_list_c}ABI_EXT_BASE," ++ loongarch_multilib_list_make="${loongarch_multilib_list_make}" # Add nothing for now. ++ parse_state="arch" ++ continue; + ;; + esac +- loongarch_multilib_list_c="${loongarch_multilib_list_c}${elem_tmp}" ++ ++ # The default ABI extension is "base" if unspecified. 
++ elem_abi_ext="base" ++ loongarch_multilib_list_c="${loongarch_multilib_list_c}ABI_EXT_BASE," + loongarch_multilib_list_make="${loongarch_multilib_list_make}" # Add nothing for now. +- elem_abi_ext="${component}" +- ;; ++ parse_state="arch" ++ fi + +- *) +- # Component 3 and on: optional stuff ++ if test x${parse_state} = x"arch"; then ++ # -march option + case ${component} in +- "") +- # End of component list. +- break ++ native | abi-default | loongarch64 | la464) # OK, append here. ++ # Append -march spec for each multilib variant. ++ loongarch_multilib_list_make="${loongarch_multilib_list_make}/march=${component}" ++ parse_state="opts" ++ continue ++ ;; ++ ++ default) ++ # "/default" is equivalent to --with-multilib-default=fixed ++ parse_state="opts" ++ continue + ;; ++ esac ++ ++ # If ARCH is unspecified for this multilib variant, use ${with_multllib_default}. ++ loongarch_multilib_list_make="${loongarch_multilib_list_make}${with_multilib_default}" ++ parse_state="opts" ++ fi ++ ++ if test x${parse_state} = x"opts"; then ++ # Other compiler options for building libraries. ++ # (no static sanity check performed) ++ case ${component} in + *) +- echo "Unknown ABI \"${elem}\" in --with-multilib-list." 1>&2 +- exit 1 ++ # Append other components as additional build options ++ # (without the prepending dash). ++ # Their validity should be examined by the compiler. ++ loongarch_multilib_list_make="${loongarch_multilib_list_make}/${component}" + ;; + esac +- ;; +- esac ++ fi + done + +- if test x${elem_abi_base} = x${with_abi} \ +- && test x${elem_abi_ext} = x${with_abiext}; then ++ case ${parse_state} in ++ "abi-ext") ++ elem_abi_ext="base" ++ loongarch_multilib_list_c="${loongarch_multilib_list_c}ABI_EXT_BASE," ++ loongarch_multilib_list_make="${loongarch_multilib_list_make}" # Add nothing for now. 
++ loongarch_multilib_list_make="${loongarch_multilib_list_make}${with_multilib_default}" ++ ;; ++ "arch") ++ # If ARCH is unspecified for this multilib variant, use ${with_multllib_default}. ++ loongarch_multilib_list_make="${loongarch_multilib_list_make}${with_multilib_default}" ++ ;; ++ "opts") ++ : ++ ;; ++ esac ++ ++ # Check for repeated configuration of the same multilib variant. ++ if echo "${elem_abi_base}/${elem_abi_ext}" \ ++ | grep -E "^(${all_abis%|})$" >/dev/null 2>&1; then ++ echo "Repeated multilib config of \"${elem_abi_base}/${elem_abi_ext}\" in --with-multilib-list." ++ exit 1 ++ fi ++ all_abis="${all_abis}${elem_abi_base}/${elem_abi_ext}|" ++ ++ ++ # Check if the default ABI configuration of the GCC binary ++ # is included in the enabled multilib variants. ++ if test x${elem_abi_base} = x${abi_base} \ ++ && test x${elem_abi_ext} = x${abi_ext}; then + loongarch_multilib_list_sane=yes + fi + loongarch_multilib_list_make="${loongarch_multilib_list_make}," + done ++ unset component elem_abi_base elem_abi_ext elem_tmp parse_state all_abis ++ + + # Check if the default ABI combination is in the default list. + if test x${loongarch_multilib_list_sane} = xno; then +- if test x${with_abiext} = xbase; then +- with_abiext="" ++ if test x${abi_ext} = xbase; then ++ abi_ext="" + else +- with_abiext="/${with_abiext}" ++ abi_ext="/${abi_ext}" + fi + +- echo "Default ABI combination (${with_abi}${with_abiext})" \ ++ echo "Default ABI combination (${abi_base}${abi_ext})" \ + "not found in --with-multilib-list." 1>&2 + exit 1 + fi +@@ -5783,34 +5813,37 @@ case ${target} in + + # Let --with- flags initialize the enum variables from loongarch.opt. + # See macro definitions from loongarch-opts.h and loongarch-cpu.h. 
+- case ${with_arch} in +- native) tm_defines="${tm_defines} DEFAULT_CPU_ARCH=CPU_NATIVE" ;; +- la464) tm_defines="${tm_defines} DEFAULT_CPU_ARCH=CPU_LA464" ;; +- loongarch64) tm_defines="${tm_defines} DEFAULT_CPU_ARCH=CPU_LOONGARCH64" ;; +- esac + +- case ${with_tune} in +- native) tm_defines="${tm_defines} DEFAULT_CPU_TUNE=CPU_NATIVE" ;; +- la464) tm_defines="${tm_defines} DEFAULT_CPU_TUNE=CPU_LA464" ;; +- loongarch64) tm_defines="${tm_defines} DEFAULT_CPU_TUNE=CPU_LOONGARCH64" ;; +- esac ++ # Architecture ++ tm_defines="${tm_defines} DEFAULT_CPU_ARCH=CPU_$(echo ${with_arch} | tr a-z- A-Z_)" + +- case ${with_abi} in +- lp64d) tm_defines="${tm_defines} DEFAULT_ABI_BASE=ABI_BASE_LP64D" ;; +- lp64f) tm_defines="${tm_defines} DEFAULT_ABI_BASE=ABI_BASE_LP64F" ;; +- lp64s) tm_defines="${tm_defines} DEFAULT_ABI_BASE=ABI_BASE_LP64S" ;; +- esac ++ # Base ABI type ++ tm_defines="${tm_defines} DEFAULT_ABI_BASE=ABI_BASE_$(echo ${abi_base} | tr a-z- A-Z_)" + +- case ${with_abiext} in ++ # ABI Extension ++ case ${abi_ext} in + base) tm_defines="${tm_defines} DEFAULT_ABI_EXT=ABI_EXT_BASE" ;; + esac + ++ # Microarchitecture ++ if test x${with_tune} != x; then ++ tm_defines="${tm_defines} DEFAULT_CPU_TUNE=CPU_$(echo ${with_tune} | tr a-z- A-Z_)" ++ fi ++ ++ # FPU adjustment + case ${with_fpu} in +- none) tm_defines="$tm_defines DEFAULT_ISA_EXT_FPU=ISA_EXT_NOFPU" ;; ++ none) tm_defines="$tm_defines DEFAULT_ISA_EXT_FPU=ISA_EXT_NONE" ;; + 32) tm_defines="$tm_defines DEFAULT_ISA_EXT_FPU=ISA_EXT_FPU32" ;; + 64) tm_defines="$tm_defines DEFAULT_ISA_EXT_FPU=ISA_EXT_FPU64" ;; + esac + ++ # SIMD extensions ++ case ${with_simd} in ++ none) tm_defines="$tm_defines DEFAULT_ISA_EXT_SIMD=ISA_EXT_NONE" ;; ++ lsx) tm_defines="$tm_defines DEFAULT_ISA_EXT_SIMD=ISA_EXT_SIMD_LSX" ;; ++ lasx) tm_defines="$tm_defines DEFAULT_ISA_EXT_SIMD=ISA_EXT_SIMD_LASX" ;; ++ esac ++ + tmake_file="loongarch/t-loongarch $tmake_file" + ;; + +diff --git a/gcc/config/loongarch/genopts/loongarch-strings 
b/gcc/config/loongarch/genopts/loongarch-strings +index 44ebb7ab1..21245f52a 100644 +--- a/gcc/config/loongarch/genopts/loongarch-strings ++++ b/gcc/config/loongarch/genopts/loongarch-strings +@@ -23,6 +23,7 @@ OPTSTR_ARCH arch + OPTSTR_TUNE tune + + STR_CPU_NATIVE native ++STR_CPU_ABI_DEFAULT abi-default + STR_CPU_LOONGARCH64 loongarch64 + STR_CPU_LA464 la464 + +@@ -31,7 +32,7 @@ STR_ISA_BASE_LA64V100 la64 + + # -mfpu + OPTSTR_ISA_EXT_FPU fpu +-STR_ISA_EXT_NOFPU none ++STR_NONE none + STR_ISA_EXT_FPU0 0 + STR_ISA_EXT_FPU32 32 + STR_ISA_EXT_FPU64 64 +@@ -40,6 +41,11 @@ OPTSTR_SOFT_FLOAT soft-float + OPTSTR_SINGLE_FLOAT single-float + OPTSTR_DOUBLE_FLOAT double-float + ++# SIMD extensions ++OPTSTR_ISA_EXT_SIMD simd ++STR_ISA_EXT_LSX lsx ++STR_ISA_EXT_LASX lasx ++ + # -mabi= + OPTSTR_ABI_BASE abi + STR_ABI_BASE_LP64D lp64d +diff --git a/gcc/config/loongarch/genopts/loongarch.opt.in b/gcc/config/loongarch/genopts/loongarch.opt.in +index e10618777..c6e337d05 100644 +--- a/gcc/config/loongarch/genopts/loongarch.opt.in ++++ b/gcc/config/loongarch/genopts/loongarch.opt.in +@@ -17,22 +17,12 @@ + ; . + ; + +-; Variables (macros) that should be exported by loongarch.opt: +-; la_opt_switches, +-; la_opt_abi_base, la_opt_abi_ext, +-; la_opt_cpu_arch, la_opt_cpu_tune, +-; la_opt_fpu, +-; la_cmodel. 
+- + HeaderInclude + config/loongarch/loongarch-opts.h + + HeaderInclude + config/loongarch/loongarch-str.h + +-Variable +-HOST_WIDE_INT la_opt_switches = 0 +- + ; ISA related options + ;; Base ISA + Enum +@@ -42,14 +32,13 @@ Basic ISAs of LoongArch: + EnumValue + Enum(isa_base) String(@@STR_ISA_BASE_LA64V100@@) Value(ISA_BASE_LA64V100) + +- + ;; ISA extensions / adjustments + Enum + Name(isa_ext_fpu) Type(int) + FPU types of LoongArch: + + EnumValue +-Enum(isa_ext_fpu) String(@@STR_ISA_EXT_NOFPU@@) Value(ISA_EXT_NOFPU) ++Enum(isa_ext_fpu) String(@@STR_NONE@@) Value(ISA_EXT_NONE) + + EnumValue + Enum(isa_ext_fpu) String(@@STR_ISA_EXT_FPU32@@) Value(ISA_EXT_FPU32) +@@ -58,24 +47,48 @@ EnumValue + Enum(isa_ext_fpu) String(@@STR_ISA_EXT_FPU64@@) Value(ISA_EXT_FPU64) + + m@@OPTSTR_ISA_EXT_FPU@@= +-Target RejectNegative Joined ToLower Enum(isa_ext_fpu) Var(la_opt_fpu) Init(M_OPTION_NOT_SEEN) ++Target RejectNegative Joined ToLower Enum(isa_ext_fpu) Var(la_opt_fpu) Init(M_OPT_UNSET) + -m@@OPTSTR_ISA_EXT_FPU@@=FPU Generate code for the given FPU. + + m@@OPTSTR_ISA_EXT_FPU@@=@@STR_ISA_EXT_FPU0@@ +-Target RejectNegative Alias(m@@OPTSTR_ISA_EXT_FPU@@=,@@STR_ISA_EXT_NOFPU@@) ++Target RejectNegative Alias(m@@OPTSTR_ISA_EXT_FPU@@=,@@STR_NONE@@) + + m@@OPTSTR_SOFT_FLOAT@@ +-Target Driver RejectNegative Var(la_opt_switches) Mask(FORCE_SOFTF) Negative(m@@OPTSTR_SINGLE_FLOAT@@) ++Target Driver Defer Var(la_deferred_options) RejectNegative Negative(m@@OPTSTR_SINGLE_FLOAT@@) + Prevent the use of all hardware floating-point instructions. + + m@@OPTSTR_SINGLE_FLOAT@@ +-Target Driver RejectNegative Var(la_opt_switches) Mask(FORCE_F32) Negative(m@@OPTSTR_DOUBLE_FLOAT@@) ++Target Driver Defer Var(la_deferred_options) RejectNegative Negative(m@@OPTSTR_DOUBLE_FLOAT@@) + Restrict the use of hardware floating-point instructions to 32-bit operations. 
+ + m@@OPTSTR_DOUBLE_FLOAT@@ +-Target Driver RejectNegative Var(la_opt_switches) Mask(FORCE_F64) Negative(m@@OPTSTR_SOFT_FLOAT@@) ++Target Driver Defer Var(la_deferred_options) RejectNegative Negative(m@@OPTSTR_SOFT_FLOAT@@) + Allow hardware floating-point instructions to cover both 32-bit and 64-bit operations. + ++Enum ++Name(isa_ext_simd) Type(int) ++SIMD extension levels of LoongArch: ++ ++EnumValue ++Enum(isa_ext_simd) String(@@STR_NONE@@) Value(ISA_EXT_NONE) ++ ++EnumValue ++Enum(isa_ext_simd) String(@@STR_ISA_EXT_LSX@@) Value(ISA_EXT_SIMD_LSX) ++ ++EnumValue ++Enum(isa_ext_simd) String(@@STR_ISA_EXT_LASX@@) Value(ISA_EXT_SIMD_LASX) ++ ++m@@OPTSTR_ISA_EXT_SIMD@@= ++Target RejectNegative Joined ToLower Enum(isa_ext_simd) Var(la_opt_simd) Init(M_OPT_UNSET) ++-m@@OPTSTR_ISA_EXT_SIMD@@=SIMD Generate code for the given SIMD extension. ++ ++m@@STR_ISA_EXT_LSX@@ ++Target Driver Defer Var(la_deferred_options) ++Enable LoongArch SIMD Extension (LSX, 128-bit). ++ ++m@@STR_ISA_EXT_LASX@@ ++Target Driver Defer Var(la_deferred_options) ++Enable LoongArch Advanced SIMD Extension (LASX, 256-bit). + + ;; Base target models (implies ISA & tune parameters) + Enum +@@ -85,6 +98,9 @@ LoongArch CPU types: + EnumValue + Enum(cpu_type) String(@@STR_CPU_NATIVE@@) Value(CPU_NATIVE) + ++EnumValue ++Enum(cpu_type) String(@@STR_CPU_ABI_DEFAULT@@) Value(CPU_ABI_DEFAULT) ++ + EnumValue + Enum(cpu_type) String(@@STR_CPU_LOONGARCH64@@) Value(CPU_LOONGARCH64) + +@@ -92,11 +108,11 @@ EnumValue + Enum(cpu_type) String(@@STR_CPU_LA464@@) Value(CPU_LA464) + + m@@OPTSTR_ARCH@@= +-Target RejectNegative Joined Enum(cpu_type) Var(la_opt_cpu_arch) Init(M_OPTION_NOT_SEEN) ++Target RejectNegative Joined Enum(cpu_type) Var(la_opt_cpu_arch) Init(M_OPT_UNSET) + -m@@OPTSTR_ARCH@@=PROCESSOR Generate code for the given PROCESSOR ISA. 
+ + m@@OPTSTR_TUNE@@= +-Target RejectNegative Joined Enum(cpu_type) Var(la_opt_cpu_tune) Init(M_OPTION_NOT_SEEN) ++Target RejectNegative Joined Enum(cpu_type) Var(la_opt_cpu_tune) Init(M_OPT_UNSET) + -m@@OPTSTR_TUNE@@=PROCESSOR Generate optimized code for PROCESSOR. + + +@@ -118,13 +134,13 @@ EnumValue + Enum(abi_base) String(@@STR_ABI_BASE_LP64S@@) Value(ABI_BASE_LP64S) + + m@@OPTSTR_ABI_BASE@@= +-Target RejectNegative Joined ToLower Enum(abi_base) Var(la_opt_abi_base) Init(M_OPTION_NOT_SEEN) ++Target RejectNegative Joined ToLower Enum(abi_base) Var(la_opt_abi_base) Init(M_OPT_UNSET) + -m@@OPTSTR_ABI_BASE@@=BASEABI Generate code that conforms to the given BASEABI. + ++ + ;; ABI Extension + Variable +-int la_opt_abi_ext = M_OPTION_NOT_SEEN +- ++int la_opt_abi_ext = M_OPT_UNSET + + mbranch-cost= + Target RejectNegative Joined UInteger Var(loongarch_branch_cost) +@@ -182,7 +198,7 @@ EnumValue + Enum(cmodel) String(@@STR_CMODEL_EXTREME@@) Value(CMODEL_EXTREME) + + mcmodel= +-Target RejectNegative Joined Enum(cmodel) Var(la_opt_cmodel) Init(CMODEL_NORMAL) ++Target RejectNegative Joined Enum(cmodel) Var(la_opt_cmodel) Init(M_OPT_UNSET) + Specify the code model. + + mdirect-extern-access +diff --git a/gcc/config/loongarch/la464.md b/gcc/config/loongarch/la464.md +index 0ae177610..89d61bf58 100644 +--- a/gcc/config/loongarch/la464.md ++++ b/gcc/config/loongarch/la464.md +@@ -43,88 +43,88 @@ + ;; Describe instruction reservations. 
+ + (define_insn_reservation "la464_arith" 1 +- (and (match_test "TARGET_TUNE_LA464") ++ (and (match_test "TARGET_uARCH_LA464") + (eq_attr "type" "arith,clz,const,logical, + move,nop,shift,signext,slt")) + "la464_alu1 | la464_alu2") + + (define_insn_reservation "la464_branch" 1 +- (and (match_test "TARGET_TUNE_LA464") ++ (and (match_test "TARGET_uARCH_LA464") + (eq_attr "type" "branch,jump,call,condmove,trap")) + "la464_alu1 | la464_alu2") + + (define_insn_reservation "la464_imul" 7 +- (and (match_test "TARGET_TUNE_LA464") ++ (and (match_test "TARGET_uARCH_LA464") + (eq_attr "type" "imul")) + "la464_alu1 | la464_alu2") + + (define_insn_reservation "la464_idiv_si" 12 +- (and (match_test "TARGET_TUNE_LA464") ++ (and (match_test "TARGET_uARCH_LA464") + (and (eq_attr "type" "idiv") + (eq_attr "mode" "SI"))) + "la464_alu1 | la464_alu2") + + (define_insn_reservation "la464_idiv_di" 25 +- (and (match_test "TARGET_TUNE_LA464") ++ (and (match_test "TARGET_uARCH_LA464") + (and (eq_attr "type" "idiv") + (eq_attr "mode" "DI"))) + "la464_alu1 | la464_alu2") + + (define_insn_reservation "la464_load" 4 +- (and (match_test "TARGET_TUNE_LA464") ++ (and (match_test "TARGET_uARCH_LA464") + (eq_attr "type" "load")) + "la464_mem1 | la464_mem2") + + (define_insn_reservation "la464_gpr_fp" 16 +- (and (match_test "TARGET_TUNE_LA464") ++ (and (match_test "TARGET_uARCH_LA464") + (eq_attr "type" "mftg,mgtf")) + "la464_mem1") + + (define_insn_reservation "la464_fpload" 4 +- (and (match_test "TARGET_TUNE_LA464") ++ (and (match_test "TARGET_uARCH_LA464") + (eq_attr "type" "fpload")) + "la464_mem1 | la464_mem2") + + (define_insn_reservation "la464_prefetch" 0 +- (and (match_test "TARGET_TUNE_LA464") ++ (and (match_test "TARGET_uARCH_LA464") + (eq_attr "type" "prefetch,prefetchx")) + "la464_mem1 | la464_mem2") + + (define_insn_reservation "la464_store" 0 +- (and (match_test "TARGET_TUNE_LA464") ++ (and (match_test "TARGET_uARCH_LA464") + (eq_attr "type" "store,fpstore,fpidxstore")) + "la464_mem1 
| la464_mem2") + + (define_insn_reservation "la464_fadd" 4 +- (and (match_test "TARGET_TUNE_LA464") ++ (and (match_test "TARGET_uARCH_LA464") + (eq_attr "type" "fadd,fmul,fmadd")) + "la464_falu1 | la464_falu2") + + (define_insn_reservation "la464_fcmp" 2 +- (and (match_test "TARGET_TUNE_LA464") ++ (and (match_test "TARGET_uARCH_LA464") + (eq_attr "type" "fabs,fcmp,fmove,fneg")) + "la464_falu1 | la464_falu2") + + (define_insn_reservation "la464_fcvt" 4 +- (and (match_test "TARGET_TUNE_LA464") ++ (and (match_test "TARGET_uARCH_LA464") + (eq_attr "type" "fcvt")) + "la464_falu1 | la464_falu2") + + (define_insn_reservation "la464_fdiv_sf" 12 +- (and (match_test "TARGET_TUNE_LA464") ++ (and (match_test "TARGET_uARCH_LA464") + (and (eq_attr "type" "fdiv,frdiv,fsqrt,frsqrt") + (eq_attr "mode" "SF"))) + "la464_falu1 | la464_falu2") + + (define_insn_reservation "la464_fdiv_df" 19 +- (and (match_test "TARGET_TUNE_LA464") ++ (and (match_test "TARGET_uARCH_LA464") + (and (eq_attr "type" "fdiv,frdiv,fsqrt,frsqrt") + (eq_attr "mode" "DF"))) + "la464_falu1 | la464_falu2") + + ;; Force single-dispatch for unknown or multi. 
+ (define_insn_reservation "la464_unknown" 1 +- (and (match_test "TARGET_TUNE_LA464") ++ (and (match_test "TARGET_uARCH_LA464") + (eq_attr "type" "unknown,multi,atomic,syncloop")) + "la464_alu1 + la464_alu2 + la464_falu1 + + la464_falu2 + la464_mem1 + la464_mem2") +diff --git a/gcc/config/loongarch/loongarch-c.cc b/gcc/config/loongarch/loongarch-c.cc +index f779a7355..2cf84eec7 100644 +--- a/gcc/config/loongarch/loongarch-c.cc ++++ b/gcc/config/loongarch/loongarch-c.cc +@@ -61,8 +61,8 @@ loongarch_cpu_cpp_builtins (cpp_reader *pfile) + builtin_assert ("cpu=loongarch"); + builtin_define ("__loongarch__"); + +- LARCH_CPP_SET_PROCESSOR ("_LOONGARCH_ARCH", LARCH_ACTUAL_ARCH); +- LARCH_CPP_SET_PROCESSOR ("_LOONGARCH_TUNE", LARCH_ACTUAL_TUNE); ++ LARCH_CPP_SET_PROCESSOR ("_LOONGARCH_ARCH", la_target.cpu_arch); ++ LARCH_CPP_SET_PROCESSOR ("_LOONGARCH_TUNE", la_target.cpu_tune); + + /* Base architecture / ABI. */ + if (TARGET_64BIT) +@@ -99,6 +99,21 @@ loongarch_cpu_cpp_builtins (cpp_reader *pfile) + else + builtin_define ("__loongarch_frlen=0"); + ++ if (ISA_HAS_LSX) ++ { ++ builtin_define ("__loongarch_simd"); ++ builtin_define ("__loongarch_sx"); ++ ++ if (!ISA_HAS_LASX) ++ builtin_define ("__loongarch_simd_width=128"); ++ } ++ ++ if (ISA_HAS_LASX) ++ { ++ builtin_define ("__loongarch_asx"); ++ builtin_define ("__loongarch_simd_width=256"); ++ } ++ + /* Add support for FLOAT128_TYPE on the LoongArch architecture. */ + builtin_define ("__FLOAT128_TYPE__"); + +diff --git a/gcc/config/loongarch/loongarch-cpu.cc b/gcc/config/loongarch/loongarch-cpu.cc +index a886dd932..ea05526d7 100644 +--- a/gcc/config/loongarch/loongarch-cpu.cc ++++ b/gcc/config/loongarch/loongarch-cpu.cc +@@ -26,6 +26,7 @@ along with GCC; see the file COPYING3. 
If not see + #include "tm.h" + #include "diagnostic-core.h" + ++#include "loongarch-def.h" + #include "loongarch-opts.h" + #include "loongarch-cpu.h" + #include "loongarch-str.h" +@@ -80,127 +81,191 @@ get_native_prid_str (void) + } + + /* Fill property tables for CPU_NATIVE. */ +-unsigned int +-fill_native_cpu_config (int p_arch_native, int p_tune_native) ++void ++fill_native_cpu_config (struct loongarch_target *tgt) + { +- int ret_cpu_type; ++ int arch_native_p = tgt->cpu_arch == CPU_NATIVE; ++ int tune_native_p = tgt->cpu_tune == CPU_NATIVE; ++ int native_cpu_type = CPU_NATIVE; + + /* Nothing needs to be done unless "-march/tune=native" + is given or implied. */ +- if (!(p_arch_native || p_tune_native)) +- return CPU_NATIVE; ++ if (!arch_native_p && !tune_native_p) ++ return; + + /* Fill cpucfg_cache with the "cpucfg" instruction. */ + cache_cpucfg (); + +- +- /* Fill: loongarch_cpu_default_isa[CPU_NATIVE].base +- With: base architecture (ARCH) +- At: cpucfg_words[1][1:0] */ +- +- #define NATIVE_BASE_ISA (loongarch_cpu_default_isa[CPU_NATIVE].base) +- switch (cpucfg_cache[1] & 0x3) +- { +- case 0x02: +- NATIVE_BASE_ISA = ISA_BASE_LA64V100; +- break; +- +- default: +- if (p_arch_native) +- fatal_error (UNKNOWN_LOCATION, +- "unknown base architecture %<0x%x%>, %qs failed", +- (unsigned int) (cpucfg_cache[1] & 0x3), +- "-m" OPTSTR_ARCH "=" STR_CPU_NATIVE); +- } +- +- /* Fill: loongarch_cpu_default_isa[CPU_NATIVE].fpu +- With: FPU type (FP, FP_SP, FP_DP) +- At: cpucfg_words[2][2:0] */ +- +- #define NATIVE_FPU (loongarch_cpu_default_isa[CPU_NATIVE].fpu) +- switch (cpucfg_cache[2] & 0x7) +- { +- case 0x07: +- NATIVE_FPU = ISA_EXT_FPU64; +- break; +- +- case 0x03: +- NATIVE_FPU = ISA_EXT_FPU32; +- break; +- +- case 0x00: +- NATIVE_FPU = ISA_EXT_NOFPU; +- break; +- +- default: +- if (p_arch_native) +- fatal_error (UNKNOWN_LOCATION, +- "unknown FPU type %<0x%x%>, %qs failed", +- (unsigned int) (cpucfg_cache[2] & 0x7), +- "-m" OPTSTR_ARCH "=" STR_CPU_NATIVE); +- } +- +- 
/* Fill: loongarch_cpu_cache[CPU_NATIVE] +- With: cache size info +- At: cpucfg_words[16:20][31:0] */ +- +- int l1d_present = 0, l1u_present = 0; +- int l2d_present = 0; +- uint32_t l1_szword, l2_szword; +- +- l1u_present |= cpucfg_cache[16] & 3; /* bit[1:0]: unified l1 cache */ +- l1d_present |= cpucfg_cache[16] & 4; /* bit[2:2]: l1 dcache */ +- l1_szword = l1d_present ? 18 : (l1u_present ? 17 : 0); +- l1_szword = l1_szword ? cpucfg_cache[l1_szword]: 0; +- +- l2d_present |= cpucfg_cache[16] & 24; /* bit[4:3]: unified l2 cache */ +- l2d_present |= cpucfg_cache[16] & 128; /* bit[7:7]: l2 dcache */ +- l2_szword = l2d_present ? cpucfg_cache[19]: 0; +- +- loongarch_cpu_cache[CPU_NATIVE].l1d_line_size +- = 1 << ((l1_szword & 0x7f000000) >> 24); /* bit[30:24]: log2(linesize) */ +- +- loongarch_cpu_cache[CPU_NATIVE].l1d_size +- = (1 << ((l1_szword & 0x00ff0000) >> 16)) /* bit[23:16]: log2(idx) */ +- * ((l1_szword & 0x0000ffff) + 1) /* bit[15:0]: sets - 1 */ +- * (1 << ((l1_szword & 0x7f000000) >> 24)) /* bit[30:24]: log2(linesize) */ +- >> 10; /* in kilobytes */ +- +- loongarch_cpu_cache[CPU_NATIVE].l2d_size +- = (1 << ((l2_szword & 0x00ff0000) >> 16)) /* bit[23:16]: log2(idx) */ +- * ((l2_szword & 0x0000ffff) + 1) /* bit[15:0]: sets - 1 */ +- * (1 << ((l2_szword & 0x7f000000) >> 24)) /* bit[30:24]: log2(linesize) */ +- >> 10; /* in kilobytes */ +- +- /* Fill: ret_cpu_type ++ /* Fill: tgt->cpu_arch | tgt->cpu_tune + With: processor ID (PRID) + At: cpucfg_words[0][31:0] */ + + switch (cpucfg_cache[0] & 0x00ffff00) + { + case 0x0014c000: /* LA464 */ +- ret_cpu_type = CPU_LA464; ++ native_cpu_type = CPU_LA464; + break; + + default: +- /* Unknown PRID. This is generally harmless as long as +- the properties above can be obtained via "cpucfg". */ +- if (p_tune_native) ++ /* Unknown PRID. 
*/ ++ if (tune_native_p) + inform (UNKNOWN_LOCATION, "unknown processor ID %<0x%x%>, " + "some tuning parameters will fall back to default", + cpucfg_cache[0]); + break; + } + +- /* Properties that cannot be looked up directly using cpucfg. */ +- loongarch_cpu_issue_rate[CPU_NATIVE] +- = loongarch_cpu_issue_rate[ret_cpu_type]; +- +- loongarch_cpu_multipass_dfa_lookahead[CPU_NATIVE] +- = loongarch_cpu_multipass_dfa_lookahead[ret_cpu_type]; +- +- loongarch_cpu_rtx_cost_data[CPU_NATIVE] +- = loongarch_cpu_rtx_cost_data[ret_cpu_type]; ++ /* if -march=native */ ++ if (arch_native_p) ++ { ++ int tmp; ++ tgt->cpu_arch = native_cpu_type; ++ ++ /* Fill: loongarch_cpu_default_isa[tgt->cpu_arch].base ++ With: base architecture (ARCH) ++ At: cpucfg_words[1][1:0] */ ++ ++ #define PRESET_ARCH (loongarch_cpu_default_isa[tgt->cpu_arch].base) ++ switch (cpucfg_cache[1] & 0x3) ++ { ++ case 0x02: ++ tmp = ISA_BASE_LA64V100; ++ break; ++ ++ default: ++ fatal_error (UNKNOWN_LOCATION, ++ "unknown native base architecture %<0x%x%>, " ++ "%qs failed", (unsigned int) (cpucfg_cache[1] & 0x3), ++ "-m" OPTSTR_ARCH "=" STR_CPU_NATIVE); ++ } ++ ++ /* Check consistency with PRID presets. */ ++ if (native_cpu_type != CPU_NATIVE && tmp != PRESET_ARCH) ++ warning (0, "base architecture %qs differs from PRID preset %qs", ++ loongarch_isa_base_strings[tmp], ++ loongarch_isa_base_strings[PRESET_ARCH]); ++ ++ /* Use the native value anyways. 
*/ ++ PRESET_ARCH = tmp; ++ ++ /* Fill: loongarch_cpu_default_isa[tgt->cpu_arch].fpu ++ With: FPU type (FP, FP_SP, FP_DP) ++ At: cpucfg_words[2][2:0] */ ++ ++ #define PRESET_FPU (loongarch_cpu_default_isa[tgt->cpu_arch].fpu) ++ switch (cpucfg_cache[2] & 0x7) ++ { ++ case 0x07: ++ tmp = ISA_EXT_FPU64; ++ break; ++ ++ case 0x03: ++ tmp = ISA_EXT_FPU32; ++ break; ++ ++ case 0x00: ++ tmp = ISA_EXT_NONE; ++ break; ++ ++ default: ++ fatal_error (UNKNOWN_LOCATION, ++ "unknown native FPU type %<0x%x%>, %qs failed", ++ (unsigned int) (cpucfg_cache[2] & 0x7), ++ "-m" OPTSTR_ARCH "=" STR_CPU_NATIVE); ++ } ++ ++ /* Check consistency with PRID presets. */ ++ if (native_cpu_type != CPU_NATIVE && tmp != PRESET_FPU) ++ warning (0, "floating-point unit %qs differs from PRID preset %qs", ++ loongarch_isa_ext_strings[tmp], ++ loongarch_isa_ext_strings[PRESET_FPU]); ++ ++ /* Use the native value anyways. */ ++ PRESET_FPU = tmp; ++ ++ ++ /* Fill: loongarch_cpu_default_isa[CPU_NATIVE].simd ++ With: SIMD extension type (LSX, LASX) ++ At: cpucfg_words[2][7:6] */ ++ ++ #define PRESET_SIMD (loongarch_cpu_default_isa[tgt->cpu_arch].simd) ++ switch (cpucfg_cache[2] & 0xc0) ++ { ++ case 0xc0: ++ tmp = ISA_EXT_SIMD_LASX; ++ break; ++ ++ case 0x40: ++ tmp = ISA_EXT_SIMD_LSX; ++ break; ++ ++ case 0x80: ++ tmp = 0; ++ warning (0, "unknown SIMD extension " ++ "(%qs disabled while %qs is enabled), disabling SIMD", ++ loongarch_isa_ext_strings[ISA_EXT_SIMD_LSX], ++ loongarch_isa_ext_strings[ISA_EXT_SIMD_LASX]); ++ break; ++ ++ case 0x00: ++ tmp = 0; ++ break; ++ } ++ ++ /* Check consistency with PRID presets. */ ++ ++ /* ++ if (native_cpu_type != CPU_NATIVE && tmp != PRESET_SIMD) ++ warning (0, "SIMD extension %qs differs from PRID preset %qs", ++ loongarch_isa_ext_strings[tmp], ++ loongarch_isa_ext_strings[PRESET_SIMD]); ++ */ ++ ++ /* Use the native value anyways. 
*/ ++ PRESET_SIMD = tmp; ++ } + +- return ret_cpu_type; ++ if (tune_native_p) ++ { ++ tgt->cpu_tune = native_cpu_type; ++ ++ /* Fill: loongarch_cpu_cache[tgt->cpu_tune] ++ With: cache size info ++ At: cpucfg_words[16:20][31:0] */ ++ ++ #define PRESET_CACHE (loongarch_cpu_cache[tgt->cpu_tune]) ++ struct loongarch_cache native_cache; ++ int l1d_present = 0, l1u_present = 0; ++ int l2d_present = 0; ++ uint32_t l1_szword, l2_szword; ++ ++ l1u_present |= cpucfg_cache[16] & 3; /* bit[1:0]: unified l1 */ ++ l1d_present |= cpucfg_cache[16] & 4; /* bit[2:2]: l1d */ ++ l1_szword = l1d_present ? 18 : (l1u_present ? 17 : 0); ++ l1_szword = l1_szword ? cpucfg_cache[l1_szword]: 0; ++ ++ l2d_present |= cpucfg_cache[16] & 24; /* bit[4:3]: unified l2 */ ++ l2d_present |= cpucfg_cache[16] & 128; /* bit[7:7]: l2d */ ++ l2_szword = l2d_present ? cpucfg_cache[19]: 0; ++ ++ native_cache.l1d_line_size ++ = 1 << ((l1_szword & 0x7f000000) >> 24); /* bit[30:24]: log2(line) */ ++ ++ native_cache.l1d_size ++ = (1 << ((l1_szword & 0x00ff0000) >> 16)) /* bit[23:16]: log2(idx) */ ++ * ((l1_szword & 0x0000ffff) + 1) /* bit[15:0]: sets - 1 */ ++ * (1 << ((l1_szword & 0x7f000000) >> 24)) /* bit[30:24]: log2(line) */ ++ >> 10; /* in kibibytes */ ++ ++ native_cache.l2d_size ++ = (1 << ((l2_szword & 0x00ff0000) >> 16)) /* bit[23:16]: log2(idx) */ ++ * ((l2_szword & 0x0000ffff) + 1) /* bit[15:0]: sets - 1 */ ++ * (1 << ((l2_szword & 0x7f000000) >> 24)) /* bit[30:24]: log2(linesz) */ ++ >> 10; /* in kibibytes */ ++ ++ /* Use the native value anyways. */ ++ PRESET_CACHE.l1d_line_size = native_cache.l1d_line_size; ++ PRESET_CACHE.l1d_size = native_cache.l1d_size; ++ PRESET_CACHE.l2d_size = native_cache.l2d_size; ++ } + } +diff --git a/gcc/config/loongarch/loongarch-cpu.h b/gcc/config/loongarch/loongarch-cpu.h +index 93d656f70..eacb38774 100644 +--- a/gcc/config/loongarch/loongarch-cpu.h ++++ b/gcc/config/loongarch/loongarch-cpu.h +@@ -21,9 +21,10 @@ along with GCC; see the file COPYING3. 
If not see + #define LOONGARCH_CPU_H + + #include "system.h" ++#include "loongarch-def.h" + + void cache_cpucfg (void); +-unsigned int fill_native_cpu_config (int p_arch_native, int p_tune_native); ++void fill_native_cpu_config (struct loongarch_target *tgt); + uint32_t get_native_prid (void); + const char* get_native_prid_str (void); + +diff --git a/gcc/config/loongarch/loongarch-def.c b/gcc/config/loongarch/loongarch-def.c +index 74d422ce0..d29d5f001 100644 +--- a/gcc/config/loongarch/loongarch-def.c ++++ b/gcc/config/loongarch/loongarch-def.c +@@ -21,25 +21,11 @@ along with GCC; see the file COPYING3. If not see + #include "loongarch-def.h" + #include "loongarch-str.h" + +-/* Default RTX cost initializer. */ +-#define COSTS_N_INSNS(N) ((N) * 4) +-#define DEFAULT_COSTS \ +- .fp_add = COSTS_N_INSNS (1), \ +- .fp_mult_sf = COSTS_N_INSNS (2), \ +- .fp_mult_df = COSTS_N_INSNS (4), \ +- .fp_div_sf = COSTS_N_INSNS (6), \ +- .fp_div_df = COSTS_N_INSNS (8), \ +- .int_mult_si = COSTS_N_INSNS (1), \ +- .int_mult_di = COSTS_N_INSNS (1), \ +- .int_div_si = COSTS_N_INSNS (4), \ +- .int_div_di = COSTS_N_INSNS (6), \ +- .branch_cost = 2, \ +- .memory_latency = 4 +- + /* CPU property tables. */ + const char* + loongarch_cpu_strings[N_TUNE_TYPES] = { + [CPU_NATIVE] = STR_CPU_NATIVE, ++ [CPU_ABI_DEFAULT] = STR_CPU_ABI_DEFAULT, + [CPU_LOONGARCH64] = STR_CPU_LOONGARCH64, + [CPU_LA464] = STR_CPU_LA464, + }; +@@ -49,10 +35,12 @@ loongarch_cpu_default_isa[N_ARCH_TYPES] = { + [CPU_LOONGARCH64] = { + .base = ISA_BASE_LA64V100, + .fpu = ISA_EXT_FPU64, ++ .simd = 0, + }, + [CPU_LA464] = { + .base = ISA_BASE_LA64V100, + .fpu = ISA_EXT_FPU64, ++ .simd = ISA_EXT_SIMD_LASX, + }, + }; + +@@ -84,6 +72,22 @@ loongarch_cpu_align[N_TUNE_TYPES] = { + }, + }; + ++ ++/* Default RTX cost initializer. 
*/ ++#define COSTS_N_INSNS(N) ((N) * 4) ++#define DEFAULT_COSTS \ ++ .fp_add = COSTS_N_INSNS (1), \ ++ .fp_mult_sf = COSTS_N_INSNS (2), \ ++ .fp_mult_df = COSTS_N_INSNS (4), \ ++ .fp_div_sf = COSTS_N_INSNS (6), \ ++ .fp_div_df = COSTS_N_INSNS (8), \ ++ .int_mult_si = COSTS_N_INSNS (1), \ ++ .int_mult_di = COSTS_N_INSNS (1), \ ++ .int_div_si = COSTS_N_INSNS (4), \ ++ .int_div_di = COSTS_N_INSNS (6), \ ++ .branch_cost = 2, \ ++ .memory_latency = 4 ++ + /* The following properties cannot be looked up directly using "cpucfg". + So it is necessary to provide a default value for "unknown native" + tune targets (i.e. -mtune=native while PRID does not correspond to +@@ -103,7 +107,7 @@ loongarch_cpu_rtx_cost_data[N_TUNE_TYPES] = { + }; + + /* RTX costs to use when optimizing for size. */ +-extern const struct loongarch_rtx_cost_data ++const struct loongarch_rtx_cost_data + loongarch_rtx_cost_optimize_size = { + .fp_add = 4, + .fp_mult_sf = 4, +@@ -144,9 +148,11 @@ loongarch_isa_base_strings[N_ISA_BASE_TYPES] = { + + const char* + loongarch_isa_ext_strings[N_ISA_EXT_TYPES] = { +- [ISA_EXT_FPU64] = STR_ISA_EXT_FPU64, ++ [ISA_EXT_NONE] = STR_NONE, + [ISA_EXT_FPU32] = STR_ISA_EXT_FPU32, +- [ISA_EXT_NOFPU] = STR_ISA_EXT_NOFPU, ++ [ISA_EXT_FPU64] = STR_ISA_EXT_FPU64, ++ [ISA_EXT_SIMD_LSX] = STR_ISA_EXT_LSX, ++ [ISA_EXT_SIMD_LASX] = STR_ISA_EXT_LASX, + }; + + const char* +@@ -171,24 +177,29 @@ loongarch_cmodel_strings[] = { + [CMODEL_EXTREME] = STR_CMODEL_EXTREME, + }; + +-const char* +-loongarch_switch_strings[] = { +- [SW_SOFT_FLOAT] = OPTSTR_SOFT_FLOAT, +- [SW_SINGLE_FLOAT] = OPTSTR_SINGLE_FLOAT, +- [SW_DOUBLE_FLOAT] = OPTSTR_DOUBLE_FLOAT, +-}; +- + + /* ABI-related definitions. 
*/ + const struct loongarch_isa + abi_minimal_isa[N_ABI_BASE_TYPES][N_ABI_EXT_TYPES] = { + [ABI_BASE_LP64D] = { +- [ABI_EXT_BASE] = {.base = ISA_BASE_LA64V100, .fpu = ISA_EXT_FPU64}, ++ [ABI_EXT_BASE] = { ++ .base = ISA_BASE_LA64V100, ++ .fpu = ISA_EXT_FPU64, ++ .simd = 0 ++ }, + }, + [ABI_BASE_LP64F] = { +- [ABI_EXT_BASE] = {.base = ISA_BASE_LA64V100, .fpu = ISA_EXT_FPU32}, ++ [ABI_EXT_BASE] = { ++ .base = ISA_BASE_LA64V100, ++ .fpu = ISA_EXT_FPU32, ++ .simd = 0 ++ }, + }, + [ABI_BASE_LP64S] = { +- [ABI_EXT_BASE] = {.base = ISA_BASE_LA64V100, .fpu = ISA_EXT_NOFPU}, ++ [ABI_EXT_BASE] = { ++ .base = ISA_BASE_LA64V100, ++ .fpu = ISA_EXT_NONE, ++ .simd = 0 ++ }, + }, + }; +diff --git a/gcc/config/loongarch/loongarch-def.h b/gcc/config/loongarch/loongarch-def.h +index eb87a79a5..0aee7dc19 100644 +--- a/gcc/config/loongarch/loongarch-def.h ++++ b/gcc/config/loongarch/loongarch-def.h +@@ -59,11 +59,13 @@ extern const char* loongarch_isa_base_strings[]; + + /* enum isa_ext_* */ + extern const char* loongarch_isa_ext_strings[]; +-#define ISA_EXT_NOFPU 0 ++#define ISA_EXT_NONE 0 + #define ISA_EXT_FPU32 1 + #define ISA_EXT_FPU64 2 + #define N_ISA_EXT_FPU_TYPES 3 +-#define N_ISA_EXT_TYPES 3 ++#define ISA_EXT_SIMD_LSX 3 ++#define ISA_EXT_SIMD_LASX 4 ++#define N_ISA_EXT_TYPES 5 + + /* enum abi_base */ + extern const char* loongarch_abi_base_strings[]; +@@ -72,6 +74,16 @@ extern const char* loongarch_abi_base_strings[]; + #define ABI_BASE_LP64S 2 + #define N_ABI_BASE_TYPES 3 + ++#define TO_LP64_ABI_BASE(C) (C) ++ ++#define ABI_FPU_64(abi_base) \ ++ (abi_base == ABI_BASE_LP64D) ++#define ABI_FPU_32(abi_base) \ ++ (abi_base == ABI_BASE_LP64F) ++#define ABI_FPU_NONE(abi_base) \ ++ (abi_base == ABI_BASE_LP64S) ++ ++ + /* enum abi_ext */ + extern const char* loongarch_abi_ext_strings[]; + #define ABI_EXT_BASE 0 +@@ -87,55 +99,44 @@ extern const char* loongarch_cmodel_strings[]; + #define CMODEL_EXTREME 5 + #define N_CMODEL_TYPES 6 + +-/* enum switches */ +-/* The "SW_" codes 
represent command-line switches (options that +- accept no parameters). Definition for other switches that affects +- the target ISA / ABI configuration will also be appended here +- in the future. */ +- +-extern const char* loongarch_switch_strings[]; +-#define SW_SOFT_FLOAT 0 +-#define SW_SINGLE_FLOAT 1 +-#define SW_DOUBLE_FLOAT 2 +-#define N_SWITCH_TYPES 3 +- + /* The common default value for variables whose assignments + are triggered by command-line options. */ + +-#define M_OPTION_NOT_SEEN -1 +-#define M_OPT_ABSENT(opt_enum) ((opt_enum) == M_OPTION_NOT_SEEN) ++#define M_OPT_UNSET -1 ++#define M_OPT_ABSENT(opt_enum) ((opt_enum) == M_OPT_UNSET) + + + /* Internal representation of the target. */ + struct loongarch_isa + { +- unsigned char base; /* ISA_BASE_ */ +- unsigned char fpu; /* ISA_EXT_FPU_ */ ++ int base; /* ISA_BASE_ */ ++ int fpu; /* ISA_EXT_FPU_ */ ++ int simd; /* ISA_EXT_SIMD_ */ + }; + + struct loongarch_abi + { +- unsigned char base; /* ABI_BASE_ */ +- unsigned char ext; /* ABI_EXT_ */ ++ int base; /* ABI_BASE_ */ ++ int ext; /* ABI_EXT_ */ + }; + + struct loongarch_target + { + struct loongarch_isa isa; + struct loongarch_abi abi; +- unsigned char cpu_arch; /* CPU_ */ +- unsigned char cpu_tune; /* same */ +- unsigned char cpu_native; /* same */ +- unsigned char cmodel; /* CMODEL_ */ ++ int cpu_arch; /* CPU_ */ ++ int cpu_tune; /* same */ ++ int cmodel; /* CMODEL_ */ + }; + + /* CPU properties. */ + /* index */ + #define CPU_NATIVE 0 +-#define CPU_LOONGARCH64 1 +-#define CPU_LA464 2 +-#define N_ARCH_TYPES 3 +-#define N_TUNE_TYPES 3 ++#define CPU_ABI_DEFAULT 1 ++#define CPU_LOONGARCH64 2 ++#define CPU_LA464 3 ++#define N_ARCH_TYPES 4 ++#define N_TUNE_TYPES 4 + + /* parallel tables. 
*/ + extern const char* loongarch_cpu_strings[]; +diff --git a/gcc/config/loongarch/loongarch-driver.cc b/gcc/config/loongarch/loongarch-driver.cc +index 0adcc923b..b3626984d 100644 +--- a/gcc/config/loongarch/loongarch-driver.cc ++++ b/gcc/config/loongarch/loongarch-driver.cc +@@ -26,122 +26,137 @@ along with GCC; see the file COPYING3. If not see + #include "tm.h" + #include "obstack.h" + #include "diagnostic-core.h" ++#include "opts.h" + + #include "loongarch-opts.h" + #include "loongarch-driver.h" + +-static int +- opt_arch_driver = M_OPTION_NOT_SEEN, +- opt_tune_driver = M_OPTION_NOT_SEEN, +- opt_fpu_driver = M_OPTION_NOT_SEEN, +- opt_abi_base_driver = M_OPTION_NOT_SEEN, +- opt_abi_ext_driver = M_OPTION_NOT_SEEN, +- opt_cmodel_driver = M_OPTION_NOT_SEEN; +- +-int opt_switches = 0; +- + /* This flag is set to 1 if we believe that the user might be avoiding + linking (implicitly) against something from the startfile search paths. */ + static int no_link = 0; + +-#define LARCH_DRIVER_SET_M_FLAG(OPTS_ARRAY, N_OPTS, FLAG, STR) \ +- for (int i = 0; i < (N_OPTS); i++) \ +- { \ +- if ((OPTS_ARRAY)[i] != 0) \ +- if (strcmp ((STR), (OPTS_ARRAY)[i]) == 0) \ +- (FLAG) = i; \ +- } +- + /* Use the public obstack from the gcc driver (defined in gcc.c). + This is for allocating space for the returned string. 
*/ + extern struct obstack opts_obstack; + +-#define APPEND_LTR(S) \ +- obstack_grow (&opts_obstack, (const void*) (S), \ +- sizeof ((S)) / sizeof (char) -1) +- +-#define APPEND_VAL(S) \ +- obstack_grow (&opts_obstack, (const void*) (S), strlen ((S))) ++const char* ++la_driver_init (int argc ATTRIBUTE_UNUSED, const char **argv ATTRIBUTE_UNUSED) ++{ ++ /* Initialize all fields of la_target to -1 */ ++ loongarch_init_target (&la_target, M_OPT_UNSET, M_OPT_UNSET, M_OPT_UNSET, ++ M_OPT_UNSET, M_OPT_UNSET, M_OPT_UNSET, M_OPT_UNSET); ++ return ""; ++} + ++const char* ++driver_set_no_link (int argc ATTRIBUTE_UNUSED, ++ const char **argv ATTRIBUTE_UNUSED) ++{ ++ no_link = 1; ++ return ""; ++} + + const char* +-driver_set_m_flag (int argc, const char **argv) ++driver_set_m_parm (int argc, const char **argv) + { +- int parm_off = 0; ++ gcc_assert (argc == 2); ++ ++#define LARCH_DRIVER_PARSE_PARM(OPT_IDX, NAME, OPTSTR_LIST, \ ++ OPT_IDX_LO, OPT_IDX_HI) \ ++ if (strcmp (argv[0], OPTSTR_##NAME) == 0) \ ++ for (int i = (OPT_IDX_LO); i < (OPT_IDX_HI); i++) \ ++ { \ ++ if ((OPTSTR_LIST)[i] != 0) \ ++ if (strcmp (argv[1], (OPTSTR_LIST)[i]) == 0) \ ++ { \ ++ (OPT_IDX) = i; \ ++ return 0; \ ++ } \ ++ } + +- if (argc != 1) +- return "%eset_m_flag requires exactly 1 argument."; ++ LARCH_DRIVER_PARSE_PARM (la_target.abi.base, ABI_BASE, \ ++ loongarch_abi_base_strings, 0, N_ABI_BASE_TYPES) + +-#undef PARM +-#define PARM (argv[0] + parm_off) ++ LARCH_DRIVER_PARSE_PARM (la_target.isa.fpu, ISA_EXT_FPU, \ ++ loongarch_isa_ext_strings, 0, N_ISA_EXT_FPU_TYPES) + +-/* Note: sizeof (OPTSTR_##NAME) equals the length of "